| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
loveyoupeng/rt | modules/web/src/main/native/Tools/gtk/common.py | 2 | 6127 | #!/usr/bin/env python
# Copyright (C) 2011 Igalia S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import errno
import os
import select
import subprocess
import sys
script_dir = None
build_dir = None
library_build_dir = None
binary_build_dir = None
tests_library_build_dir = None
is_cmake = None
build_types = ('Release', 'Debug')
def set_build_types(new_build_types):
global build_types
build_types = new_build_types
def is_cmake_build():
global is_cmake
if is_cmake is None:
is_cmake = os.path.exists(build_path('CMakeCache.txt'))
return is_cmake
def library_build_path(*args):
global library_build_dir
if not library_build_dir:
if is_cmake_build():
library_build_dir = build_path('lib', *args)
else:
library_build_dir = build_path('.libs', *args)
return library_build_dir
def tests_library_build_path(*args):
if is_cmake_build():
return library_build_path(*args)
global tests_library_build_dir
if not tests_library_build_dir:
tests_library_build_dir = build_path('Libraries', *args)
return tests_library_build_dir
def binary_build_path(*args):
# Cache the binaries directory in its own global so it does not clobber
# the library directory cached by library_build_path() above.
global binary_build_dir
if not binary_build_dir:
if is_cmake_build():
binary_build_dir = build_path('bin', *args)
else:
binary_build_dir = build_path('Programs', *args)
return binary_build_dir
def script_path(*args):
global script_dir
if not script_dir:
script_dir = os.path.join(os.path.dirname(__file__), '..', 'Scripts')
return os.path.join(*(script_dir,) + args)
def top_level_path(*args):
return os.path.join(*((os.path.join(os.path.dirname(__file__), '..', '..'),) + args))
def get_build_path(fatal=True):
global build_dir
if build_dir:
return build_dir
def is_valid_build_directory(path):
return os.path.exists(os.path.join(path, 'GNUmakefile')) or \
os.path.exists(os.path.join(path, 'Programs', 'GtkLauncher')) or \
os.path.exists(os.path.join(path, 'Programs', 'MiniBrowser')) or \
os.path.exists(os.path.join(path, 'CMakeCache.txt')) or \
os.path.exists(os.path.join(path, 'bin/GtkLauncher')) or \
os.path.exists(os.path.join(path, 'bin/MiniBrowser'))
if len(sys.argv[1:]) > 1 and os.path.exists(sys.argv[-1]) and is_valid_build_directory(sys.argv[-1]):
return sys.argv[-1]
# Debian and Ubuntu build both flavours of the library (with gtk2
# and with gtk3); they use directories build-2.0 and build-3.0 for
# that, which is not handled by the above cases; we check that the
# directory where we are called from is a valid build directory,
# which should handle pretty much all other non-standard cases.
build_dir = os.getcwd()
if is_valid_build_directory(build_dir):
return build_dir
global build_types
for build_type in build_types:
build_dir = top_level_path('WebKitBuild', build_type)
if is_valid_build_directory(build_dir):
return build_dir
# distcheck builds in a directory named _build in the top-level path.
build_dir = top_level_path("_build")
if is_valid_build_directory(build_dir):
return build_dir
build_dir = top_level_path()
if is_valid_build_directory(build_dir):
return build_dir
build_dir = top_level_path("WebKitBuild")
if is_valid_build_directory(build_dir):
return build_dir
print('Could not determine build directory.')
if fatal:
sys.exit(1)
def build_path(*args):
return os.path.join(*(get_build_path(),) + args)
def pkg_config_file_variable(package, variable):
process = subprocess.Popen(['pkg-config', '--variable=%s' % variable, package],
stdout=subprocess.PIPE)
stdout = process.communicate()[0].decode("utf-8")
if process.returncode:
return None
return stdout.strip()
def prefix_of_pkg_config_file(package):
return pkg_config_file_variable(package, 'prefix')
def gtk_version_of_pkg_config_file(pkg_config_path):
process = subprocess.Popen(['pkg-config', pkg_config_path, '--print-requires'],
stdout=subprocess.PIPE)
stdout = process.communicate()[0].decode("utf-8")
if 'gtk+-3.0' in stdout:
return 3
return 2
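# A hypothetical usage sketch (not part of the original script): ask which GTK
# major version a WebKit .pc file was generated for.
#
#   version = gtk_version_of_pkg_config_file('/usr/lib/pkgconfig/webkitgtk-3.0.pc')
#   assert version in (2, 3)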
def parse_output_lines(fd, parse_line_callback):
output = ''
read_set = [fd]
while read_set:
try:
rlist, wlist, xlist = select.select(read_set, [], [])
except select.error as e:
parse_line_callback("WARNING: error while waiting for fd %d to become readable\n" % fd)
parse_line_callback(" error code: %d, error message: %s\n" % (e[0], e[1]))
continue
if fd in rlist:
try:
chunk = os.read(fd, 1024)
except OSError as e:
if e.errno == errno.EIO:
# Child process finished; treat as end of stream.
chunk = b''
else:
raise
if not chunk:
read_set.remove(fd)
# os.read() returns bytes on Python 3; keep `output` as text.
output += chunk.decode('utf-8', errors='replace')
while '\n' in output:
pos = output.find('\n')
parse_line_callback(output[:pos + 1])
output = output[pos + 1:]
if not chunk and output:
parse_line_callback(output)
output = ''
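# Minimal usage sketch (hypothetical, not part of the original helpers): run a
# child process on a pseudo-terminal and stream its output, line by line,
# through parse_output_lines().
#
#   import pty
#   pid, fd = pty.fork()
#   if pid == 0:  # child: replace ourselves with the build command
#       os.execlp('make', 'make')
#   parse_output_lines(fd, sys.stdout.write)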
| gpl-2.0 |
kustodian/ansible-modules-core | cloud/digital_ocean/digital_ocean_sshkey.py | 37 | 4948 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
'''
EXAMPLES = '''
# Ensure an SSH key is present
# If a key matches this name, the ssh key id is returned and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True
- digital_ocean_sshkey: >
state=present
name=my_ssh_key
ssh_pub_key='ssh-rsa AAAA...'
client_id=XXX
api_key=XXX
'''
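# A hypothetical companion example (not part of the module's EXAMPLES above):
# removing the same key again by name.
#
# - digital_ocean_sshkey: >
#       state=absent
#       name=my_ssh_key
#       client_id=XXX
#       api_key=XXX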
import sys
import os
import time
try:
from dopy.manager import DoError, DoManager
except ImportError as e:
print "failed=True msg='dopy required for this module'"
sys.exit(1)
class TimeoutError(DoError):
def __init__(self, msg, id):
super(TimeoutError, self).__init__(msg)
self.id = id
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
class SSH(JsonfyMixIn):
manager = None
def __init__(self, ssh_key_json):
self.__dict__.update(ssh_key_json)
update_attr = __init__
def destroy(self):
self.manager.destroy_ssh_key(self.id)
return True
@classmethod
def setup(cls, client_id, api_key):
cls.manager = DoManager(client_id, api_key)
@classmethod
def find(cls, name):
if not name:
return False
keys = cls.list_all()
for key in keys:
if key.name == name:
return key
return False
@classmethod
def list_all(cls):
json = cls.manager.all_ssh_keys()
return map(cls, json)
@classmethod
def add(cls, name, key_pub):
json = cls.manager.new_ssh_key(name, key_pub)
return cls(json)
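# Minimal usage sketch of the SSH helper (hypothetical credentials and key
# data; 'id' is assumed to be part of the API's JSON response):
#
#   SSH.setup('client-id', 'api-key')
#   key = SSH.find('my_ssh_key') or SSH.add('my_ssh_key', 'ssh-rsa AAAA...')
#   key_id = key.to_json()['id']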
def core(module):
def getkeyordie(k):
v = module.params[k]
if v is None:
module.fail_json(msg='Unable to load %s' % k)
return v
try:
# params['client_id'] will be None even if client_id is not passed in
client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
api_key = module.params['api_key'] or os.environ['DO_API_KEY']
except KeyError, e:
module.fail_json(msg='Unable to load %s' % e.message)
changed = True
state = module.params['state']
SSH.setup(client_id, api_key)
name = getkeyordie('name')
if state == 'present':
key = SSH.find(name)
if key:
module.exit_json(changed=False, ssh_key=key.to_json())
key = SSH.add(name, getkeyordie('ssh_pub_key'))
module.exit_json(changed=True, ssh_key=key.to_json())
elif state == 'absent':
key = SSH.find(name)
if not key:
module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
key.destroy()
module.exit_json(changed=True)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(choices=['present', 'absent'], default='present'),
client_id = dict(aliases=['CLIENT_ID'], no_log=True),
api_key = dict(aliases=['API_KEY'], no_log=True),
name = dict(type='str'),
id = dict(aliases=['droplet_id'], type='int'),
ssh_pub_key = dict(type='str'),
),
required_one_of = (
['id', 'name'],
),
)
try:
core(module)
except TimeoutError as e:
module.fail_json(msg=str(e), id=e.id)
except (DoError, Exception) as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
munyirik/python | cpython/Lib/test/test_compile.py | 2 | 21808 | import math
import os
import unittest
import sys
import _ast
import tempfile
import types
from test import support
from test.support import script_helper
class TestSpecifics(unittest.TestCase):
def compile_single(self, source):
compile(source, "<single>", "single")
def assertInvalidSingle(self, source):
self.assertRaises(SyntaxError, self.compile_single, source)
def test_no_ending_newline(self):
compile("hi", "<test>", "exec")
compile("hi\r", "<test>", "exec")
def test_empty(self):
compile("", "<test>", "exec")
def test_other_newlines(self):
compile("\r\n", "<test>", "exec")
compile("\r", "<test>", "exec")
compile("hi\r\nstuff\r\ndef f():\n pass\r", "<test>", "exec")
compile("this_is\rreally_old_mac\rdef f():\n pass", "<test>", "exec")
def test_debug_assignment(self):
# catch assignments to __debug__
self.assertRaises(SyntaxError, compile, '__debug__ = 1', '?', 'single')
import builtins
prev = builtins.__debug__
setattr(builtins, '__debug__', 'sure')
setattr(builtins, '__debug__', prev)
def test_argument_handling(self):
# detect duplicate positional and keyword arguments
self.assertRaises(SyntaxError, eval, 'lambda a,a:0')
self.assertRaises(SyntaxError, eval, 'lambda a,a=1:0')
self.assertRaises(SyntaxError, eval, 'lambda a=1,a=1:0')
self.assertRaises(SyntaxError, exec, 'def f(a, a): pass')
self.assertRaises(SyntaxError, exec, 'def f(a = 0, a = 1): pass')
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_syntax_error(self):
self.assertRaises(SyntaxError, compile, "1+*3", "filename", "exec")
def test_none_keyword_arg(self):
self.assertRaises(SyntaxError, compile, "f(None=1)", "<string>", "exec")
def test_duplicate_global_local(self):
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_exec_with_general_mapping_for_locals(self):
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def __setitem__(self, key, value):
self.results = (key, value)
def keys(self):
return list('xyz')
m = M()
g = globals()
exec('z = a', g, m)
self.assertEqual(m.results, ('z', 12))
try:
exec('z = b', g, m)
except NameError:
pass
else:
self.fail('Did not detect a KeyError')
exec('z = dir()', g, m)
self.assertEqual(m.results, ('z', list('xyz')))
exec('z = globals()', g, m)
self.assertEqual(m.results, ('z', g))
exec('z = locals()', g, m)
self.assertEqual(m.results, ('z', m))
self.assertRaises(TypeError, exec, 'z = b', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, exec, 'z = a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
d = D()
exec('z = a', g, d)
self.assertEqual(d['z'], 12)
def test_extended_arg(self):
longexpr = 'x = x or ' + '-x' * 2500
g = {}
code = '''
def f(x):
%s
%s
%s
%s
%s
%s
%s
%s
%s
%s
# the expressions above have no effect, x == argument
while x:
x -= 1
# EXTENDED_ARG/JUMP_ABSOLUTE here
return x
''' % ((longexpr,)*10)
exec(code, g)
self.assertEqual(g['f'](5), 0)
def test_argument_order(self):
self.assertRaises(SyntaxError, exec, 'def f(a=1, b): pass')
def test_float_literals(self):
# testing bad float literals
self.assertRaises(SyntaxError, eval, "2e")
self.assertRaises(SyntaxError, eval, "2.0e+")
self.assertRaises(SyntaxError, eval, "1e-")
self.assertRaises(SyntaxError, eval, "3-4e/21")
def test_indentation(self):
# testing compile() of indented block w/o trailing newline
s = """
if 1:
if 2:
pass"""
compile(s, "<string>", "exec")
# This test is probably specific to CPython and may not generalize
# to other implementations. We are trying to ensure that when
# the first line of code starts after 256, correct line numbers
# in tracebacks are still produced.
def test_leading_newlines(self):
s256 = "".join(["\n"] * 256 + ["spam"])
co = compile(s256, 'fn', 'exec')
self.assertEqual(co.co_firstlineno, 257)
self.assertEqual(co.co_lnotab, bytes())
def test_literals_with_leading_zeroes(self):
for arg in ["077787", "0xj", "0x.", "0e", "090000000000000",
"080000000000000", "000000000000009", "000000000000008",
"0b42", "0BADCAFE", "0o123456789", "0b1.1", "0o4.2",
"0b101j2", "0o153j2", "0b100e1", "0o777e1", "0777",
"000777", "000000000000007"]:
self.assertRaises(SyntaxError, eval, arg)
self.assertEqual(eval("0xff"), 255)
self.assertEqual(eval("0777."), 777)
self.assertEqual(eval("0777.0"), 777)
self.assertEqual(eval("000000000000000000000000000000000000000000000000000777e0"), 777)
self.assertEqual(eval("0777e1"), 7770)
self.assertEqual(eval("0e0"), 0)
self.assertEqual(eval("0000e-012"), 0)
self.assertEqual(eval("09.5"), 9.5)
self.assertEqual(eval("0777j"), 777j)
self.assertEqual(eval("000"), 0)
self.assertEqual(eval("00j"), 0j)
self.assertEqual(eval("00.0"), 0)
self.assertEqual(eval("0e3"), 0)
self.assertEqual(eval("090000000000000."), 90000000000000.)
self.assertEqual(eval("090000000000000.0000000000000000000000"), 90000000000000.)
self.assertEqual(eval("090000000000000e0"), 90000000000000.)
self.assertEqual(eval("090000000000000e-0"), 90000000000000.)
self.assertEqual(eval("090000000000000j"), 90000000000000j)
self.assertEqual(eval("000000000000008."), 8.)
self.assertEqual(eval("000000000000009."), 9.)
self.assertEqual(eval("0b101010"), 42)
self.assertEqual(eval("-0b000000000010"), -2)
self.assertEqual(eval("0o777"), 511)
self.assertEqual(eval("-0o0000010"), -8)
def test_unary_minus(self):
# Verify treatment of unary minus on negative numbers SF bug #660455
if sys.maxsize == 2147483647:
# 32-bit machine
all_one_bits = '0xffffffff'
self.assertEqual(eval(all_one_bits), 4294967295)
self.assertEqual(eval("-" + all_one_bits), -4294967295)
elif sys.maxsize == 9223372036854775807:
# 64-bit machine
all_one_bits = '0xffffffffffffffff'
self.assertEqual(eval(all_one_bits), 18446744073709551615)
self.assertEqual(eval("-" + all_one_bits), -18446744073709551615)
else:
self.fail("How many bits *does* this machine have???")
# Verify treatment of constant folding on -(sys.maxsize+1)
# i.e. -2147483648 on 32 bit platforms. Should return int.
self.assertIsInstance(eval("%s" % (-sys.maxsize - 1)), int)
self.assertIsInstance(eval("%s" % (-sys.maxsize - 2)), int)
if sys.maxsize == 9223372036854775807:
def test_32_63_bit_values(self):
a = +4294967296 # 1 << 32
b = -4294967296 # 1 << 32
c = +281474976710656 # 1 << 48
d = -281474976710656 # 1 << 48
e = +4611686018427387904 # 1 << 62
f = -4611686018427387904 # 1 << 62
g = +9223372036854775807 # 1 << 63 - 1
h = -9223372036854775807 # 1 << 63 - 1
for variable in self.test_32_63_bit_values.__code__.co_consts:
if variable is not None:
self.assertIsInstance(variable, int)
def test_sequence_unpacking_error(self):
# Verify sequence packing/unpacking with "or". SF bug #757818
i,j = (1, -1) or (-1, 1)
self.assertEqual(i, 1)
self.assertEqual(j, -1)
def test_none_assignment(self):
stmts = [
'None = 0',
'None += 0',
'__builtins__.None = 0',
'def None(): pass',
'class None: pass',
'(a, None) = 0, 0',
'for None in range(10): pass',
'def f(None): pass',
'import None',
'import x as None',
'from x import None',
'from x import y as None'
]
for stmt in stmts:
stmt += "\n"
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'single')
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_import(self):
succeed = [
'import sys',
'import os, sys',
'import os as bar',
'import os.path as bar',
'from __future__ import nested_scopes, generators',
'from __future__ import (nested_scopes,\ngenerators)',
'from __future__ import (nested_scopes,\ngenerators,)',
'from sys import stdin, stderr, stdout',
'from sys import (stdin, stderr,\nstdout)',
'from sys import (stdin, stderr,\nstdout,)',
'from sys import (stdin\n, stderr, stdout)',
'from sys import (stdin\n, stderr, stdout,)',
'from sys import stdin as si, stdout as so, stderr as se',
'from sys import (stdin as si, stdout as so, stderr as se)',
'from sys import (stdin as si, stdout as so, stderr as se,)',
]
fail = [
'import (os, sys)',
'import (os), (sys)',
'import ((os), (sys))',
'import (sys',
'import sys)',
'import (os,)',
'import os As bar',
'import os.path a bar',
'from sys import stdin As stdout',
'from sys import stdin a stdout',
'from (sys) import stdin',
'from __future__ import (nested_scopes',
'from __future__ import nested_scopes)',
'from __future__ import nested_scopes,\ngenerators',
'from sys import (stdin',
'from sys import stdin)',
'from sys import stdin, stdout,\nstderr',
'from sys import stdin si',
'from sys import stdin,'
'from sys import (*)',
'from sys import (stdin,, stdout, stderr)',
'from sys import (stdin, stdout),',
]
for stmt in succeed:
compile(stmt, 'tmp', 'exec')
for stmt in fail:
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_for_distinct_code_objects(self):
# SF bug 1048870
def f():
f1 = lambda x=1: x
f2 = lambda x=2: x
return f1, f2
f1, f2 = f()
self.assertNotEqual(id(f1.__code__), id(f2.__code__))
def test_lambda_doc(self):
l = lambda: "foo"
self.assertIsNone(l.__doc__)
def test_encoding(self):
code = b'# -*- coding: badencoding -*-\npass\n'
self.assertRaises(SyntaxError, compile, code, 'tmp', 'exec')
code = '# -*- coding: badencoding -*-\n"\xc2\xa4"\n'
compile(code, 'tmp', 'exec')
self.assertEqual(eval(code), '\xc2\xa4')
code = '"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: latin1 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'# -*- coding: utf-8 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: iso8859-15 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\u20ac')
code = '"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xc2\xa4')
code = b'"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xa4')
def test_subscripts(self):
# SF bug 1448804
# Class to make testing subscript results easy
class str_map(object):
def __init__(self):
self.data = {}
def __getitem__(self, key):
return self.data[str(key)]
def __setitem__(self, key, value):
self.data[str(key)] = value
def __delitem__(self, key):
del self.data[str(key)]
def __contains__(self, key):
return str(key) in self.data
d = str_map()
# Index
d[1] = 1
self.assertEqual(d[1], 1)
d[1] += 1
self.assertEqual(d[1], 2)
del d[1]
self.assertNotIn(1, d)
# Tuple of indices
d[1, 1] = 1
self.assertEqual(d[1, 1], 1)
d[1, 1] += 1
self.assertEqual(d[1, 1], 2)
del d[1, 1]
self.assertNotIn((1, 1), d)
# Simple slice
d[1:2] = 1
self.assertEqual(d[1:2], 1)
d[1:2] += 1
self.assertEqual(d[1:2], 2)
del d[1:2]
self.assertNotIn(slice(1, 2), d)
# Tuple of simple slices
d[1:2, 1:2] = 1
self.assertEqual(d[1:2, 1:2], 1)
d[1:2, 1:2] += 1
self.assertEqual(d[1:2, 1:2], 2)
del d[1:2, 1:2]
self.assertNotIn((slice(1, 2), slice(1, 2)), d)
# Extended slice
d[1:2:3] = 1
self.assertEqual(d[1:2:3], 1)
d[1:2:3] += 1
self.assertEqual(d[1:2:3], 2)
del d[1:2:3]
self.assertNotIn(slice(1, 2, 3), d)
# Tuple of extended slices
d[1:2:3, 1:2:3] = 1
self.assertEqual(d[1:2:3, 1:2:3], 1)
d[1:2:3, 1:2:3] += 1
self.assertEqual(d[1:2:3, 1:2:3], 2)
del d[1:2:3, 1:2:3]
self.assertNotIn((slice(1, 2, 3), slice(1, 2, 3)), d)
# Ellipsis
d[...] = 1
self.assertEqual(d[...], 1)
d[...] += 1
self.assertEqual(d[...], 2)
del d[...]
self.assertNotIn(Ellipsis, d)
# Tuple of Ellipses
d[..., ...] = 1
self.assertEqual(d[..., ...], 1)
d[..., ...] += 1
self.assertEqual(d[..., ...], 2)
del d[..., ...]
self.assertNotIn((Ellipsis, Ellipsis), d)
def test_annotation_limit(self):
# 16 bits are available for # of annotations, but only 8 bits are
# available for the parameter count, hence 255
# is the max. Ensure the result of too many annotations is a
# SyntaxError.
s = "def f(%s): pass"
s %= ', '.join('a%d:%d' % (i,i) for i in range(256))
self.assertRaises(SyntaxError, compile, s, '?', 'exec')
# Test that the max # of annotations compiles.
s = "def f(%s): pass"
s %= ', '.join('a%d:%d' % (i,i) for i in range(255))
compile(s, '?', 'exec')
def test_mangling(self):
class A:
def f():
__mangled = 1
__not_mangled__ = 2
import __mangled_mod
import __package__.module
self.assertIn("_A__mangled", A.f.__code__.co_varnames)
self.assertIn("__not_mangled__", A.f.__code__.co_varnames)
self.assertIn("_A__mangled_mod", A.f.__code__.co_varnames)
self.assertIn("__package__", A.f.__code__.co_varnames)
def test_compile_ast(self):
fname = __file__
if fname.lower().endswith('pyc'):
fname = fname[:-1]
with open(fname, 'r') as f:
fcontents = f.read()
sample_code = [
['<assign>', 'x = 5'],
['<ifblock>', """if True:\n pass\n"""],
['<forblock>', """for n in [1, 2, 3]:\n print(n)\n"""],
['<deffunc>', """def foo():\n pass\nfoo()\n"""],
[fname, fcontents],
]
for fname, code in sample_code:
co1 = compile(code, '%s1' % fname, 'exec')
ast = compile(code, '%s2' % fname, 'exec', _ast.PyCF_ONLY_AST)
self.assertTrue(type(ast) == _ast.Module)
co2 = compile(ast, '%s3' % fname, 'exec')
self.assertEqual(co1, co2)
# the code object's filename comes from the second compilation step
self.assertEqual(co2.co_filename, '%s3' % fname)
# raise exception when node type doesn't match with compile mode
co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST)
self.assertRaises(TypeError, compile, co1, '<ast>', 'eval')
# raise exception when node type is no start node
self.assertRaises(TypeError, compile, _ast.If(), '<ast>', 'exec')
# raise exception when node has invalid children
ast = _ast.Module()
ast.body = [_ast.BoolOp()]
self.assertRaises(TypeError, compile, ast, '<ast>', 'exec')
def test_dict_evaluation_order(self):
i = 0
def f():
nonlocal i
i += 1
return i
d = {f(): f(), f(): f()}
self.assertEqual(d, {1: 2, 3: 4})
@support.cpython_only
def test_same_filename_used(self):
s = """def f(): pass\ndef g(): pass"""
c = compile(s, "myfile", "exec")
for obj in c.co_consts:
if isinstance(obj, types.CodeType):
self.assertIs(obj.co_filename, c.co_filename)
def test_single_statement(self):
self.compile_single("1 + 2")
self.compile_single("\n1 + 2")
self.compile_single("1 + 2\n")
self.compile_single("1 + 2\n\n")
self.compile_single("1 + 2\t\t\n")
self.compile_single("1 + 2\t\t\n ")
self.compile_single("1 + 2 # one plus two")
self.compile_single("1; 2")
self.compile_single("import sys; sys")
self.compile_single("def f():\n pass")
self.compile_single("while False:\n pass")
self.compile_single("if x:\n f(x)")
self.compile_single("if x:\n f(x)\nelse:\n g(x)")
self.compile_single("class T:\n pass")
def test_bad_single_statement(self):
self.assertInvalidSingle('1\n2')
self.assertInvalidSingle('def f(): pass')
self.assertInvalidSingle('a = 13\nb = 187')
self.assertInvalidSingle('del x\ndel y')
self.assertInvalidSingle('f()\ng()')
self.assertInvalidSingle('f()\n# blah\nblah()')
self.assertInvalidSingle('f()\nxy # blah\nblah()')
self.assertInvalidSingle('x = 5 # comment\nx = 6\n')
def test_particularly_evil_undecodable(self):
# Issue 24022
src = b'0000\x00\n00000000000\n\x00\n\x9e\n'
with tempfile.TemporaryDirectory() as tmpd:
fn = os.path.join(tmpd, "bad.py")
with open(fn, "wb") as fp:
fp.write(src)
res = script_helper.run_python_until_end(fn)[0]
self.assertIn(b"Non-UTF-8", res.err)
@support.cpython_only
def test_compiler_recursion_limit(self):
# Expected limit is sys.getrecursionlimit() * the scaling factor
# in symtable.c (currently 3)
# We expect to fail *at* that limit, because we use up some of
# the stack depth limit in the test suite code
# So we check the expected limit and 75% of that
# XXX (ncoghlan): duplicating the scaling factor here is a little
# ugly. Perhaps it should be exposed somewhere...
fail_depth = sys.getrecursionlimit() * 3
success_depth = int(fail_depth * 0.75)
def check_limit(prefix, repeated):
expect_ok = prefix + repeated * success_depth
self.compile_single(expect_ok)
broken = prefix + repeated * fail_depth
details = "Compiling ({!r} + {!r} * {})".format(
prefix, repeated, fail_depth)
with self.assertRaises(RuntimeError, msg=details):
self.compile_single(broken)
check_limit("a", "()")
check_limit("a", ".b")
check_limit("a", "[0]")
check_limit("a", "*a")
class TestStackSize(unittest.TestCase):
# These tests check that the computed stack size for a code object
# stays within reasonable bounds (see issue #21523 for an example
# dysfunction).
N = 100
def check_stack_size(self, code):
# To assert that the alleged stack size is not O(N), we
# check that it is smaller than log(N).
if isinstance(code, str):
code = compile(code, "<foo>", "single")
max_size = math.ceil(math.log(len(code.co_code)))
self.assertLessEqual(code.co_stacksize, max_size)
def test_and(self):
self.check_stack_size("x and " * self.N + "x")
def test_or(self):
self.check_stack_size("x or " * self.N + "x")
def test_and_or(self):
self.check_stack_size("x and x or " * self.N + "x")
def test_chained_comparison(self):
self.check_stack_size("x < " * self.N + "x")
def test_if_else(self):
self.check_stack_size("x if x else " * self.N + "x")
def test_binop(self):
self.check_stack_size("x + " * self.N + "x")
def test_func_and(self):
code = "def f(x):\n"
code += " x and x\n" * self.N
self.check_stack_size(code)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
aioue/ansible | lib/ansible/playbook/role/requirement.py | 81 | 8216 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shutil
import subprocess
import tempfile
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.playbook.role.definition import RoleDefinition
__all__ = ['RoleRequirement']
VALID_SPEC_KEYS = [
'name',
'role',
'scm',
'src',
'version',
]
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class RoleRequirement(RoleDefinition):
"""
Helper class for Galaxy, which is used to parse both dependencies
specified in meta/main.yml and requirements.yml files.
"""
def __init__(self):
pass
@staticmethod
def repo_url_to_role_name(repo_url):
# gets the role name out of a repo like
# "http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
@staticmethod
def role_spec_parse(role_spec):
# takes a repo and a version like
# git+http://git.example.com/repos/repo.git,v1.0
# and returns a list of properties such as:
# {
# 'scm': 'git',
# 'src': 'http://git.example.com/repos/repo.git',
# 'version': 'v1.0',
# 'name': 'repo'
# }
display.deprecated("The comma separated role spec format, use the yaml/explicit format instead. Line that trigger this: %s" % role_spec,
version="2.7")
default_role_versions = dict(git='master', hg='tip')
role_spec = role_spec.strip()
role_version = ''
if role_spec == "" or role_spec.startswith("#"):
return (None, None, None, None)
tokens = [s.strip() for s in role_spec.split(',')]
# assume https://github.com URLs are git+https:// URLs and not
# tarballs unless they end in '.zip'
if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
tokens[0] = 'git+' + tokens[0]
if '+' in tokens[0]:
(scm, role_url) = tokens[0].split('+')
else:
scm = None
role_url = tokens[0]
if len(tokens) >= 2:
role_version = tokens[1]
if len(tokens) == 3:
role_name = tokens[2]
else:
role_name = RoleRequirement.repo_url_to_role_name(tokens[0])
if scm and not role_version:
role_version = default_role_versions.get(scm, '')
return dict(scm=scm, src=role_url, version=role_version, name=role_name)
@staticmethod
def role_yaml_parse(role):
if isinstance(role, string_types):
name = None
scm = None
src = None
version = None
if ',' in role:
if role.count(',') == 1:
(src, version) = role.strip().split(',', 1)
elif role.count(',') == 2:
(src, version, name) = role.strip().split(',', 2)
else:
raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
else:
src = role
if name is None:
name = RoleRequirement.repo_url_to_role_name(src)
if '+' in src:
(scm, src) = src.split('+', 1)
return dict(name=name, src=src, scm=scm, version=version)
if 'role' in role:
name = role['role']
if ',' in name:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
role = RoleRequirement.role_spec_parse(role['role'])
else:
del role['role']
role['name'] = name
else:
role = role.copy()
if 'src' in role:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
for key in list(role.keys()):
if key not in VALID_SPEC_KEYS:
role.pop(key)
return role
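# Illustrative sketch (hypothetical input; output derived from the logic
# above):
#
#   RoleRequirement.role_yaml_parse('git+https://github.com/acme/roles.git,v1.2,acme.role')
#   # -> {'name': 'acme.role', 'src': 'https://github.com/acme/roles.git',
#   #     'scm': 'git', 'version': 'v1.2'}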
@staticmethod
def scm_archive_role(src, scm='git', name=None, version='HEAD'):
if scm not in ['hg', 'git']:
raise AnsibleError("- scm %s is not currently supported" % scm)
tempdir = tempfile.mkdtemp()
clone_cmd = [scm, 'clone', src, name]
with open('/dev/null', 'w') as devnull:
try:
popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
except (IOError, OSError):
raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
rc = popen.wait()
if rc != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(clone_cmd), tempdir, rc))
if scm == 'git' and version:
checkout_cmd = [scm, 'checkout', version]
with open('/dev/null', 'w') as devnull:
try:
popen = subprocess.Popen(checkout_cmd, cwd=os.path.join(tempdir, name), stdout=devnull, stderr=devnull)
except (IOError, OSError):
raise AnsibleError("error executing: %s" % " ".join(checkout_cmd))
rc = popen.wait()
if rc != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(checkout_cmd), tempdir, rc))
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
if scm == 'hg':
archive_cmd = ['hg', 'archive', '--prefix', "%s/" % name]
if version:
archive_cmd.extend(['-r', version])
archive_cmd.append(temp_file.name)
if scm == 'git':
archive_cmd = ['git', 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
if version:
archive_cmd.append(version)
else:
archive_cmd.append('HEAD')
with open('/dev/null', 'w') as devnull:
popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, name),
stderr=devnull, stdout=devnull)
rc = popen.wait()
if rc != 0:
raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(archive_cmd), tempdir, rc))
shutil.rmtree(tempdir, ignore_errors=True)
return temp_file.name
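# Illustrative call (hypothetical URL; assumes the git binary is on PATH):
#
#   tarball = RoleRequirement.scm_archive_role(
#       'https://github.com/acme/ansible-role-demo.git',
#       scm='git', name='acme.demo', version='v1.0')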
| gpl-3.0 |
willingc/oh-mainline | vendor/packages/Django/tests/regressiontests/sites_framework/tests.py | 150 | 1800 | from __future__ import absolute_import
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import TestCase
from .models import (SyndicatedArticle, ExclusiveArticle, CustomArticle,
InvalidArticle, ConfusedArticle)
class SitesFrameworkTestCase(TestCase):
def setUp(self):
Site.objects.get_or_create(id=settings.SITE_ID, domain="example.com", name="example.com")
Site.objects.create(id=settings.SITE_ID+1, domain="example2.com", name="example2.com")
def test_site_fk(self):
article = ExclusiveArticle.objects.create(title="Breaking News!", site_id=settings.SITE_ID)
self.assertEqual(ExclusiveArticle.on_site.all().get(), article)
def test_sites_m2m(self):
article = SyndicatedArticle.objects.create(title="Fresh News!")
article.sites.add(Site.objects.get(id=settings.SITE_ID))
article.sites.add(Site.objects.get(id=settings.SITE_ID+1))
article2 = SyndicatedArticle.objects.create(title="More News!")
article2.sites.add(Site.objects.get(id=settings.SITE_ID+1))
self.assertEqual(SyndicatedArticle.on_site.all().get(), article)
def test_custom_named_field(self):
article = CustomArticle.objects.create(title="Tantalizing News!", places_this_article_should_appear_id=settings.SITE_ID)
self.assertEqual(CustomArticle.on_site.all().get(), article)
def test_invalid_name(self):
article = InvalidArticle.objects.create(title="Bad News!", site_id=settings.SITE_ID)
self.assertRaises(ValueError, InvalidArticle.on_site.all)
def test_invalid_field_type(self):
article = ConfusedArticle.objects.create(title="More Bad News!", site=settings.SITE_ID)
self.assertRaises(TypeError, ConfusedArticle.on_site.all)
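# For reference, a minimal sketch of the model pattern exercised above
# (assumed from the test's usage and the django.contrib.sites docs; the real
# definitions live in .models):
#
#   from django.db import models
#   from django.contrib.sites.models import Site
#   from django.contrib.sites.managers import CurrentSiteManager
#
#   class ExclusiveArticle(models.Model):
#       title = models.CharField(max_length=100)
#       site = models.ForeignKey(Site)
#       objects = models.Manager()
#       on_site = CurrentSiteManager()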
| agpl-3.0 |
sanghinitin/golismero | tools/sqlmap/lib/core/subprocessng.py | 7 | 5632 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import errno
import os
import subprocess
import sys
import time
from lib.core.settings import IS_WIN
if IS_WIN:
try:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
except ImportError:
pass
import msvcrt
else:
import select
import fcntl
if (sys.hexversion >> 16) >= 0x202:
FCNTL = fcntl
else:
import FCNTL
def blockingReadFromFD(fd):
# Quick twist around original Twisted function
# Blocking read from a non-blocking file descriptor
output = ""
while True:
try:
output += os.read(fd, 8192)
except (OSError, IOError), ioe:
if ioe.args[0] in (errno.EAGAIN, errno.EINTR):
# Uncomment the following line if the process seems to
# take a huge amount of cpu time
# time.sleep(0.01)
continue
else:
raise
break
if not output:
raise EOFError("fd %s has been closed." % fd)
return output
def blockingWriteToFD(fd, data):
# Another quick twist
while True:
try:
data_length = len(data)
wrote_data = os.write(fd, data)
except (OSError, IOError), io:
if io.errno in (errno.EAGAIN, errno.EINTR):
continue
else:
raise
if wrote_data < data_length:
blockingWriteToFD(fd, data[wrote_data:])
break
# the following code is taken from http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if subprocess.mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
if self.universal_newlines:
read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
except OSError, why:
if why[0] == errno.EPIPE: # broken pipe
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
if self.universal_newlines:
r = self._translate_newlines(r)
return r
finally:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
if tr < 1:
tr = 1
x = time.time() + t
y = []
r = ''
if stderr:
pr = p.recv_err
else:
pr = p.recv
while time.time() < x or r:
r = pr()
if r is None:
break
elif r:
y.append(r)
else:
time.sleep(max((x - time.time()) / tr, 0))
return ''.join(y)
def send_all(p, data):
if not data:
return
while len(data):
sent = p.send(data)
if not isinstance(sent, int):
break
data = buffer(data, sent)
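# Minimal usage sketch (hypothetical; assumes a POSIX shell is available):
#
#   p = Popen(['sh'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
#   send_all(p, 'echo hello\n')
#   out = recv_some(p)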
| gpl-2.0 |
alistairlow/tensorflow | tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py | 61 | 15941 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_ops.sparse_tensor_dense_matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import app
from tensorflow.python.platform import test
def _maybe_complex(x):
if x.dtype.kind == "c": # complex
return (x + 1j * x) / 2
return x
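# Quick illustration (hypothetical values) of _maybe_complex: for a complex
# dtype it mixes the array into complex values, otherwise it is a no-op.
#
#   x = np.array([2.0, 4.0], dtype=np.complex64)
#   _maybe_complex(x)  # -> array([1.+1.j, 2.+2.j], dtype=complex64)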
class SparseTensorDenseMatMulTest(test.TestCase):
def _testMatmul(self,
x,
y,
adjoint_a=False,
adjoint_b=False,
indices_dtype=np.int64):
x_mat = np.matrix(x)
if adjoint_a:
x_mat = x_mat.H
y_mat = np.matrix(y)
if adjoint_b:
y_mat = y_mat.H
np_ans = x_mat * y_mat
x_indices = np.vstack(np.where(x)).astype(indices_dtype).T
x_values = x[np.where(x)]
x_shape = x.shape
with self.test_session(use_gpu=True):
sp_x_value = sparse_tensor.SparseTensorValue(
indices=x_indices, values=x_values, dense_shape=x_shape)
tf_value_ans = sparse_ops.sparse_tensor_dense_matmul(
sp_x_value, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
tf_tensor_ans = sparse_ops.sparse_tensor_dense_matmul(
sparse_tensor.SparseTensor.from_value(sp_x_value),
y,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
# Ensure that the RHS shape is known at least.
self.assertEqual(tf_value_ans.get_shape()[1], np_ans.shape[1])
self.assertEqual(tf_tensor_ans.get_shape()[1], np_ans.shape[1])
for out in (tf_value_ans.eval(), tf_tensor_ans.eval()):
if x.dtype == np.float32:
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)
elif x.dtype == np.float64:
self.assertAllClose(np_ans, out, rtol=1e-6, atol=1e-6)
else:
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)
def _testBasic(self, value_dtype, indices_dtype=np.int64):
x = _maybe_complex(np.random.rand(10, 10).astype(value_dtype))
x[np.abs(x) < 0.5] = 0 # Make it sparse
y = _maybe_complex(np.random.randn(10, 20).astype(value_dtype))
self._testMatmul(x, y, indices_dtype=indices_dtype)
def testBasic(self):
np.random.seed(127) # Repeatable results
self._testBasic(np.int32)
self._testBasic(np.float32)
self._testBasic(np.float64)
self._testBasic(np.complex64)
self._testBasic(np.complex128)
self._testBasic(np.int32, indices_dtype=np.int32)
self._testBasic(np.float32, indices_dtype=np.int32)
def testShapeInference(self):
x = np.random.rand(10, 10)
x[np.abs(x) < 0.5] = 0 # Make it sparse
y = np.random.randn(10, 20)
x_indices = np.vstack(np.where(x)).astype(np.int64).T
x_values = x[np.where(x)]
x_shape = x.shape
x_st = sparse_tensor.SparseTensor(x_indices, x_values, x_shape)
result = sparse_ops.sparse_tensor_dense_matmul(x_st, y)
self.assertEqual(result.get_shape(), (10, 20))
x_shape_unknown = array_ops.placeholder(dtype=dtypes.int64, shape=None)
x_st_shape_unknown = sparse_tensor.SparseTensor(x_indices, x_values,
x_shape_unknown)
result_left_shape_unknown = sparse_ops.sparse_tensor_dense_matmul(
x_st_shape_unknown, y)
self.assertEqual(result_left_shape_unknown.get_shape().as_list(),
[None, 20])
x_shape_inconsistent = [10, 15]
x_st_shape_inconsistent = sparse_tensor.SparseTensor(x_indices, x_values,
x_shape_inconsistent)
with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
sparse_ops.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)
def testInvalidIndicesForSparseTensorDenseMatmul(self):
# Note: use_gpu=False because nice errors are only returned from CPU kernel.
with self.test_session(use_gpu=False):
indices = np.matrix([[1, 10]]).astype(np.int64)
values = np.array([10]).astype(np.float32)
shape = [3, 2]
sparse_t = sparse_tensor.SparseTensor(indices, values, shape)
# Test multiplying by both a small and large dense matrix, to hit
# both cases in the kernel.
dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)
with self.assertRaisesOpError(
"k .10. from index.0,1. out of bounds .>=2."):
sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t).eval()
dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)
with self.assertRaisesOpError(
"k .10. from index.0,1. out of bounds .>=2."):
sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t).eval()
# Repeat with adjoint_a, to get a different error.
dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)
with self.assertRaisesOpError(
"m .10. from index.0,1. out of bounds .>=2."):
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True).eval()
dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)
with self.assertRaisesOpError(
"m .10. from index.0,1. out of bounds .>=2."):
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True).eval()
def testInvalidIndicesForSparseTensorDenseMatmulOnGPU(self):
# Note: nice errors are only returned from the CPU kernel; the GPU kernel
# silently produces NaNs/zeros instead, which is what we check for below.
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True):
indices = np.array([[1, 10]]).astype(np.int64)
values = np.array([10]).astype(np.float32)
shape = [3, 2]
sparse_t = sparse_tensor.SparseTensor(indices, values, shape)
# Test multiplying by both a small and large dense matrix, to hit
# both cases in the kernel.
dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)
expected_t = np.array([[0] * 5, [np.nan] * 5, [0] * 5], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t).eval())
dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)
expected_t = np.array(
[[0] * 500, [np.nan] * 500, [0] * 500], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t).eval())
# Repeat with adjoint_a, now the error is that the sparse index
# is OOO w.r.t. the output. The GPU kernel can't do much here,
# so it just doesn't accumulate.
dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)
expected_t = np.array([[0] * 5, [0] * 5], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True).eval())
dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)
expected_t = np.array([[0] * 500, [0] * 500], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True).eval())
# Tests setting one dimension to be a high value.
def _testLarge(self, np_dtype):
r1 = np.random.randint(6000, 20000)
r2 = np.random.randint(1, 10)
r3 = np.random.randint(1, 10)
for m, k, n in [(r1, r2, r3),
(r2, r1, r3),
(r2, r3, r1)]:
x = _maybe_complex(np.random.rand(m, k).astype(np_dtype))
x[np.abs(x) < 0.8] = 0
y = _maybe_complex(np.random.randn(k, n).astype(np_dtype))
self._testMatmul(x, y, adjoint_a=False, adjoint_b=False)
self._testMatmul(x.transpose(), y, adjoint_a=True, adjoint_b=False)
self._testMatmul(x, y.transpose(), adjoint_a=False, adjoint_b=True)
self._testMatmul(
x.transpose(), y.transpose(), adjoint_a=True, adjoint_b=True)
def testLarge(self):
np.random.seed(127) # Repeatable results
self._testLarge(np.float32)
self._testLarge(np.float64)
self._testLarge(np.complex64)
self._testLarge(np.complex128)
# Tests random sized matrices.
def testFloatRandom(self):
np.random.seed(127) # Repeatable results
for _ in range(8):
for adjoint_a in [True, False]:
for adjoint_b in [True, False]:
for thresh in [0.0, 0.2, 0.8, 1.0]:
n, k, m = np.random.randint(1, 100, size=3)
x = np.random.rand(n, k).astype(np.float32)
x[x < thresh] = 0 # Make it sparse
y = np.random.randn(k, m).astype(np.float32)
x = x.transpose() if adjoint_a else x
y = y.transpose() if adjoint_b else y
self._testMatmul(x, y, adjoint_a, adjoint_b)
def _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(x, y, adjoint_a,
adjoint_b):
def body(t, prev):
with ops.control_dependencies([prev]):
return (t + 1, math_ops.matmul(
x,
y,
transpose_a=adjoint_a,
transpose_b=adjoint_b,
a_is_sparse=True,
b_is_sparse=False))
t0 = constant_op.constant(0)
v0 = constant_op.constant(0.0)
def _timeit(iterations, _):
(_, final) = control_flow_ops.while_loop(
lambda t, _: t < iterations,
body, (t0, v0),
parallel_iterations=1,
back_prop=False,
shape_invariants=(tensor_shape.TensorShape(()),
tensor_shape.TensorShape(None)))
return [final]
return _timeit
def _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(x_ind, x_val, x_shape,
y, adjoint_a,
adjoint_b):
sp_x = sparse_tensor.SparseTensor(
indices=x_ind, values=x_val, dense_shape=x_shape)
def body(t, prev):
with ops.control_dependencies([prev]):
return (t + 1, sparse_ops.sparse_tensor_dense_matmul(
sp_x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b))
t0 = constant_op.constant(0)
v0 = constant_op.constant(0.0)
def _timeit(iterations, _):
(_, final) = control_flow_ops.while_loop(
lambda t, _: t < iterations,
body, (t0, v0),
parallel_iterations=1,
back_prop=False,
shape_invariants=(tensor_shape.TensorShape(()),
tensor_shape.TensorShape(None)))
return [final]
return _timeit
def sparse_tensor_dense_vs_dense_matmul_benchmark(thresh,
m,
k,
n,
adjoint_a,
adjoint_b,
use_gpu,
skip_dense=False):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Configurable for benchmarking:
# config.intra_op_parallelism_threads = 100
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
np.random.seed([6, 117]) # Reproducibility
x = np.random.rand(m, k).astype(np.float32)
x[x < thresh] = 0
y = np.random.randn(k, n).astype(np.float32)
if adjoint_a:
x = x.T
if adjoint_b:
y = y.T
def _timer(sess, ops_fn, iterations):
# Warm in
sess.run(ops_fn(10, sess))
# Timing run
start = time.time()
sess.run(ops_fn(iterations, sess))
end = time.time()
return (end - start) / (1.0 * iterations) # Average runtime per iteration
# Using regular matmul, marking one of the matrices as dense.
if skip_dense:
delta_dense = float("nan")
else:
with session.Session(config=config, graph=ops.Graph()) as sess:
if not use_gpu:
with ops.device("/cpu:0"):
x_t = constant_op.constant(x)
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
x_t, y_t, adjoint_a, adjoint_b)
else:
with ops.device("/device:GPU:0"):
x_t = constant_op.constant(x)
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
x_t, y_t, adjoint_a, adjoint_b)
delta_dense = _timer(sess, ops_fn, 200)
# Using sparse_tensor_dense_matmul.
with session.Session("", config=config, graph=ops.Graph()) as sess:
if not use_gpu:
with ops.device("/cpu:0"):
x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
x_val = constant_op.constant(x[np.where(x)])
x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
else:
with ops.device("/device:GPU:0"):
x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
x_val = constant_op.constant(x[np.where(x)])
x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
delta_sparse = _timer(sess, ops_fn, 200)
print("%g \t %d \t %s \t %d \t %d \t %g \t %g \t %g" %
(1 - thresh, n, use_gpu, m, k, delta_dense, delta_sparse,
delta_sparse / delta_dense))
def main(_):
print("DenseDense MatMul (w/ Sparse Flag) vs. SparseTensorDense MatMul")
print("Matrix sizes:")
print(" A sparse [m, k] with % nonzero values between 1% and 80%")
print(" B dense [k, n]")
print("")
print("% nnz \t n \t gpu \t m \t k \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for thresh in (0.99, 0.8, 0.5, 0.2):
for n in (50, 100):
for use_gpu in (True, False):
for m in (100, 1000):
for k in (100, 1000):
sparse_tensor_dense_vs_dense_matmul_benchmark(
thresh, m, k, n, False, False, use_gpu=use_gpu)
# Enable for large scale benchmarks, these ones take a long time to run.
#
# for use_gpu in (True, False):
# sparse_tensor_dense_vs_dense_matmul_benchmark(
# thresh=0.99, m=1000000, k=1000, n=100, adjoint_a=False,
# adjoint_b=False, use_gpu=use_gpu, skip_dense=True)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
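# Usage note (inferred from the __main__ block above): pass --benchmarks on
# the command line to run the benchmark sweep via app.run(); without it the
# unit tests run through test.main().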
| apache-2.0 |
trishnaguha/ansible | lib/ansible/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py | 3 | 3835 | #!/usr/bin/python
# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: utm_network_interface_address
author:
- Juergen Wiebe (@example)
short_description: create, update or destroy network/interface_address object
description:
- Create, update or destroy a network/interface_address object in SOPHOS UTM.
- This module needs to have the REST Ability of the UTM to be activated.
version_added: "2.8"
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
address:
description:
- The ip4 address of the network/interface_address object.
required: true
address6:
description:
- The ip6 address of the network/interface_address object.
required: false
comment:
description:
- An optional comment to add to the object
resolved:
description:
- Whether or not the object is resolved
resolved6:
description:
- Whether or not the IPv6 object is resolved
extends_documentation_fragment:
- utm
"""
EXAMPLES = """
# Create a network interface address
- name: utm network interface address
utm_network_interface_address:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestNetworkInterfaceAddress
address: 0.0.0.0
state: present
# Remove a network interface address
- name: utm network interface address
utm_network_interface_address:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestNetworkInterfaceAddress
address: 0.0.0.0
state: absent
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: string
_locked:
description: Whether or not the object is currently locked
type: boolean
_type:
description: The type of the object
type: string
name:
description: The name of the object
type: string
address:
description: The ip4 address of the network/interface_address object
type: string
address6:
description: The ip6 address of the network/interface_address object
type: string
comment:
description: The comment string
type: string
resolved:
description: Whether or not the object is resolved
type: boolean
resolved6:
description: Whether or not the object is resolved
type: boolean
"""
from ansible.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
endpoint = "network/interface_address"
key_to_check_for_changes = ["comment", "address"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
address=dict(type='str', required=True),
comment=dict(type='str', required=False, default=""),
address6=dict(type='str', required=False),
resolved=dict(type='bool', required=False),
resolved6=dict(type='bool', required=False)
)
)
try:
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
jasonkying/pip | pip/_vendor/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
def __init__(self, model, reversed=False, nameProber=None):
CharSetProber.__init__(self)
self._mModel = model
# TRUE if we need to reverse every pair in the model lookup
self._mReversed = reversed
# Optional auxiliary prober for name decision
self._mNameProber = nameProber
self.reset()
def reset(self):
CharSetProber.reset(self)
# char order of last character
self._mLastOrder = 255
self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
self._mTotalSeqs = 0
self._mTotalChar = 0
# characters that fall in our sampling range
self._mFreqChar = 0
def get_charset_name(self):
if self._mNameProber:
return self._mNameProber.get_charset_name()
else:
return self._mModel['charsetName']
def feed(self, aBuf):
if not self._mModel['keepEnglishLetter']:
aBuf = self.filter_without_english_letters(aBuf)
aLen = len(aBuf)
if not aLen:
return self.get_state()
for c in aBuf:
order = self._mModel['charToOrderMap'][wrap_ord(c)]
if order < SYMBOL_CAT_ORDER:
self._mTotalChar += 1
if order < SAMPLE_SIZE:
self._mFreqChar += 1
if self._mLastOrder < SAMPLE_SIZE:
self._mTotalSeqs += 1
if not self._mReversed:
i = (self._mLastOrder * SAMPLE_SIZE) + order
model = self._mModel['precedenceMatrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * SAMPLE_SIZE) + self._mLastOrder
model = self._mModel['precedenceMatrix'][i]
self._mSeqCounters[model] += 1
self._mLastOrder = order
if self.get_state() == constants.eDetecting:
if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
cf = self.get_confidence()
if cf > POSITIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a '
                                         'winner\n' %
(self._mModel['charsetName'], cf))
self._mState = constants.eFoundIt
elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative '
                                         'shortcut threshold %s\n' %
(self._mModel['charsetName'], cf,
NEGATIVE_SHORTCUT_THRESHOLD))
self._mState = constants.eNotMe
return self.get_state()
def get_confidence(self):
r = 0.01
if self._mTotalSeqs > 0:
r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
/ self._mModel['mTypicalPositiveRatio'])
r = r * self._mFreqChar / self._mTotalChar
if r >= 1.0:
r = 0.99
return r
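# Worked example of the confidence ratio above (illustrative numbers only):
# with 400 positive sequences out of 500 total, mTypicalPositiveRatio = 0.95
# and 450 of the 500 sampled characters in the frequent range:
#     r = (400.0 / 500) / 0.95 * (450.0 / 500)   # ~0.758, capped at 0.99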
| mit |
Kitryn/PTCAccount2 | ptcaccount2/console.py | 1 | 4476 | import argparse
import sys
import ptcaccount2
from ptcaccount2.ptcexceptions import *
def parse_arguments(args):
"""Parse the command line arguments for the console commands.
Args:
args (List[str]): List of string arguments to be parsed.
Returns:
Namespace: Namespace with the parsed arguments.
"""
parser = argparse.ArgumentParser(
description='Pokemon Trainer Club Account Creator'
)
parser.add_argument(
'-u', '--username', type=str, default=None,
help='Username for the new account (defaults to random string).'
)
parser.add_argument(
'-p', '--password', type=str, default=None,
help='Password for the new account (defaults to random string).'
)
parser.add_argument(
'-e', '--email', type=str, default=None,
help='Email for the new account (defaults to random email-like string).'
)
parser.add_argument(
'-b', '--birthday', type=str, default=None,
help='Birthday for the account. Must be YYYY-MM-DD. (default is a random birthday).'
)
parser.add_argument(
'-m', '--multiple', type=int, default=1,
help='Specify a number of accounts to create (default 1). If >1 AND `--email` is specified, the `--email-tag` option will be automatically set.'
)
parser.add_argument(
'--compact', action='store_true',
help='Compact the output to "username:password"'
)
parser.add_argument(
'--email-tag', action='store_true',
help='Add the username as a tag to the email (i.e addr+tag@mail.com).'
) # Note: Email max length is 75 characters.
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--tofile', action='store_true',
help='Output "username:password" into file "accounts.txt". [Deprecated: Use "--output accounts.txt" instead.]'
)
group.add_argument(
'-o', '--output', metavar='FILE',
help='Output "username:password" to a file.'
)
return parser.parse_args(args)
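# Usage sketch (illustrative values; not an additional CLI contract):
#
#     opts = parse_arguments(['-m', '2', '--compact', '-o', 'accounts.txt'])
#     # opts.multiple == 2, opts.compact is True, opts.output == 'accounts.txt'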
def entry():
"""Main entry point for the package console commands"""
args = parse_arguments(sys.argv[1:])
account_summary = []
try:
if args.multiple > 1 and args.username is not None:
raise ValueError("Username cannot be set if --multiple is greater than 1!")
if args.email is not None and len(args.email) > 75:
raise ValueError("Email cannot be longer than 75 characters!")
if args.multiple > 1 and args.email is not None:
# If email is set when more than one account is set to be created, email-tag must be True
args.email_tag = True
print('Creating new account(s):')
for _ in range(args.multiple):
# Create the random account
account_info = ptcaccount2.random_account(
args.username, args.password, args.email, args.birthday, args.email_tag)
if args.compact:
print('{}:{}'.format(account_info["username"], account_info["password"]))
else:
print(' Username: {}'.format(account_info["username"]))
print(' Password: {}'.format(account_info["password"]))
print(' Email : {}'.format(account_info["email"]))
print('\n')
if args.tofile:
output = 'accounts.txt'
elif args.output:
output = args.output
else:
output = None
if output is not None:
with open(output, 'a+') as writeto:
writeto.write('{}:{}'.format(account_info["username"], account_info["password"]) + "\n")
print('Appended to file {}'.format(output))
account_summary.append({"username": account_info["username"], "password": account_info["password"]})
# Handle account creation failure exceptions
except PTCInvalidPasswordException as err:
print('Invalid password: {}'.format(err))
except (PTCInvalidEmailException, PTCInvalidNameException) as err:
print('Failed to create account! {}'.format(err))
except PTCException as err:
print('Failed to create account! General error: {}'.format(err))
finally:
if args.multiple > 1:
print("Summary of accounts created:")
for account in account_summary:
print("{}:{}".format(account["username"], account["password"]))
| mit |
jacobakkerboom/CodeCombat2 | scripts/devSetup/ruby.py | 77 | 1873 | from __future__ import print_function
__author__ = u'root'
import dependency
import configuration
import shutil
import errors
import subprocess
from which import which
class Ruby(dependency.Dependency):
def __init__(self,config):
self.config = config
assert isinstance(config,configuration.Configuration)
is_ruby_installed = self.check_if_ruby_exists()
is_gem_installed = self.check_if_gem_exists()
if is_ruby_installed and not is_gem_installed:
#this means their ruby is so old that RubyGems isn't packaged with it
raise errors.CoCoError(u"You have an extremely old version of Ruby. Please upgrade it to get RubyGems.")
elif not is_ruby_installed:
self.install_ruby()
elif is_ruby_installed and is_gem_installed:
print(u"Ruby found.")
def check_if_ruby_exists(self):
ruby_path = which(u"ruby")
return bool(ruby_path)
def check_if_gem_exists(self):
gem_path = which(u"gem")
return bool(gem_path)
def install_ruby(self):
operating_system = self.config.system.operating_system
#Ruby is on every recent Mac, most Linux distros
if operating_system == u"windows":
self.install_ruby_on_windows()
elif operating_system == u"mac":
raise errors.CoCoError(u"Ruby should be installed with Mac OSX machines. Please install Ruby.")
elif operating_system == u"linux":
raise errors.CoCoError(u"Please install Ruby (try 'sudo apt-get install ruby').\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing ruby.")
def install_ruby_on_windows(self):
raise NotImplementedError
def install_gems(self):
gem_install_status = subprocess.call([u"gem",u"install",u"--no-user-install",u"sass"])
| mit |
haripradhan/MissionPlanner | Lib/distutils/sysconfig.py | 42 | 21927 | """Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <fdrake@acm.org>
"""
__revision__ = "$Id$"
import os
import re
import string
import sys
from distutils.errors import DistutilsPlatformError
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCBuild9. If we're dealing with an x64 Windows build,
# it'll live in project/PCbuild/amd64.
project_base = os.path.dirname(os.path.abspath(sys.executable))
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
# Setup.local is available for Makefile builds including VPATH builds,
# Setup.dist is available on Windows
def _python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(project_base, "Modules", fn)):
return True
return False
python_build = _python_build()
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return sys.version[:3]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if python_build:
buildir = os.path.dirname(sys.executable)
if plat_specific:
# python.h is located in the buildir
inc_dir = buildir
else:
# the source dir is relative to the buildir
srcdir = os.path.abspath(os.path.join(buildir,
get_config_var('srcdir')))
# Include is located in the srcdir
inc_dir = os.path.join(srcdir, "Include")
return inc_dir
return os.path.join(prefix, "include", "python" + get_python_version())
elif os.name == "nt":
return os.path.join(prefix, "include")
elif os.name == "os2":
return os.path.join(prefix, "Include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
if get_python_version() < "2.2":
return prefix
else:
return os.path.join(prefix, "Lib", "site-packages")
elif os.name == "os2":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
(cc, cxx, opt, cflags, ccshared, ldshared, so_ext) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
if 'CC' in os.environ:
cc = os.environ['CC']
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc)
compiler.shared_lib_extension = so_ext
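# Example of the environment precedence implemented above (hypothetical shell
# invocation, 'unix' compiler type):
#
#     CC=clang CFLAGS='-O3' python setup.py build_ext
#
# makes the compile command 'clang <OPT> -O3' and appends '-O3' to LDSHARED.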
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(project_base, "PC")
else:
inc_dir = project_base
else:
inc_dir = get_python_inc(plat_specific=1)
if get_python_version() < '2.2':
config_h = 'config.h'
else:
# The name of the config.h file changed in 2.2
config_h = 'pyconfig.h'
return os.path.join(inc_dir, config_h)
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(os.path.dirname(sys.executable), "Makefile")
lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
return os.path.join(lib_dir, "config", "Makefile")
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while 1:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m:
g[m.group(1)] = 0
return g
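# Usage sketch for parse_config_h() (hypothetical file contents, Python 2):
#
#     from StringIO import StringIO
#     g = parse_config_h(StringIO('#define HAVE_FOO 1\n/* #undef HAVE_BAR */\n'))
#     # g == {'HAVE_FOO': 1, 'HAVE_BAR': 0}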
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1)
if g is None:
g = {}
done = {}
notdone = {}
while 1:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
while notdone:
for name in notdone.keys():
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
g.update(done)
return g
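# Usage sketch for parse_makefile() (hypothetical Makefile contents): a file
# with the lines "CC=gcc" and "ALLFLAGS=$(CC) -O2" parses to
# {'CC': 'gcc', 'ALLFLAGS': 'gcc -O2'} after the interpolation loop above.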
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
'string' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while 1:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
            s = s[0:beg] + vars.get(m.group(1), '') + s[end:]
else:
break
return s
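# Example: expand_makefile_vars('$(CC) -c $(MISSING)', {'CC': 'gcc'}) returns
# 'gcc -c '; the unknown $(MISSING) expands to the empty string.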
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
g = {}
# load the installed Makefile:
try:
filename = get_makefile_filename()
parse_makefile(filename, g)
except IOError, msg:
my_msg = "invalid Python installation: unable to open %s" % filename
if hasattr(msg, "strerror"):
my_msg = my_msg + " (%s)" % msg.strerror
raise DistutilsPlatformError(my_msg)
# load the installed pyconfig.h:
try:
filename = get_config_h_filename()
parse_config_h(file(filename), g)
except IOError, msg:
my_msg = "invalid Python installation: unable to open %s" % filename
if hasattr(msg, "strerror"):
my_msg = my_msg + " (%s)" % msg.strerror
raise DistutilsPlatformError(my_msg)
# On MacOSX we need to check the setting of the environment variable
# MACOSX_DEPLOYMENT_TARGET: configure bases some choices on it so
# it needs to be compatible.
# If it isn't set we set it to the configure-time value
if sys.platform == 'darwin' and 'MACOSX_DEPLOYMENT_TARGET' in g:
cfg_target = g['MACOSX_DEPLOYMENT_TARGET']
cur_target = os.getenv('MACOSX_DEPLOYMENT_TARGET', '')
if cur_target == '':
cur_target = cfg_target
os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target)
elif map(int, cfg_target.split('.')) > map(int, cur_target.split('.')):
my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'
% (cur_target, cfg_target))
raise DistutilsPlatformError(my_msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if python_build:
g['LDSHARED'] = g['BLDSHARED']
elif get_python_version() < '2.1':
# The following two branches are for 1.5.2 compatibility.
if sys.platform == 'aix4': # what about AIX 3.x ?
# Linker script is in the config directory, not in Modules as the
# Makefile says.
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp)
elif sys.platform == 'beos':
# Linker script is in the config directory. In the Makefile it is
# relative to the srcdir, which after installation no longer makes
# sense.
python_lib = get_python_lib(standard_lib=1)
linkerscript_path = string.split(g['LDSHARED'])[0]
linkerscript_name = os.path.basename(linkerscript_path)
linkerscript = os.path.join(python_lib, 'config',
linkerscript_name)
# XXX this isn't the right place to do this: adding the Python
# library to the link, if needed, should be in the "build_ext"
# command. (It's also needed for non-MS compilers on Windows, and
# it's taken care of for them by the 'build_ext.get_libraries()'
# method.)
g['LDSHARED'] = ("%s -L%s/lib -lpython%s" %
(linkerscript, PREFIX, get_python_version()))
global _config_vars
_config_vars = g
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
g['VERSION'] = get_python_version().replace(".", "")
g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
global _config_vars
_config_vars = g
def _init_os2():
"""Initialize the module as appropriate for OS/2"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['SO'] = '.pyd'
g['EXE'] = ".exe"
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_config_vars[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_config_vars[key] = flags
# If we're on OSX 10.5 or later and the user tries to
            # compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
m = re.search('-isysroot\s+(\S+)', _config_vars['CFLAGS'])
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _config_vars[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_config_vars[key] = flags
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
return get_config_vars().get(name)
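# Typical queries (actual values vary per installation):
#
#     get_config_var('prefix')            # e.g. '/usr'
#     get_config_vars('CC', 'OPT')        # e.g. ['gcc', '-DNDEBUG -g -O3']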
| gpl-3.0 |
VRToxin-AOSP/android_external_skia | tools/test_pdfs.py | 231 | 1801 | '''
Compares the renderings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
import test_rendering
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Takes input SkPicture files and renders them as PDF files, and then compares
those resulting PDF files against PDF files found in expectedDir.
Each instance of "input" can be either a file (name must end in .skp), or a
directory (in which case this script will process all .skp files within the
directory).
'''
def Main(args):
"""Allow other scripts to call this script with fake command-line args.
    @param args The command-line argument list
"""
parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
parser.add_option('--render_dir', dest='render_dir',
help = ('specify the location to output the rendered '
'files. Default is a temp directory.'))
parser.add_option('--diff_dir', dest='diff_dir',
help = ('specify the location to output the diff files. '
'Default is a temp directory.'))
options, arguments = parser.parse_args(args)
if (len(arguments) < 3):
print("Expected at least one input and one ouput folder.")
parser.print_help()
sys.exit(-1)
inputs = arguments[1:-1]
expected_dir = arguments[-1]
test_rendering.TestRenderSkps(inputs, expected_dir, options.render_dir,
options.diff_dir, 'render_pdfs', '')
if __name__ == '__main__':
Main(sys.argv)
| bsd-3-clause |
joone/chromium-crosswalk | components/test/data/password_manager/automated_tests/websitetest.py | 33 | 12521 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""WebsiteTest testing class."""
import logging
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import environment
SCRIPT_DEBUG = 9 # TODO(vabr) -- make this consistent with run_tests.py.
class WebsiteTest:
"""WebsiteTest testing class.
Represents one website, defines some generic operations on that site.
To customise for a particular website, this class needs to be inherited
and the Login() method overridden.
"""
# Possible values of self.autofill_expectation.
AUTOFILLED = 1 # Expect password and username to be autofilled.
NOT_AUTOFILLED = 2 # Expect password and username not to be autofilled.
# The maximal accumulated time to spend in waiting for website UI
# interaction.
MAX_WAIT_TIME_IN_SECONDS = 200
def __init__(self, name, username_not_auto=False, password_not_auto=False):
"""Creates a new WebsiteTest.
Args:
name: The website name, identifying it in the test results.
username_not_auto: Expect that the tested website fills username field
on load, and Chrome cannot autofill in that case.
password_not_auto: Expect that the tested website fills password field
on load, and Chrome cannot autofill in that case.
"""
self.name = name
self.username = None
self.password = None
self.username_not_auto = username_not_auto
self.password_not_auto = password_not_auto
# Specify, whether it is expected that credentials get autofilled.
self.autofill_expectation = WebsiteTest.NOT_AUTOFILLED
self.remaining_seconds_to_wait = WebsiteTest.MAX_WAIT_TIME_IN_SECONDS
# The testing Environment, if added to any.
self.environment = None
# The webdriver from the environment.
self.driver = None
# Mouse/Keyboard actions.
def Click(self, selector):
"""Clicks on the element described by |selector|.
Args:
selector: The clicked element's CSS selector.
"""
logging.log(SCRIPT_DEBUG, "action: Click %s" % selector)
element = self.WaitUntilDisplayed(selector)
element.click()
def ClickIfClickable(self, selector):
"""Clicks on the element described by |selector| if it is clickable.
The driver's find_element_by_css_selector method defines what is clickable
-- anything for which it does not throw, is clickable. To be clickable,
the element must:
* exist in the DOM,
* be not covered by another element
* be inside the visible area.
Note that transparency does not influence clickability.
Args:
selector: The clicked element's CSS selector.
Returns:
True if the element is clickable (and was clicked on).
False otherwise.
"""
logging.log(SCRIPT_DEBUG, "action: ClickIfVisible %s" % selector)
element = self.WaitUntilDisplayed(selector)
try:
element.click()
return True
except Exception:
return False
def GoTo(self, url):
"""Navigates the main frame to |url|.
Args:
url: The URL of where to go to.
"""
logging.log(SCRIPT_DEBUG, "action: GoTo %s" % self.name)
self.driver.get(url)
def HoverOver(self, selector):
"""Hovers over the element described by |selector|.
Args:
selector: The CSS selector of the element to hover over.
"""
logging.log(SCRIPT_DEBUG, "action: Hover %s" % selector)
element = self.WaitUntilDisplayed(selector)
hover = ActionChains(self.driver).move_to_element(element)
hover.perform()
# Waiting/Displaying actions.
def _ReturnElementIfDisplayed(self, selector):
"""Returns the element described by |selector|, if displayed.
Note: This takes neither overlapping among elements nor position with
regards to the visible area into account.
Args:
selector: The CSS selector of the checked element.
Returns:
The element if displayed, None otherwise.
"""
try:
element = self.driver.find_element_by_css_selector(selector)
return element if element.is_displayed() else None
except Exception:
return None
def IsDisplayed(self, selector):
"""Check if the element described by |selector| is displayed.
Note: This takes neither overlapping among elements nor position with
regards to the visible area into account.
Args:
selector: The CSS selector of the checked element.
Returns:
True if the element is in the DOM and less than 100% transparent.
False otherwise.
"""
logging.log(SCRIPT_DEBUG, "action: IsDisplayed %s" % selector)
return self._ReturnElementIfDisplayed(selector) is not None
def Wait(self, duration):
"""Wait for |duration| in seconds.
    To avoid deadlocks, the accumulated waiting time for the whole object does
not exceed MAX_WAIT_TIME_IN_SECONDS.
Args:
duration: The time to wait in seconds.
Raises:
      Exception: In case the accumulated waiting limit is exceeded.
"""
logging.log(SCRIPT_DEBUG, "action: Wait %s" % duration)
self.remaining_seconds_to_wait -= duration
if self.remaining_seconds_to_wait < 0:
raise Exception("Waiting limit exceeded for website: %s" % self.name)
time.sleep(duration)
# TODO(vabr): Pull this out into some website-utils and use in Environment
# also?
def WaitUntilDisplayed(self, selector):
"""Waits until the element described by |selector| is displayed.
Args:
selector: The CSS selector of the element to wait for.
Returns:
The displayed element.
"""
element = self._ReturnElementIfDisplayed(selector)
while not element:
self.Wait(1)
element = self._ReturnElementIfDisplayed(selector)
return element
# Form actions.
def FillPasswordInto(self, selector):
"""Ensures that the selected element's value is the saved password.
Depending on self.autofill_expectation, this either checks that the
element already has the password autofilled, or checks that the value
is empty and replaces it with the password. If self.password_not_auto
is true, it skips the checks and just overwrites the value with the
password.
Args:
selector: The CSS selector for the filled element.
Raises:
Exception: An exception is raised if the element's value is different
from the expectation.
"""
logging.log(SCRIPT_DEBUG, "action: FillPasswordInto %s" % selector)
password_element = self.WaitUntilDisplayed(selector)
    # Chrome protects the password inputs and doesn't fill them until
    # the user interacts with the page. To make sure that this has
    # happened, we perform a |Keys.CONTROL| keypress.
action_chains = ActionChains(self.driver)
action_chains.key_down(Keys.CONTROL).key_up(Keys.CONTROL).perform()
self.Wait(2) # TODO(vabr): Detect when autofill finished.
if not self.password_not_auto:
if self.autofill_expectation == WebsiteTest.AUTOFILLED:
if password_element.get_attribute("value") != self.password:
raise Exception("Error: autofilled password is different from the"
"saved one on website: %s" % self.name)
elif self.autofill_expectation == WebsiteTest.NOT_AUTOFILLED:
if password_element.get_attribute("value"):
raise Exception("Error: password value unexpectedly not empty on"
"website: %s" % self.name)
password_element.clear()
password_element.send_keys(self.password)
def FillUsernameInto(self, selector):
"""Ensures that the selected element's value is the saved username.
Depending on self.autofill_expectation, this either checks that the
element already has the username autofilled, or checks that the value
is empty and replaces it with the password. If self.username_not_auto
is true, it skips the checks and just overwrites the value with the
username.
Args:
selector: The CSS selector for the filled element.
Raises:
Exception: An exception is raised if the element's value is different
from the expectation.
"""
logging.log(SCRIPT_DEBUG, "action: FillUsernameInto %s" % selector)
username_element = self.WaitUntilDisplayed(selector)
self.Wait(2) # TODO(vabr): Detect when autofill finished.
if not self.username_not_auto:
if self.autofill_expectation == WebsiteTest.AUTOFILLED:
if username_element.get_attribute("value") != self.username:
raise Exception("Error: filled username different from the saved"
" one on website: %s" % self.name)
return
if self.autofill_expectation == WebsiteTest.NOT_AUTOFILLED:
if username_element.get_attribute("value"):
raise Exception("Error: username value unexpectedly not empty on"
"website: %s" % self.name)
username_element.clear()
username_element.send_keys(self.username)
def Submit(self, selector):
"""Finds an element using CSS |selector| and calls its submit() handler.
Args:
selector: The CSS selector for the element to call submit() on.
"""
logging.log(SCRIPT_DEBUG, "action: Submit %s" % selector)
element = self.WaitUntilDisplayed(selector)
element.submit()
# Login/Logout methods
def Login(self):
"""Login Method. Has to be overridden by the WebsiteTest test."""
raise NotImplementedError("Login is not implemented.")
def LoginWhenAutofilled(self):
"""Logs in and checks that the password is autofilled."""
self.autofill_expectation = WebsiteTest.AUTOFILLED
self.Login()
def LoginWhenNotAutofilled(self):
"""Logs in and checks that the password is not autofilled."""
self.autofill_expectation = WebsiteTest.NOT_AUTOFILLED
self.Login()
def Logout(self):
self.environment.DeleteCookies()
# Test scenarios
def PromptFailTest(self):
"""Checks that prompt is not shown on a failed login attempt.
Tries to login with a wrong password and checks that the password
is not offered for saving.
Raises:
Exception: An exception is raised if the test fails.
"""
logging.log(SCRIPT_DEBUG, "PromptFailTest for %s" % self.name)
correct_password = self.password
# Hardcoded random wrong password. Chosen by fair `pwgen` call.
# For details, see: http://xkcd.com/221/.
self.password = "ChieF2ae"
self.LoginWhenNotAutofilled()
self.password = correct_password
self.environment.CheckForNewString(
[environment.MESSAGE_ASK, environment.MESSAGE_SAVE],
False,
"Error: did not detect wrong login on website: %s" % self.name)
def PromptSuccessTest(self):
"""Checks that prompt is shown on a successful login attempt.
Tries to login with a correct password and checks that the password
is offered for saving. Chrome cannot have the auto-save option on
when running this test.
Raises:
Exception: An exception is raised if the test fails.
"""
logging.log(SCRIPT_DEBUG, "PromptSuccessTest for %s" % self.name)
if not self.environment.show_prompt:
raise Exception("Switch off auto-save during PromptSuccessTest.")
self.LoginWhenNotAutofilled()
self.environment.CheckForNewString(
[environment.MESSAGE_ASK],
True,
"Error: did not detect login success on website: %s" % self.name)
def SaveAndAutofillTest(self):
"""Checks that a correct password is saved and autofilled.
Tries to login with a correct password and checks that the password
is saved and autofilled on next visit. Chrome must have the auto-save
option on when running this test.
Raises:
Exception: An exception is raised if the test fails.
"""
logging.log(SCRIPT_DEBUG, "SaveAndAutofillTest for %s" % self.name)
if self.environment.show_prompt:
raise Exception("Switch off auto-save during PromptSuccessTest.")
self.LoginWhenNotAutofilled()
self.environment.CheckForNewString(
[environment.MESSAGE_SAVE],
True,
"Error: did not detect login success on website: %s" % self.name)
self.Logout()
self.LoginWhenAutofilled()
self.environment.CheckForNewString(
[environment.MESSAGE_SAVE],
True,
"Error: failed autofilled login on website: %s" % self.name)
| bsd-3-clause |
repotvsupertuga/tvsupertuga.repository | instal/plugin.video.SportsDevil/service/oscrypto/errors.py | 7 | 1657 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
import socket
__all__ = [
'AsymmetricKeyError',
'CACertsError',
'LibraryNotFoundError',
'SignatureError',
'TLSError',
'TLSVerificationError',
]
class LibraryNotFoundError(Exception):
"""
An exception when trying to find a shared library
"""
pass
class SignatureError(Exception):
"""
An exception when validating a signature
"""
pass
class AsymmetricKeyError(Exception):
"""
An exception when a key is invalid or unsupported
"""
pass
class IncompleteAsymmetricKeyError(AsymmetricKeyError):
"""
An exception when a key is missing necessary information
"""
pass
class CACertsError(Exception):
"""
An exception when exporting CA certs from the OS trust store
"""
pass
class TLSError(socket.error):
"""
An exception related to TLS functionality
"""
message = None
def __init__(self, message):
self.args = (message,)
self.message = message
def __str__(self):
output = self.__unicode__()
if sys.version_info < (3,):
output = output.encode('utf-8')
return output
def __unicode__(self):
return self.message
class TLSVerificationError(TLSError):
"""
A server certificate verification error happened during a TLS handshake
"""
certificate = None
def __init__(self, message, certificate):
TLSError.__init__(self, message)
self.certificate = certificate
self.args = (message, certificate)
| gpl-2.0 |
cryptominingpool204/cryptominingpool204.github.io | stratum-mining-litecoin-stratum-lite/stratum-mining-litecoin-stratum-lite/lib/config_default.py | 1 | 6894 | '''
This is an example configuration for the Stratum server.
Please rename it to config.py and fill in the correct values.
'''
# ******************** GENERAL SETTINGS ***************
# Enable some verbose debug (logging requests and responses).
DEBUG = False
# Destination for application logs, files rotated once per day.
LOGDIR = 'log/'
# Main application log file.
LOGFILE = 'stratum.log'
# Possible values: DEBUG, INFO, WARNING, ERROR, CRITICAL
LOGLEVEL = 'INFO'
# Logging Rotation can be enabled with the following settings
# If it is not enabled here, you can set up logrotate to rotate the files.
# For built-in log rotation, set LOG_ROTATION = True and configure the variables below.
LOG_ROTATION = True
LOG_SIZE = 10485760 # Rotate every 10M
LOG_RETENTION = 10 # Keep 10 Logs
# How many threads use for synchronous methods (services).
# 30 is enough for small installation, for real usage
# it should be slightly more, say 100-300.
THREAD_POOL_SIZE = 100
# RPC call throws TimeoutServiceException once total time since request has been
# placed (time to delivery to client + time for processing on the client)
# crosses _TOTAL (in seconds).
# _TOTAL reflects the fact that not all transports deliver RPC requests to the clients
# instantly, so request can wait some time in the buffer on server side.
# NOT IMPLEMENTED YET
#RPC_TIMEOUT_TOTAL = 600
# RPC call throws TimeoutServiceException once the client has been processing
# a request for longer than _PROCESS (in seconds).
# NOT IMPLEMENTED YET
#RPC_TIMEOUT_PROCESS = 30
# Do you want to expose "example" service in server?
# Useful for learning the server; you probably want to disable
# this on production
ENABLE_EXAMPLE_SERVICE = False
# ******************** TRANSPORTS *********************
# Hostname or external IP to expose
HOSTNAME = 'stratum.example.com'
# Port used for Socket transport. Use 'None' for disabling the transport.
LISTEN_SOCKET_TRANSPORT = 3333
# Port used for HTTP Poll transport. Use 'None' for disabling the transport
LISTEN_HTTP_TRANSPORT = None
# Port used for HTTPS Poll transport
LISTEN_HTTPS_TRANSPORT = None
# Port used for WebSocket transport, 'None' for disabling WS
LISTEN_WS_TRANSPORT = None
# Port used for secure WebSocket, 'None' for disabling WSS
LISTEN_WSS_TRANSPORT = None
# ******************** SSL SETTINGS ******************
# Private key and certification file for SSL protected transports
# You can find howto for generating self-signed certificate in README file
SSL_PRIVKEY = 'server.key'
SSL_CACERT = 'server.crt'
# ******************** TCP SETTINGS ******************
# Enables support for socket encapsulation, which is compatible
# with haproxy 1.5+. By enabling this, first line of received
# data will represent some metadata about proxied stream:
# PROXY <TCP4 or TCP6> <source IP> <dest IP> <source port> <dest port>\n
#
# Full specification: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
TCP_PROXY_PROTOCOL = False
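# When enabled, the first line received on a proxied connection looks like
# this (illustrative addresses and ports):
#   PROXY TCP4 198.51.100.1 203.0.113.7 49152 3333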
# ******************** HTTP SETTINGS *****************
# Keepalive for HTTP transport sessions (at this time for both poll and push)
# High value leads to higher memory usage (all sessions are stored in memory ATM).
# Low value leads to more frequent session reinitializing (like downloading address history).
HTTP_SESSION_TIMEOUT = 3600 # in seconds
# Maximum number of messages (notifications, responses) waiting for delivery to HTTP Poll clients.
# Buffer length is PER CONNECTION. A high value will consume a lot of RAM;
# a short history means that in some edge cases clients won't receive older events.
HTTP_BUFFER_LIMIT = 10000
# User agent used in HTTP requests (for both HTTP transports and for proxy calls from services)
USER_AGENT = 'Stratum/0.1'
# Provide human-friendly user interface on HTTP transports for browsing exposed services.
BROWSER_ENABLE = True
# ******************** LITECOIND SETTINGS ************
# Hostname and credentials for one trusted Bitcoin node ("Satoshi's client").
# Stratum uses both the P2P port (which is always 8333) and the RPC port
LITECOIN_TRUSTED_HOST = '127.0.0.1'
LITECOIN_TRUSTED_PORT = 8332 # RPC port
LITECOIN_TRUSTED_USER = 'stratum'
LITECOIN_TRUSTED_PASSWORD = '***somepassword***'
# ******************** OTHER CORE SETTINGS *********************
# Use "echo -n '<yourpassword>' | sha256sum | cut -f1 -d' ' "
# for calculating SHA256 of your preferred password
ADMIN_PASSWORD_SHA256 = None # Admin functionality is disabled
#ADMIN_PASSWORD_SHA256 = '9e6c0c1db1e0dfb3fa5159deb4ecd9715b3c8cd6b06bd4a3ad77e9a8c5694219' # SHA256 of the password
# IP from which admin calls are allowed.
# Set None to allow admin calls from all IPs
ADMIN_RESTRICT_INTERFACE = '127.0.0.1'
# Use "./signature.py > signing_key.pem" to generate unique signing key for your server
SIGNING_KEY = None # Message signing is disabled
#SIGNING_KEY = 'signing_key.pem'
# Origin of signed messages. Provide some unique string,
# ideally URL where users can find some information about your identity
SIGNING_ID = None
#SIGNING_ID = 'stratum.somedomain.com' # Use custom string
#SIGNING_ID = HOSTNAME # Use hostname as the signing ID
# *********************** IRC / PEER CONFIGURATION *************
IRC_NICK = None # Skip IRC registration
#IRC_NICK = "stratum" # Use nickname of your choice
# Which hostname / external IP expose in IRC room
# This should be official HOSTNAME for normal operation.
IRC_HOSTNAME = HOSTNAME
# Don't change this unless you're creating private Stratum cloud.
IRC_SERVER = 'irc.freenode.net'
IRC_ROOM = '#stratum-nodes'
IRC_PORT = 6667
# Hardcoded list of Stratum nodes for clients to switch when this node is not available.
PEERS = [
{
'hostname': 'stratum.bitcoin.cz',
'trusted': True, # This node is trustworthy
'weight': -1, # Higher number means higher priority for selection.
# -1 will work mostly as a backup when other servers won't work.
# (IRC peers have weight=0 automatically).
},
]
'''
DATABASE_DRIVER = 'MySQLdb'
DATABASE_HOST = 'palatinus.cz'
DATABASE_DBNAME = 'marekp_bitcointe'
DATABASE_USER = 'marekp_bitcointe'
DATABASE_PASSWORD = '**empty**'
'''
# VARDIFF
# Variable Difficulty Enable
VARIABLE_DIFF = False # Master variable difficulty enable
# Variable diff tuning variables
#VARDIFF will start at the POOL_TARGET. It can go as low as VDIFF_MIN_TARGET and as high as min(VDIFF_MAX_TARGET, Litecoin's difficulty)
DIFF_UPDATE_FREQUENCY = 86400 # Update the litecoin difficulty once a day for the VARDIFF maximum
VDIFF_MIN_TARGET = 15 # Minimum Target difficulty
VDIFF_MAX_TARGET = 1000 # Maximum Target difficulty
VDIFF_TARGET_TIME = 30 # Target time per share (i.e. try to get 1 share per this many seconds)
VDIFF_RETARGET_TIME = 120 # Check to see if we should retarget this often
VDIFF_VARIANCE_PERCENT = 20 # Allow average time to vary this % from target without retarget
| mit |
h3biomed/ansible | lib/ansible/modules/remote_management/intersight/intersight_facts.py | 13 | 3129 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: intersight_facts
short_description: Gather facts about Intersight
description:
- Gathers facts about servers in L(Cisco Intersight,https://intersight.com).
extends_documentation_fragment: intersight
options:
server_names:
description:
- Server names to retrieve facts from.
- An empty list will return all servers.
type: list
required: yes
author:
- David Soper (@dsoper2)
- CiscoUcs (@CiscoUcs)
version_added: '2.8'
'''
EXAMPLES = r'''
- name: Get facts for all servers
intersight_facts:
api_private_key: ~/Downloads/SecretKey.txt
api_key_id: 64612d300d0982/64612d300d0b00/64612d300d3650
server_names:
- debug:
msg: "server name {{ item.Name }}, moid {{ item.Moid }}"
loop: "{{ intersight_servers }}"
when: intersight_servers is defined
- name: Get facts for servers by name
intersight_facts:
api_private_key: ~/Downloads/SecretKey.txt
api_key_id: 64612d300d0982/64612d300d0b00/64612d300d3650
server_names:
- SJC18-L14-UCS1-1
- debug:
msg: "server moid {{ intersight_servers[0].Moid }}"
when: intersight_servers[0] is defined
'''
RETURN = r'''
intersight_servers:
description: A list of Intersight Servers. See L(Cisco Intersight,https://intersight.com/apidocs) for details.
returned: always
type: complex
contains:
Name:
description: The name of the server.
returned: always
type: str
sample: SJC18-L14-UCS1-1
Moid:
description: The unique identifier of this Managed Object instance.
returned: always
type: str
sample: 5978bea36ad4b000018d63dc
'''
from ansible.module_utils.remote_management.intersight import IntersightModule, intersight_argument_spec
from ansible.module_utils.basic import AnsibleModule
def get_servers(module, intersight):
query_list = []
if module.params['server_names']:
for server in module.params['server_names']:
query_list.append("Name eq '%s'" % server)
query_str = ' or '.join(query_list)
options = {
'http_method': 'get',
'resource_path': '/compute/PhysicalSummaries',
'query_params': {
'$filter': query_str,
'$top': 5000
}
}
response_dict = intersight.call_api(**options)
return response_dict.get('Results')
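# For server_names=['a', 'b'] the $filter built above is
# "Name eq 'a' or Name eq 'b'"; an empty name list yields an empty filter,
# which returns all servers.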
def main():
argument_spec = intersight_argument_spec
argument_spec.update(
server_names=dict(type='list', required=True),
)
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
)
intersight = IntersightModule(module)
# one API call returning all requested servers
module.exit_json(intersight_servers=get_servers(module, intersight))
if __name__ == '__main__':
main()
| gpl-3.0 |
msebire/intellij-community | python/helpers/py2only/docutils/transforms/peps.py | 122 | 11065 | # $Id: peps.py 6433 2010-09-28 08:21:25Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Transforms for PEP processing.
- `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `PEPZero`: Special processing for PEP 0.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils, languages
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
from docutils.transforms import parts, references, misc
class Headers(Transform):
"""
Process fields in a PEP's initial RFC-2822 header.
"""
default_priority = 360
pep_url = 'pep-%04d'
pep_cvs_url = ('http://svn.python.org/view/*checkout*'
'/peps/trunk/pep-%04d.txt')
rcs_keyword_substitutions = (
(re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)
def apply(self):
if not len(self.document):
# @@@ replace these DataErrors with proper system messages
raise DataError('Document tree is empty.')
header = self.document[0]
if not isinstance(header, nodes.field_list) or \
'rfc2822' not in header['classes']:
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a PEP.')
pep = None
for field in header:
if field[0].astext().lower() == 'pep': # should be the first field
value = field[1].astext()
try:
pep = int(value)
cvs_url = self.pep_cvs_url % pep
except ValueError:
pep = value
cvs_url = None
msg = self.document.reporter.warning(
'"PEP" header must contain an integer; "%s" is an '
'invalid value.' % pep, base_node=field)
msgid = self.document.set_id(msg)
prb = nodes.problematic(value, value or '(none)',
refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
if len(field[1]):
field[1][0][:] = [prb]
else:
field[1] += nodes.paragraph('', '', prb)
break
if pep is None:
raise DataError('Document does not contain an RFC-2822 "PEP" '
'header.')
if pep == 0:
# Special processing for PEP 0.
pending = nodes.pending(PEPZero)
self.document.insert(1, pending)
self.document.note_pending(pending)
if len(header) < 2 or header[1][0].astext().lower() != 'title':
raise DataError('No title!')
for field in header:
name = field[0].astext().lower()
body = field[1]
if len(body) > 1:
raise DataError('PEP header field body contains multiple '
'elements:\n%s' % field.pformat(level=1))
elif len(body) == 1:
if not isinstance(body[0], nodes.paragraph):
raise DataError('PEP header field body may only contain '
'a single paragraph:\n%s'
% field.pformat(level=1))
elif name == 'last-modified':
date = time.strftime(
'%d-%b-%Y',
time.localtime(os.stat(self.document['source'])[8]))
if cvs_url:
body += nodes.paragraph(
'', '', nodes.reference('', date, refuri=cvs_url))
else:
# empty
continue
para = body[0]
if name == 'author':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node))
elif name == 'discussions-to':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node, pep))
elif name in ('replaces', 'replaced-by', 'requires'):
newbody = []
space = nodes.Text(' ')
for refpep in re.split(',?\s+', body.astext()):
pepno = int(refpep)
newbody.append(nodes.reference(
refpep, refpep,
refuri=(self.document.settings.pep_base_url
+ self.pep_url % pepno)))
newbody.append(space)
para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified':
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
if cvs_url:
date = para.astext()
para[:] = [nodes.reference('', date, refuri=cvs_url)]
elif name == 'content-type':
pep_type = para.astext()
uri = self.document.settings.pep_base_url + self.pep_url % 12
para[:] = [nodes.reference('', pep_type, refuri=uri)]
elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):
"""
Insert an empty table of contents topic and a transform placeholder into
the document after the RFC 2822 header.
"""
default_priority = 380
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
name = language.labels['contents']
title = nodes.title('', name)
topic = nodes.topic('', title, classes=['contents'])
name = nodes.fully_normalize_name(name)
if not self.document.has_name(name):
topic['names'].append(name)
self.document.note_implicit_target(topic)
pending = nodes.pending(parts.Contents)
topic += pending
self.document.insert(1, topic)
self.document.note_pending(pending)
class TargetNotes(Transform):
"""
Locate the "References" section, insert a placeholder for an external
target footnote insertion transform at the end, and schedule the
transform to run immediately.
"""
default_priority = 520
def apply(self):
doc = self.document
i = len(doc) - 1
refsect = copyright = None
while i >= 0 and isinstance(doc[i], nodes.section):
title_words = doc[i][0].astext().lower().split()
if 'references' in title_words:
refsect = doc[i]
break
elif 'copyright' in title_words:
copyright = i
i -= 1
if not refsect:
refsect = nodes.section()
refsect += nodes.title('', 'References')
doc.set_id(refsect)
if copyright:
# Put the new "References" section before "Copyright":
doc.insert(copyright, refsect)
else:
# Put the new "References" section at end of doc:
doc.append(refsect)
pending = nodes.pending(references.TargetNotes)
refsect.append(pending)
self.document.note_pending(pending, 0)
pending = nodes.pending(misc.CallBack,
details={'callback': self.cleanup_callback})
refsect.append(pending)
self.document.note_pending(pending, 1)
def cleanup_callback(self, pending):
"""
Remove an empty "References" section.
Called after the `references.TargetNotes` transform is complete.
"""
if len(pending.parent) == 2: # <title> and <pending>
pending.parent.parent.remove(pending.parent)
class PEPZero(Transform):
"""
Special processing for PEP 0.
"""
    default_priority = 760
def apply(self):
visitor = PEPZeroSpecial(self.document)
self.document.walk(visitor)
self.startnode.parent.remove(self.startnode)
class PEPZeroSpecial(nodes.SparseNodeVisitor):
"""
Perform the special processing needed by PEP 0:
- Mask email addresses.
- Link PEP numbers in the second column of 4-column tables to the PEPs
themselves.
"""
pep_url = Headers.pep_url
def unknown_visit(self, node):
pass
def visit_reference(self, node):
node.replace_self(mask_email(node))
def visit_field_list(self, node):
if 'rfc2822' in node['classes']:
raise nodes.SkipNode
def visit_tgroup(self, node):
self.pep_table = node['cols'] == 4
self.entry = 0
def visit_colspec(self, node):
self.entry += 1
if self.pep_table and self.entry == 2:
node['classes'].append('num')
def visit_row(self, node):
self.entry = 0
def visit_entry(self, node):
self.entry += 1
if self.pep_table and self.entry == 2 and len(node) == 1:
node['classes'].append('num')
p = node[0]
if isinstance(p, nodes.paragraph) and len(p) == 1:
text = p.astext()
try:
pep = int(text)
ref = (self.document.settings.pep_base_url
+ self.pep_url % pep)
p[0] = nodes.reference(text, text, refuri=ref)
except ValueError:
pass
non_masked_addresses = ('peps@python.org',
'python-list@python.org',
'python-dev@python.org')
def mask_email(ref, pepno=None):
"""
Mask the email address in `ref` and return a replacement node.
`ref` is returned unchanged if it contains no email address.
For email addresses such as "user@host", mask the address as "user at
host" (text) to thwart simple email address harvesters (except for those
listed in `non_masked_addresses`). If a PEP number (`pepno`) is given,
return a reference including a default email subject.
"""
if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
        # len('mailto:') == 7, so slice from index 7 to recover the address
        if ref['refuri'][7:] in non_masked_addresses:
replacement = ref[0]
else:
replacement_text = ref.astext().replace('@', ' at ')
replacement = nodes.raw('', replacement_text, format='html')
if pepno is None:
return replacement
else:
ref['refuri'] += '?subject=PEP%%20%s' % pepno
ref[:] = [replacement]
return ref
else:
return ref
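# A minimal sketch of the masking behaviour (the node construction below is
# hypothetical; real reference nodes come from the parsed document tree):
#   ref = nodes.reference('', 'user@host', refuri='mailto:user@host')
#   mask_email(ref).astext()  # -> 'user at host'
# Addresses listed in `non_masked_addresses` pass through unmasked, and any
# node without a mailto: refuri is returned unchanged.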
| apache-2.0 |
bottompawn/kbengine | kbe/res/scripts/common/Lib/idlelib/dynOptionMenuWidget.py | 61 | 1987 | """
OptionMenu widget modified to allow dynamic menu reconfiguration
and setting of highlightthickness
"""
from tkinter import OptionMenu, _setit, Tk, StringVar, Button
import copy
import re
class DynOptionMenu(OptionMenu):
"""
unlike OptionMenu, our kwargs can include highlightthickness
"""
def __init__(self, master, variable, value, *values, **kwargs):
#get a copy of kwargs before OptionMenu.__init__ munges them
kwargsCopy=copy.copy(kwargs)
if 'highlightthickness' in list(kwargs.keys()):
del(kwargs['highlightthickness'])
OptionMenu.__init__(self, master, variable, value, *values, **kwargs)
self.config(highlightthickness=kwargsCopy.get('highlightthickness'))
#self.menu=self['menu']
self.variable=variable
self.command=kwargs.get('command')
def SetMenu(self,valueList,value=None):
"""
clear and reload the menu with a new set of options.
valueList - list of new options
value - initial value to set the optionmenu's menubutton to
"""
self['menu'].delete(0,'end')
for item in valueList:
self['menu'].add_command(label=item,
command=_setit(self.variable,item,self.command))
if value:
self.variable.set(value)
def _dyn_option_menu(parent):
root = Tk()
root.title("Tets dynamic option menu")
var = StringVar(root)
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
var.set("Old option set") #Set the default value
dyn = DynOptionMenu(root,var, "old1","old2","old3","old4")
dyn.pack()
def update():
dyn.SetMenu(["new1","new2","new3","new4"],value="new option set")
button = Button(root, text="Change option set", command=update)
button.pack()
root.mainloop()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_dyn_option_menu)
| lgpl-3.0 |
mancoast/CPythonPyc_test | fail/312_test_multibytecodec_support.py | 1 | 12675 | #!/usr/bin/env python
#
# test_multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
import sys, codecs
import unittest, re
from test import support
from io import BytesIO
class TestBase:
encoding = '' # codec name
codec = None # codec tuple (with 4 elements)
tstring = None # must set. 2 strings to test StreamReader
codectests = None # must set. codec test tuple
roundtriptest = 1 # set if roundtrip is possible with unicode
has_iso10646 = 0 # set if this encoding contains whole iso10646 map
xmlcharnametest = None # string to test xmlcharrefreplace
unmappedunicode = '\udeee' # a unicode codepoint that is not mapped.
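    # A minimal (hypothetical) sketch of how a concrete codec test subclasses
    # TestBase; real subclasses live in test_codecencodings_*.py and also
    # derive from unittest.TestCase:
    #
    #   class Test_GB2312(TestBase, unittest.TestCase):
    #       encoding = 'gb2312'
    #       tstring = load_teststring('gb2312')
    #       codectests = (
    #           # (input, error-handling scheme, expected output or None)
    #           (b'abc\x81\x81\xc1\xc4', 'strict', None),
    #           (b'abc\x81\x81\xc1\xc4', 'replace', 'abc\ufffd\u804a'),
    #       )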
def setUp(self):
if self.codec is None:
self.codec = codecs.lookup(self.encoding)
self.encode = self.codec.encode
self.decode = self.codec.decode
self.reader = self.codec.streamreader
self.writer = self.codec.streamwriter
self.incrementalencoder = self.codec.incrementalencoder
self.incrementaldecoder = self.codec.incrementaldecoder
def test_chunkcoding(self):
tstring_lines = []
for b in self.tstring:
lines = b.split(b"\n")
last = lines.pop()
assert last == b""
lines = [line + b"\n" for line in lines]
tstring_lines.append(lines)
for native, utf8 in zip(*tstring_lines):
u = self.decode(native)[0]
self.assertEqual(u, utf8.decode('utf-8'))
if self.roundtriptest:
self.assertEqual(native, self.encode(u)[0])
def test_errorhandle(self):
for source, scheme, expected in self.codectests:
if isinstance(source, bytes):
func = self.decode
else:
func = self.encode
if expected:
result = func(source, scheme)[0]
if func is self.decode:
self.assertTrue(type(result) is str, type(result))
else:
self.assertTrue(type(result) is bytes, type(result))
self.assertEqual(result, expected)
else:
self.assertRaises(UnicodeError, func, source, scheme)
def test_xmlcharrefreplace(self):
if self.has_iso10646:
return
s = "\u0b13\u0b23\u0b60 nd eggs"
self.assertEqual(
self.encode(s, "xmlcharrefreplace")[0],
b"ଓଣୠ nd eggs"
)
def test_customreplace_encode(self):
if self.has_iso10646:
return
from html.entities import codepoint2name
def xmlcharnamereplace(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
l = []
for c in exc.object[exc.start:exc.end]:
if ord(c) in codepoint2name:
l.append("&%s;" % codepoint2name[ord(c)])
else:
l.append("&#%d;" % ord(c))
return ("".join(l), exc.end)
codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
if self.xmlcharnametest:
sin, sout = self.xmlcharnametest
else:
sin = "\xab\u211c\xbb = \u2329\u1234\u232a"
sout = b"«ℜ» = ⟨ሴ⟩"
self.assertEqual(self.encode(sin,
"test.xmlcharnamereplace")[0], sout)
def test_callback_wrong_objects(self):
def myreplace(exc):
return (ret, exc.end)
codecs.register_error("test.cjktest", myreplace)
for ret in ([1, 2, 3], [], None, object(), b'string', b''):
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_long_index(self):
def myreplace(exc):
return ('x', int(exc.end))
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
'test.cjktest'), (b'abcdxefgh', 9))
def myreplace(exc):
return ('x', sys.maxsize + 1)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_None_index(self):
def myreplace(exc):
return ('x', None)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_backward_index(self):
def myreplace(exc):
if myreplace.limit > 0:
myreplace.limit -= 1
return ('REPLACED', 0)
else:
return ('TERMINAL', exc.end)
myreplace.limit = 3
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
'test.cjktest'),
(b'abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
def test_callback_forward_index(self):
def myreplace(exc):
return ('REPLACED', exc.end + 2)
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
'test.cjktest'), (b'abcdREPLACEDgh', 9))
def test_callback_index_outofbound(self):
def myreplace(exc):
return ('TERM', 100)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_incrementalencoder(self):
UTF8Reader = codecs.getreader('utf-8')
for sizehint in [None] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(BytesIO(self.tstring[1]))
ostream = BytesIO()
encoder = self.incrementalencoder()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
e = encoder.encode(data)
ostream.write(e)
self.assertEqual(ostream.getvalue(), self.tstring[0])
def test_incrementaldecoder(self):
UTF8Writer = codecs.getwriter('utf-8')
for sizehint in [None, -1] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = BytesIO(self.tstring[0])
ostream = UTF8Writer(BytesIO())
decoder = self.incrementaldecoder()
while 1:
data = istream.read(sizehint)
if not data:
break
else:
u = decoder.decode(data)
ostream.write(u)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_incrementalencoder_error_callback(self):
inv = self.unmappedunicode
e = self.incrementalencoder()
self.assertRaises(UnicodeEncodeError, e.encode, inv, True)
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), b'')
e.reset()
def tempreplace(exc):
return ('called', exc.end)
codecs.register_error('test.incremental_error_callback', tempreplace)
e.errors = 'test.incremental_error_callback'
self.assertEqual(e.encode(inv, True), b'called')
# again
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), b'')
def test_streamreader(self):
UTF8Writer = codecs.getwriter('utf-8')
for name in ["read", "readline", "readlines"]:
for sizehint in [None, -1] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = self.reader(BytesIO(self.tstring[0]))
ostream = UTF8Writer(BytesIO())
func = getattr(istream, name)
while 1:
data = func(sizehint)
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_streamwriter(self):
readfuncs = ('read', 'readline', 'readlines')
UTF8Reader = codecs.getreader('utf-8')
for name in readfuncs:
for sizehint in [None] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(BytesIO(self.tstring[1]))
ostream = self.writer(BytesIO())
func = getattr(istream, name)
while 1:
if sizehint is not None:
data = func(sizehint)
else:
data = func()
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[0])
if len('\U00012345') == 2: # ucs2 build
_unichr = chr
def chr(v):
if v >= 0x10000:
return _unichr(0xd800 + ((v - 0x10000) >> 10)) + \
_unichr(0xdc00 + ((v - 0x10000) & 0x3ff))
else:
return _unichr(v)
_ord = ord
def ord(c):
if len(c) == 2:
return 0x10000 + ((_ord(c[0]) - 0xd800) << 10) + \
(ord(c[1]) - 0xdc00)
else:
return _ord(c)
class TestBase_Mapping(unittest.TestCase):
pass_enctest = []
pass_dectest = []
supmaps = []
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
try:
self.open_mapping_file() # test it to report the error early
except IOError:
self.skipTest("Could not retrieve "+self.mapfileurl)
def open_mapping_file(self):
return support.open_urlresource(self.mapfileurl)
def test_mapping_file(self):
if self.mapfileurl.endswith('.xml'):
self._test_mapping_file_ucm()
else:
self._test_mapping_file_plain()
def _test_mapping_file_plain(self):
unichrs = lambda s: ''.join(map(chr, map(eval, s.split('+'))))
urt_wa = {}
for line in self.open_mapping_file():
if not line:
break
data = line.split('#')[0].strip().split()
if len(data) != 2:
continue
csetval = eval(data[0])
if csetval <= 0x7F:
csetch = bytes([csetval & 0xff])
elif csetval >= 0x1000000:
csetch = bytes([(csetval >> 24), ((csetval >> 16) & 0xff),
((csetval >> 8) & 0xff), (csetval & 0xff)])
elif csetval >= 0x10000:
csetch = bytes([(csetval >> 16), ((csetval >> 8) & 0xff),
(csetval & 0xff)])
elif csetval >= 0x100:
csetch = bytes([(csetval >> 8), (csetval & 0xff)])
else:
continue
unich = unichrs(data[1])
if ord(unich) == 0xfffd or unich in urt_wa:
continue
urt_wa[unich] = csetch
self._testpoint(csetch, unich)
def _test_mapping_file_ucm(self):
ucmdata = self.open_mapping_file().read()
uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata)
for uni, coded in uc:
unich = chr(int(uni, 16))
codech = bytes(int(c, 16) for c in coded.split())
self._testpoint(codech, unich)
def test_mapping_supplemental(self):
for mapping in self.supmaps:
self._testpoint(*mapping)
def _testpoint(self, csetch, unich):
if (csetch, unich) not in self.pass_enctest:
self.assertEqual(unich.encode(self.encoding), csetch)
if (csetch, unich) not in self.pass_dectest:
self.assertEqual(str(csetch, self.encoding), unich)
def load_teststring(encoding):
from test import cjkencodings_test
return cjkencodings_test.teststring[encoding]
| gpl-3.0 |
martijnthe/qemu_stm32 | tests/qemu-iotests/qed.py | 248 | 7194 | #!/usr/bin/env python
#
# Tool to manipulate QED image files
#
# Copyright (C) 2010 IBM, Corp.
#
# Authors:
# Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
import sys
import struct
import random
import optparse
# This can be used as a module
__all__ = ['QED_F_NEED_CHECK', 'QED']
QED_F_NEED_CHECK = 0x02
header_fmt = '<IIIIQQQQQII'
header_size = struct.calcsize(header_fmt)
field_names = ['magic', 'cluster_size', 'table_size',
'header_size', 'features', 'compat_features',
'autoclear_features', 'l1_table_offset', 'image_size',
'backing_filename_offset', 'backing_filename_size']
table_elem_fmt = '<Q'
table_elem_size = struct.calcsize(table_elem_fmt)
def err(msg):
sys.stderr.write(msg + '\n')
sys.exit(1)
def unpack_header(s):
fields = struct.unpack(header_fmt, s)
return dict((field_names[idx], val) for idx, val in enumerate(fields))
def pack_header(header):
fields = tuple(header[x] for x in field_names)
return struct.pack(header_fmt, *fields)
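# Sanity sketch: pack_header and unpack_header are inverses for any dict
# that supplies every entry in field_names (the values below are hypothetical
# placeholders, not a valid QED header):
#   hdr = dict(zip(field_names, range(len(field_names))))
#   assert unpack_header(pack_header(hdr)) == hdr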
def unpack_table_elem(s):
return struct.unpack(table_elem_fmt, s)[0]
def pack_table_elem(elem):
return struct.pack(table_elem_fmt, elem)
class QED(object):
def __init__(self, f):
self.f = f
self.f.seek(0, 2)
self.filesize = f.tell()
self.load_header()
self.load_l1_table()
def raw_pread(self, offset, size):
self.f.seek(offset)
return self.f.read(size)
def raw_pwrite(self, offset, data):
self.f.seek(offset)
return self.f.write(data)
def load_header(self):
self.header = unpack_header(self.raw_pread(0, header_size))
def store_header(self):
self.raw_pwrite(0, pack_header(self.header))
def read_table(self, offset):
size = self.header['table_size'] * self.header['cluster_size']
s = self.raw_pread(offset, size)
table = [unpack_table_elem(s[i:i + table_elem_size]) for i in xrange(0, size, table_elem_size)]
return table
def load_l1_table(self):
self.l1_table = self.read_table(self.header['l1_table_offset'])
self.table_nelems = self.header['table_size'] * self.header['cluster_size'] / table_elem_size
def write_table(self, offset, table):
s = ''.join(pack_table_elem(x) for x in table)
self.raw_pwrite(offset, s)
def random_table_item(table):
vals = [(index, offset) for index, offset in enumerate(table) if offset != 0]
if not vals:
err('cannot pick random item because table is empty')
return random.choice(vals)
def corrupt_table_duplicate(table):
'''Corrupt a table by introducing a duplicate offset'''
victim_idx, victim_val = random_table_item(table)
unique_vals = set(table)
if len(unique_vals) == 1:
err('no duplication corruption possible in table')
dup_val = random.choice(list(unique_vals.difference([victim_val])))
table[victim_idx] = dup_val
def corrupt_table_invalidate(qed, table):
'''Corrupt a table by introducing an invalid offset'''
index, _ = random_table_item(table)
table[index] = qed.filesize + random.randint(0, 100 * 1024 * 1024 * 1024 * 1024)
def cmd_show(qed, *args):
    '''show [header|l1|l2 <offset>] - Show header or l1/l2 tables'''
if not args or args[0] == 'header':
print qed.header
elif args[0] == 'l1':
print qed.l1_table
elif len(args) == 2 and args[0] == 'l2':
offset = int(args[1])
print qed.read_table(offset)
else:
err('unrecognized sub-command')
def cmd_duplicate(qed, table_level):
'''duplicate l1|l2 - Duplicate a random table element'''
if table_level == 'l1':
offset = qed.header['l1_table_offset']
table = qed.l1_table
elif table_level == 'l2':
_, offset = random_table_item(qed.l1_table)
table = qed.read_table(offset)
else:
err('unrecognized sub-command')
corrupt_table_duplicate(table)
qed.write_table(offset, table)
def cmd_invalidate(qed, table_level):
'''invalidate l1|l2 - Plant an invalid table element at random'''
if table_level == 'l1':
offset = qed.header['l1_table_offset']
table = qed.l1_table
elif table_level == 'l2':
_, offset = random_table_item(qed.l1_table)
table = qed.read_table(offset)
else:
err('unrecognized sub-command')
corrupt_table_invalidate(qed, table)
qed.write_table(offset, table)
def cmd_need_check(qed, *args):
'''need-check [on|off] - Test, set, or clear the QED_F_NEED_CHECK header bit'''
if not args:
print bool(qed.header['features'] & QED_F_NEED_CHECK)
return
if args[0] == 'on':
qed.header['features'] |= QED_F_NEED_CHECK
elif args[0] == 'off':
qed.header['features'] &= ~QED_F_NEED_CHECK
else:
err('unrecognized sub-command')
qed.store_header()
def cmd_zero_cluster(qed, pos, *args):
'''zero-cluster <pos> [<n>] - Zero data clusters'''
pos, n = int(pos), 1
if args:
if len(args) != 1:
err('expected one argument')
n = int(args[0])
for i in xrange(n):
l1_index = pos / qed.header['cluster_size'] / len(qed.l1_table)
if qed.l1_table[l1_index] == 0:
err('no l2 table allocated')
l2_offset = qed.l1_table[l1_index]
l2_table = qed.read_table(l2_offset)
l2_index = (pos / qed.header['cluster_size']) % len(qed.l1_table)
l2_table[l2_index] = 1 # zero the data cluster
qed.write_table(l2_offset, l2_table)
pos += qed.header['cluster_size']
def cmd_copy_metadata(qed, outfile):
'''copy-metadata <outfile> - Copy metadata only (for scrubbing corrupted images)'''
out = open(outfile, 'wb')
# Match file size
out.seek(qed.filesize - 1)
out.write('\0')
# Copy header clusters
out.seek(0)
header_size_bytes = qed.header['header_size'] * qed.header['cluster_size']
out.write(qed.raw_pread(0, header_size_bytes))
# Copy L1 table
out.seek(qed.header['l1_table_offset'])
s = ''.join(pack_table_elem(x) for x in qed.l1_table)
out.write(s)
# Copy L2 tables
for l2_offset in qed.l1_table:
if l2_offset == 0:
continue
l2_table = qed.read_table(l2_offset)
out.seek(l2_offset)
s = ''.join(pack_table_elem(x) for x in l2_table)
out.write(s)
out.close()
def usage():
print 'Usage: %s <file> <cmd> [<arg>, ...]' % sys.argv[0]
print
print 'Supported commands:'
for cmd in sorted(x for x in globals() if x.startswith('cmd_')):
print globals()[cmd].__doc__
sys.exit(1)
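# Example invocations (the image path is hypothetical):
#   qed.py disk.qed show header
#   qed.py disk.qed need-check on
#   qed.py disk.qed invalidate l2
#   qed.py disk.qed copy-metadata scrubbed.qed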
def main():
if len(sys.argv) < 3:
usage()
filename, cmd = sys.argv[1:3]
cmd = 'cmd_' + cmd.replace('-', '_')
if cmd not in globals():
usage()
qed = QED(open(filename, 'r+b'))
try:
globals()[cmd](qed, *sys.argv[3:])
except TypeError, e:
sys.stderr.write(globals()[cmd].__doc__ + '\n')
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-2.0 |
cloudbau/nova | nova/tests/objects/test_instance_group.py | 4 | 9950 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova import db
from nova import exception
from nova.objects import instance_group
from nova import test
from nova.tests.objects import test_objects
class _TestInstanceGroupObjects(test.TestCase):
def setUp(self):
super(_TestInstanceGroupObjects, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
def _get_default_values(self):
return {'name': 'fake_name',
'user_id': self.user_id,
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
metadata=None, members=None):
return db.instance_group_create(context, values, policies=policies,
metadata=metadata, members=members)
def test_get_by_uuid(self):
values = self._get_default_values()
metadata = {'key11': 'value1',
'key12': 'value2'}
policies = ['policy1', 'policy2']
members = ['instance_id1', 'instance_id2']
db_result = self._create_instance_group(self.context, values,
metadata=metadata,
policies=policies,
members=members)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.metadetails, metadata)
self.assertEqual(obj_result.members, members)
self.assertEqual(obj_result.policies, policies)
def test_refresh(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.name, 'fake_name')
values = {'name': 'new_name', 'user_id': 'new_user',
'project_id': 'new_project'}
db.instance_group_update(self.context, db_result['uuid'],
values)
obj_result.refresh()
self.assertEqual(obj_result.name, 'new_name')
self.assertEqual(set([]), obj_result.obj_what_changed())
def test_save_simple(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
self.assertEqual(obj_result.name, 'fake_name')
obj_result.name = 'new_name'
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['name'], 'new_name')
def test_save_policies(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
policies = ['policy1', 'policy2']
obj_result.policies = policies
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['policies'], policies)
def test_save_members(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
members = ['instance1', 'instance2']
obj_result.members = members
obj_result.save()
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['members'], members)
def test_save_metadata(self):
values = self._get_default_values()
db_result = self._create_instance_group(self.context, values)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
metadata = {'foo': 'bar'}
obj_result.metadetails = metadata
obj_result.save()
metadata1 = db.instance_group_metadata_get(self.context,
db_result['uuid'])
for key, value in metadata.iteritems():
            self.assertEqual(value, metadata1[key])
def test_create(self):
group1 = instance_group.InstanceGroup()
group1.uuid = 'fake-uuid'
group1.name = 'fake-name'
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.uuid, group2.uuid)
self.assertEqual(group1.name, group2.name)
result = db.instance_group_get(self.context, group1.uuid)
self.assertEqual(group1.id, result.id)
self.assertEqual(group1.uuid, result.uuid)
self.assertEqual(group1.name, result.name)
def test_create_with_policies(self):
group1 = instance_group.InstanceGroup()
group1.policies = ['policy1', 'policy2']
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.policies, group2.policies)
def test_create_with_members(self):
group1 = instance_group.InstanceGroup()
group1.members = ['instance1', 'instance2']
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.members, group2.members)
def test_create_with_metadata(self):
group1 = instance_group.InstanceGroup()
metadata = {'foo': 'bar'}
group1.metadetails = metadata
group1.create(self.context)
group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
group1.uuid)
self.assertEqual(group1.id, group2.id)
for key, value in metadata.iteritems():
self.assertEqual(value, group2.metadetails[key])
def test_recreate_fails(self):
group = instance_group.InstanceGroup()
group.create(self.context)
self.assertRaises(exception.ObjectActionError, group.create,
self.context)
def test_destroy(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
group = instance_group.InstanceGroup()
group.id = result.id
group.uuid = result.uuid
group.destroy(self.context)
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_get, self.context, result['uuid'])
def _populate_instances(self):
instances = [('f1', 'p1'), ('f2', 'p1'),
('f3', 'p2'), ('f4', 'p2')]
for instance in instances:
values = self._get_default_values()
values['uuid'] = instance[0]
values['project_id'] = instance[1]
self._create_instance_group(self.context, values)
def test_list_all(self):
self._populate_instances()
inst_list = instance_group.InstanceGroupList.get_all(self.context)
groups = db.instance_group_get_all(self.context)
self.assertEqual(len(groups), len(inst_list.objects))
self.assertEqual(len(groups), 4)
for i in range(0, len(groups)):
self.assertIsInstance(inst_list.objects[i],
instance_group.InstanceGroup)
self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid'])
def test_list_by_project_id(self):
self._populate_instances()
project_ids = ['p1', 'p2']
for id in project_ids:
il = instance_group.InstanceGroupList.get_by_project_id(
self.context, id)
groups = db.instance_group_get_all_by_project_id(self.context, id)
self.assertEqual(len(groups), len(il.objects))
self.assertEqual(len(groups), 2)
for i in range(0, len(groups)):
self.assertIsInstance(il.objects[i],
instance_group.InstanceGroup)
self.assertEqual(il.objects[i].uuid, groups[i]['uuid'])
self.assertEqual(il.objects[i].project_id, id)
class TestInstanceGroupObject(test_objects._LocalTest,
_TestInstanceGroupObjects):
pass
class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
_TestInstanceGroupObjects):
pass
| apache-2.0 |
yanheven/console | openstack_dashboard/urls.py | 7 | 1862 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for the OpenStack Dashboard.
"""
from django.conf import settings
from django.conf.urls import include # noqa
from django.conf.urls import patterns # noqa
from django.conf.urls.static import static # noqa
from django.conf.urls import url # noqa
from django.contrib.staticfiles.urls import staticfiles_urlpatterns # noqa
import horizon
urlpatterns = patterns('',
url(r'^$', 'openstack_dashboard.views.splash', name='splash'),
url(r'^auth/', include('openstack_auth.urls')),
url(r'', include(horizon.urls))
)
# Development static app and project media serving using the staticfiles app.
urlpatterns += staticfiles_urlpatterns()
# Convenience function for serving user-uploaded media during
# development. Only active if DEBUG==True and the URL prefix is a local
# path. Production media should NOT be served by Django.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^500/$', 'django.views.defaults.server_error')
)
| apache-2.0 |
martinling/imusim | imusim/io/bvh.py | 1 | 14100 | """
Reading and writing BVH body model movement files.
"""
# Copyright (C) 2009-2011 University of Edinburgh
#
# This file is part of IMUSim.
#
# IMUSim is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMUSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMUSim. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
from imusim.trajectories.rigid_body import SampledBodyModel, SampledJoint, \
PointTrajectory
from imusim.maths.quaternions import Quaternion
from imusim.maths.transforms import convertCGtoNED, convertNEDtoCG
import numpy as np
# Conversion coefficient to convert CMU skeletal dimensions to meters
# Taken from the CMU FAQ http://mocap.cs.cmu.edu/faqs.php
CMU_CONVERSION = (1.0/0.45)*2.54/100.0
# Coefficients to convert between meters and centimeters
M_TO_CM_CONVERSION = 100
CM_TO_M_CONVERSION = 0.01
def loadBVHFile(filename, conversionFactor=1):
"""
Load sampled body model data from a BVH file.
@param filename: Name of file to load.
@param conversionFactor: Scale factor to apply to position values, e.g.
to convert meters to centimeters use conversionFactor=100.
@return: A L{SampledBodyModel} object.
"""
with open(filename,'r') as bvhFile:
loader = BVHLoader(bvhFile,conversionFactor)
loader._readHeader()
loader._readMotionData()
return loader.model
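# A minimal usage sketch (the file name is hypothetical; CMU motion capture
# data would instead pass conversionFactor=CMU_CONVERSION to obtain meters):
#   model = loadBVHFile('walk.bvh', conversionFactor=CM_TO_M_CONVERSION)
#   print model.name, model.startTime, model.endTime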
class BVHLoader(object):
'''
Loader class for BVH files.
'''
def __init__(self, bvhFile, conversionFactor):
'''
Construct a new BVH loader for the specified BVH file.
'''
self.bvhFile = bvhFile
self.tokens = []
self.line = 0
self.totalChannels = 0
self.startOfMotionData = None
self.conversionFactor = conversionFactor
def _readHeader(self):
'''
Read the header information from the BVH file.
'''
assert self.bvhFile.tell() == 0, \
'Must be at the start of file to read header'
self._checkToken("HIERARCHY")
self._checkToken("ROOT")
self.model = SampledBodyModel()
self.model.channels = []
self.jointStack = []
self.jointStack.append(self.model)
self._readJoint()
del self.jointStack
self._checkToken("MOTION")
self._checkToken("Frames:")
self.frameCount = self._intToken()
self._checkToken("Frame")
self._checkToken("Time:")
self.framePeriod = self._floatToken()
def _readMotionData(self):
'''
Read the motion data from the BVH file.
'''
# update joint tree with new data.
frame = 0
lastRotation = {}
for frame in range(self.frameCount):
time = frame * self.framePeriod
channelData = self._readFrame()
for joint in self.model.joints:
for chan in joint.channels:
chanData = channelData.pop(0)
if chan == "Xposition":
xPos = chanData
elif chan == "Yposition":
yPos = chanData
elif chan == "Zposition":
zPos = chanData
elif chan == "Xrotation":
xRot = chanData
elif chan == "Yrotation":
yRot = chanData
elif chan == "Zrotation":
zRot = chanData
else:
raise RuntimeError('Unknown channel: '+chan)
if not joint.hasParent:
try:
p = np.array([[xPos,yPos,zPos]]).T * \
self.conversionFactor
p = convertCGtoNED(p)
del xPos, yPos, zPos
joint.positionKeyFrames.add(time, p)
except UnboundLocalError:
raise SyntaxError("No position data for root joint")
try:
if joint.hasChildren:
q = Quaternion.fromEuler((zRot,xRot,yRot), order='zxy')
q = convertCGtoNED(q)
del xRot, yRot, zRot
# Rotation in BVH is relative to parent joint so
# combine the rotations
if joint.hasParent:
q = lastRotation[joint.parent] * q
joint.rotationKeyFrames.add(time, q)
lastRotation[joint] = q
except UnboundLocalError:
raise SyntaxError("No rotation data for a joint that "
"is not an end effector")
def _readFrame(self):
'''
Read a frame of data from the BVH file.
@return: The channel data as a list of floats.
'''
chanData = self._readline().split()
if len(chanData) != self.totalChannels:
raise SyntaxError, "Syntax error at line %d: Number of entries \
does not match the number of channels. (Found %d of %d)" %(
self.line,len(chanData),self.totalChannels)
return map(float,chanData)
def _readJoint(self):
# use jointStack[-1] to peek at the top of the jointStack
currentJoint = self.jointStack[-1]
name = self._token()
# name will be 'Site' for End Site joints.
if name == 'Site':
name = currentJoint.parent.name+"_end"
currentJoint.name = name
self._checkToken("{")
while True:
token = self._token()
if token == "OFFSET":
x = self._floatToken()
y = self._floatToken()
z = self._floatToken()
currentJoint.positionOffset = \
convertCGtoNED(np.array([[x,y,z]]).T*self.conversionFactor)
elif token == "CHANNELS":
n = self._intToken()
channels = []
for i in range(n):
token = self._token()
if token not in ["Xposition","Yposition","Zposition",\
"Xrotation","Yrotation","Zrotation"]:
raise SyntaxError, "Syntax error in line %d: Invalid \
channel name '%s'" %(self.line,token)
else:
channels.append(token)
self.totalChannels += n
currentJoint.channels = channels
elif token in ("JOINT", "End"):
if token == "JOINT":
joint = SampledJoint(currentJoint)
else:
joint = PointTrajectory(currentJoint)
joint.channels = []
self.jointStack.append(joint)
self._readJoint()
elif token == "}":
self.jointStack.pop()
break
else:
raise SyntaxError,\
"Syntax error in line %d: Unknown keyword '%s'" %(
self.line,token)
def _checkToken(self,expectedToken):
'''
Try to read an expected token from the input queue.
@raise SyntaxError: if the expected token is not found.
'''
token = self._token()
if token != expectedToken:
raise SyntaxError, "Syntax error in line %d: Expected %s \
but found %s" %(self.line,expectedToken,token)
def _intToken(self):
'''
Read an integer value from the token queue.
'''
token = self._token()
try:
return int(token)
except ValueError:
raise SyntaxError, 'Syntax error in line %d: Integer \
expected but found %s' %(self.line,token)
def _floatToken(self):
'''
Read a float value from the token queue.
'''
token = self._token()
try:
return float(token)
except ValueError:
raise SyntaxError, 'Syntax error in line %d: Float \
expected but found %s' %(self.line,token)
def _token(self):
'''
Return the next token in the input file.
'''
try:
return self.tokens.pop(0)
except IndexError:
# There are no more tokens in the queue so read the next line
l = self._readline()
tokens = l.strip().split()
for token in tokens:
self.tokens.append(token)
# recurse to return the next token or read another line
return self._token()
def _readline(self):
'''
Read a line from the input file and add the tokens to the list of
tokens to process.
'''
l = self.bvhFile.readline()
self.line+=1
if l != "":
return l
else:
raise StopIteration
def saveBVHFile(model, filename, samplePeriod, conversionFactor = 1):
"""
Save a body model data to a BVH file.
@param model: Body model to save.
@param filename: Name of file to write to.
@param samplePeriod: The time between frames in the BVH output.
@param conversionFactor: Scale factor to apply to position values, e.g.
to convert meters to centimeters use conversionFactor=100.
"""
with open(filename,'w') as bvhFile:
exporter = BVHExporter(model,bvhFile, samplePeriod, conversionFactor)
exporter.writeHeader()
for frame in xrange(exporter.frames):
exporter.writeFrame(model.startTime + frame * samplePeriod)
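# Round-trip sketch (file names are hypothetical; resamples frames at 100 Hz):
#   model = loadBVHFile('input.bvh')
#   saveBVHFile(model, 'output.bvh', samplePeriod=0.01)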
class BVHExporter(object):
"""
Exporter class to write BVH files.
@ivar model: The body model to export.
@ivar bvhFile: The BVH file to export data to.
@ivar samplePeriod: The time between frames in the BVH output
@ivar conversionFactor: Scale factor to apply to position values, e.g.
to convert meters to centimeters use conversionFactor=100.
"""
def __init__(self, model, bvhFile, samplePeriod, conversionFactor):
'''
Construct BVH exporter.
@param model: Body model to save.
@param bvhFile: File to write to.
@param samplePeriod: The time between frames in the BVH output.
@param conversionFactor: Scale factor to apply to position values, e.g.
to convert meters to centimeters use conversionFactor=100.
'''
self.bvhFile = bvhFile
self.model = model
self.samplePeriod = samplePeriod
self.conversionFactor = conversionFactor
@property
def frames(self):
""" The number of frames that will be exported. """
return int((self.model.endTime - self.model.startTime) //
self.samplePeriod)
def writeHeader(self):
"""
Write the header of the BVH file.
"""
self.bvhFile.write('HIERARCHY\n')
pad = " "
def post(j):
self.bvhFile.write("%s}\n" %(pad*j.depth))
for p in self.model.preorderTraversal(postFunc=post):
d = p.depth
if not p.hasParent:
self.bvhFile.write("%sROOT %s\n" %(pad*d, p.name))
elif p.isJoint:
self.bvhFile.write("%sJOINT %s\n" %(pad*d, p.name))
else:
self.bvhFile.write("%sEnd Site\n" %(pad*d))
self.bvhFile.write("%s{\n" %(pad*d))
self.bvhFile.write("%sOFFSET" %(pad*(d+1)))
for v in convertNEDtoCG(p.positionOffset):
self.bvhFile.write(" %.8f" % (v * self.conversionFactor))
self.bvhFile.write("\n")
if p.isJoint:
if not hasattr(p, "channels"):
if not p.hasParent:
p.channels = ["Xposition", "Yposition", "Zposition",
"Zrotation", "Xrotation", "Yrotation"]
else:
p.channels = ["Zrotation", "Xrotation", "Yrotation"]
self.bvhFile.write("%sCHANNELS %d" %(pad*(d+1),len(p.channels)))
for chan in p.channels:
self.bvhFile.write(" %s" % chan)
self.bvhFile.write("\n")
self.bvhFile.write("MOTION\nFrames: %d\nFrame Time: %.8f\n" %(
self.frames,self.samplePeriod))
def writeFrame(self, t):
for j in self.model.joints:
if hasattr(j,'channels') and len(j.channels) > 0:
if not j.hasParent:
pos = j.position(t) * self.conversionFactor
pos = convertNEDtoCG(pos)
rot = j.rotation(t)
if j.hasParent:
# BVH rotations are relative to parent joint
# RigidBodyModel rotations are absolute
rot = j.parent.rotation(t).conjugate * rot
rot = convertNEDtoCG(rot)
rot = rot.toEuler(order='zxy')
for chan in j.channels:
if chan == 'Xposition':
self.bvhFile.write("%f " %pos[0])
elif chan == 'Yposition':
self.bvhFile.write("%f " %pos[1])
elif chan == 'Zposition':
self.bvhFile.write("%f " %pos[2])
elif chan == 'Zrotation':
self.bvhFile.write("%f " %rot[0])
elif chan == 'Xrotation':
self.bvhFile.write("%f " %rot[1])
elif chan == 'Yrotation':
self.bvhFile.write("%f " %rot[2])
self.bvhFile.write("\n")
| gpl-3.0 |
MadeiraCloud/salt | sources/salt/pillar/reclass_adapter.py | 1 | 4015 | # -*- coding: utf-8 -*-
'''
Use the "reclass" database as a Pillar source
.. |reclass| replace:: **reclass**
This ``ext_pillar`` plugin provides access to the |reclass| database, such
that Pillar data for a specific minion are fetched using |reclass|.
You can find more information about |reclass| at
http://reclass.pantsfullofunix.net.
To use the plugin, add it to the ``ext_pillar`` list in the Salt master config
and tell |reclass| by way of a few options how and where to find the
inventory:
.. code-block:: yaml
ext_pillar:
- reclass:
storage_type: yaml_fs
inventory_base_uri: /srv/salt
This would cause |reclass| to read the inventory from YAML files in
``/srv/salt/nodes`` and ``/srv/salt/classes``.
If you are also using |reclass| as ``master_tops`` plugin, and you want to
avoid having to specify the same information for both, use YAML anchors (take
note of the differing data types for ``ext_pillar`` and ``master_tops``):
.. code-block:: yaml
reclass: &reclass
storage_type: yaml_fs
inventory_base_uri: /srv/salt
reclass_source_path: ~/code/reclass
ext_pillar:
- reclass: *reclass
master_tops:
reclass: *reclass
If you want to run reclass from source, rather than installing it, you can
either let the master know via the ``PYTHONPATH`` environment variable, or by
setting the configuration option, like in the example above.
'''
# This file cannot be called reclass.py, because then the module import would
# not work. Thanks to the __virtual__ function, however, the plugin still
# responds to the name 'reclass'.
from salt.exceptions import SaltInvocationError
from salt.utils.reclass import (
prepend_reclass_source_path,
filter_out_source_path_option,
set_inventory_base_uri_default
)
# Define the module's virtual name
__virtualname__ = 'reclass'
def __virtual__(retry=False):
try:
import reclass
return __virtualname__
except ImportError as e:
if retry:
return False
for pillar in __opts__.get('ext_pillar', []):
if 'reclass' not in pillar.keys():
continue
# each pillar entry is a single-key hash of name -> options
opts = pillar.values()[0]
prepend_reclass_source_path(opts)
break
return __virtual__(retry=True)
def ext_pillar(minion_id, pillar, **kwargs):
'''
Obtain the Pillar data from **reclass** for the given ``minion_id``.
'''
# If reclass is installed, __virtual__ put it onto the search path, so we
# don't need to protect against ImportError:
from reclass.adapters.salt import ext_pillar as reclass_ext_pillar
from reclass.errors import ReclassException
try:
# the source path we used above isn't something reclass needs to care
# about, so filter it:
filter_out_source_path_option(kwargs)
# if no inventory_base_uri was specified, initialize it to the first
# file_roots of class 'base' (if that exists):
set_inventory_base_uri_default(__opts__, kwargs)
# I purposely do not pass any of __opts__ or __salt__ or __grains__
# to reclass, as I consider those to be Salt-internal and reclass
# should not make any assumptions about it.
return reclass_ext_pillar(minion_id, pillar, **kwargs)
except TypeError as e:
if 'unexpected keyword argument' in e.message:
arg = e.message.split()[-1]
raise SaltInvocationError('ext_pillar.reclass: unexpected option: '
+ arg)
else:
raise
except KeyError as e:
if 'id' in e.message:
raise SaltInvocationError('ext_pillar.reclass: __opts__ does not '
'define minion ID')
else:
raise
except ReclassException as e:
raise SaltInvocationError('ext_pillar.reclass: {0}'.format(e.message))
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/232_test_mimetypes.py | 4 | 2327 | import mimetypes
import StringIO
import unittest
from test import test_support
# Tell it we don't know about external files:
mimetypes.knownfiles = []
mimetypes.inited = False
class MimeTypesTestCase(unittest.TestCase):
def setUp(self):
self.db = mimetypes.MimeTypes()
def test_default_data(self):
eq = self.assertEqual
eq(self.db.guess_type("foo.html"), ("text/html", None))
eq(self.db.guess_type("foo.tgz"), ("application/x-tar", "gzip"))
eq(self.db.guess_type("foo.tar.gz"), ("application/x-tar", "gzip"))
eq(self.db.guess_type("foo.tar.Z"), ("application/x-tar", "compress"))
def test_data_urls(self):
eq = self.assertEqual
guess_type = self.db.guess_type
eq(guess_type("data:,thisIsTextPlain"), ("text/plain", None))
eq(guess_type("data:;base64,thisIsTextPlain"), ("text/plain", None))
eq(guess_type("data:text/x-foo,thisIsTextXFoo"), ("text/x-foo", None))
def test_file_parsing(self):
eq = self.assertEqual
sio = StringIO.StringIO("x-application/x-unittest pyunit\n")
self.db.readfp(sio)
eq(self.db.guess_type("foo.pyunit"),
("x-application/x-unittest", None))
eq(self.db.guess_extension("x-application/x-unittest"), ".pyunit")
def test_non_standard_types(self):
eq = self.assertEqual
# First try strict
eq(self.db.guess_type('foo.xul', strict=True), (None, None))
eq(self.db.guess_extension('image/jpg', strict=True), None)
# And then non-strict
eq(self.db.guess_type('foo.xul', strict=False), ('text/xul', None))
eq(self.db.guess_extension('image/jpg', strict=False), '.jpg')
def test_guess_all_types(self):
eq = self.assertEqual
# First try strict
all = self.db.guess_all_extensions('text/plain', strict=True)
all.sort()
eq(all, ['.bat', '.c', '.h', '.ksh', '.pl', '.txt'])
# And now non-strict
all = self.db.guess_all_extensions('image/jpg', strict=False)
all.sort()
eq(all, ['.jpg'])
# And now for no hits
all = self.db.guess_all_extensions('image/jpg', strict=True)
eq(all, [])
def test_main():
test_support.run_unittest(MimeTypesTestCase)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
ppwwyyxx/tensorflow | tensorflow/python/kernel_tests/proto/encode_proto_op_test_base.py | 26 | 8489 | # =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Table-driven test for encode_proto op.
It tests that encode_proto is a lossless inverse of decode_proto
(for the specified fields).
"""
# Python3 readiness boilerplate
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.kernel_tests.proto import proto_op_test_base as test_base
from tensorflow.python.kernel_tests.proto import test_example_pb2
from tensorflow.python.ops import array_ops
class EncodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
"""Base class for testing proto encoding ops."""
def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
"""EncodeProtoOpTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
encode_module: a module containing the `encode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(EncodeProtoOpTestBase, self).__init__(methodName)
self._decode_module = decode_module
self._encode_module = encode_module
def testBadSizesShape(self):
if context.executing_eagerly():
expected_error = (errors.InvalidArgumentError,
r'Invalid shape for field double_value.')
else:
expected_error = (ValueError,
r'Shape must be at least rank 2 but is rank 0')
with self.assertRaisesRegexp(*expected_error):
self.evaluate(
self._encode_module.encode_proto(
sizes=1,
values=[np.double(1.0)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
def testBadInputs(self):
# Invalid field name
with self.assertRaisesOpError('Unknown field: non_existent_field'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['non_existent_field']))
# Incorrect types.
with self.assertRaisesOpError('Incompatible type for field double_value.'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
# Incorrect shapes of sizes.
for sizes_value in 1, np.array([[[0, 0]]]):
with self.assertRaisesOpError(
r'sizes should be batch_size \+ \[len\(field_names\)\]'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=sizes_value,
values=[np.array([[0.0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
else:
with self.cached_session():
sizes = array_ops.placeholder(dtypes.int32)
values = array_ops.placeholder(dtypes.float64)
self._encode_module.encode_proto(
sizes=sizes,
values=[values],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']).eval(feed_dict={
sizes: sizes_value,
values: [[0.0]]
})
# Inconsistent shapes of values.
with self.assertRaisesOpError('Values must match up to the last dimension'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[np.array([[0.0]]),
np.array([[0], [0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']))
else:
with self.cached_session():
values1 = array_ops.placeholder(dtypes.float64)
values2 = array_ops.placeholder(dtypes.int32)
(self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[values1, values2],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']).eval(feed_dict={
values1: [[0.0]],
values2: [[0], [0]]
}))
def _testRoundtrip(self, in_bufs, message_type, fields):
field_names = [f.name for f in fields]
out_types = [f.dtype for f in fields]
with self.cached_session() as sess:
sizes, field_tensors = self._decode_module.decode_proto(
in_bufs,
message_type=message_type,
field_names=field_names,
output_types=out_types)
out_tensors = self._encode_module.encode_proto(
sizes,
field_tensors,
message_type=message_type,
field_names=field_names)
out_bufs, = sess.run([out_tensors])
# Check that the re-encoded tensor has the same shape.
self.assertEqual(in_bufs.shape, out_bufs.shape)
# Compare the input and output.
for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
in_obj = test_example_pb2.TestValue()
in_obj.ParseFromString(in_buf)
out_obj = test_example_pb2.TestValue()
out_obj.ParseFromString(out_buf)
# Check that the deserialized objects are identical.
self.assertEqual(in_obj, out_obj)
# Check that the input and output serialized messages are identical.
# If we fail here, there is a difference in the serialized
# representation but the new serialization still parses. This could
# be harmless (a change in map ordering?) or it could be bad (e.g.
# loss of packing in the encoding).
self.assertEqual(in_buf, out_buf)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtrip(self, case):
in_bufs = [value.SerializeToString() for value in case.values]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.TestValue', case.fields)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtripPacked(self, case):
# Now try with the packed serialization.
# We test the packed representations by loading the same test cases using
# PackedTestValue instead of TestValue. To do this we rely on the text
# format being the same for packed and unpacked fields, and reparse the test
# message using the packed version of the proto.
in_bufs = [
# Note: float_format='.17g' is necessary to ensure preservation of
# doubles and floats in text format.
text_format.Parse(
text_format.MessageToString(
value, float_format='.17g'),
test_example_pb2.PackedTestValue()).SerializeToString()
for value in case.values
]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.PackedTestValue', case.fields)
| apache-2.0 |
flatangle/flatlib | contrib/topical_almuten.py | 1 | 5509 | from flatlib import const
from flatlib.tools import planetarytime
from flatlib.dignities import essential
from flatlib.tools import arabicparts
# List of dignities
DIGNITY_LIST = [
'ruler',
'exalt',
'dayTrip',
'nightTrip',
'partTrip',
'term',
'face'
]
# List of objects
OBJECT_LIST = const.LIST_SEVEN_PLANETS
def newRow():
""" Returns a new Almutem table row. """
row = {}
for obj in OBJECT_LIST:
row[obj] = {
'string': '',
'score': 0
}
return row
# Computes the topical almuten (TA) table.
def computeTA(chart, TA):
almutems = {}
# SECOND HOUSE,
# source: Omar of Tiberias - Three Books on Nativities, p. 57; Persian Nativities II, p. 52
if TA == "TA_2H":
TA_LIST = [
chart.getHouse(const.HOUSE2)
]
TA_LIST.extend(
[v for k, v in chart.objects.getObjectsInHouse(chart.getHouse(const.HOUSE2)).content.items()])
TA_LIST.extend(
[chart.getObject(essential.ruler(chart.getHouse(const.HOUSE2).sign)),
arabicparts.getPart(arabicparts.PARS_SUBSTANCE, chart),
chart.getObject(essential.ruler(arabicparts.getPart(arabicparts.PARS_SUBSTANCE, chart).sign)),
chart.getObject(const.JUPITER),
chart.getObject(const.PARS_FORTUNA),
chart.getObject(essential.ruler(chart.getObject(const.PARS_FORTUNA).sign))
])
# THIRD HOUSE,
# source: Omar of Tiberias - Three Books on Nativities, p. 58-59; Persian Nativities II, p. 53
if TA == "TA_3H":
TA_LIST = [
chart.getHouse(const.HOUSE3)
]
TA_LIST.extend(
[v for k, v in chart.objects.getObjectsInHouse(chart.getHouse(const.HOUSE3)).content.items()])
TA_LIST.extend(
[chart.getObject(essential.ruler(chart.getHouse(const.HOUSE3).sign)),
arabicparts.getPart(arabicparts.PARS_BROTHERS, chart),
chart.getObject(essential.ruler(arabicparts.getPart(arabicparts.PARS_BROTHERS, chart).sign)),
chart.getObject(const.MARS),
chart.getObject(essential.dayTrip(chart.getObject(const.MARS).sign)),
chart.getObject(essential.nightTrip(chart.getObject(const.MARS).sign)),
chart.getObject(essential.partTrip(chart.getObject(const.MARS).sign))
])
# 4th HOUSE,
# Status of father - source: Persian Nativities II, p. 204
if TA == "TA_4H_Father_Status":
TA_LIST = [
chart.getHouse(const.HOUSE4)
]
TA_LIST.extend(
[v for k, v in chart.objects.getObjectsInHouse(chart.getHouse(const.HOUSE4)).content.items()])
TA_LIST.extend(
[chart.getObject(essential.ruler(chart.getHouse(const.HOUSE4).sign)),
arabicparts.getPart(arabicparts.PARS_FATHER, chart),
chart.getObject(essential.ruler(arabicparts.getPart(arabicparts.PARS_FATHER, chart).sign)),
chart.getObject(const.SATURN),
chart.getObject(const.SUN),
chart.getObject(const.SYZYGY),
])
# Father - source: JOHANNES SCHOENER, p. 51
if TA == "TA_4H_Father_SCHOENER":
TA_LIST = [
chart.getHouse(const.HOUSE4)
]
TA_LIST.extend(
[chart.getObject(essential.ruler(chart.getHouse(const.HOUSE4).sign)),
chart.getObject(const.SUN),
             chart.getObject(essential.ruler(chart.getObject(const.SUN).sign)),
arabicparts.getPart(arabicparts.PARS_FATHER, chart),
chart.getObject(essential.ruler(arabicparts.getPart(arabicparts.PARS_FATHER, chart).sign)),
])
        if chart.isDiurnal():
            TA_LIST.extend([chart.getObject(
                essential.dayTrip(chart.getHouse(const.HOUSE4).sign))])
        else:
            TA_LIST.extend([chart.getObject(
                essential.nightTrip(chart.getHouse(const.HOUSE4).sign))])
# 10th HOUSE,
# Status of mother - source: Persian Nativities II, p. 51
if TA == "TA_4H_Mother_Status":
TA_LIST = [
chart.getHouse(const.HOUSE10)
]
TA_LIST.extend(
[v for k, v in chart.objects.getObjectsInHouse(chart.getHouse(const.HOUSE10)).content.items()])
TA_LIST.extend(
[chart.getObject(essential.ruler(chart.getHouse(const.HOUSE10).sign)),
arabicparts.getPart(arabicparts.PARS_MOTHER, chart),
chart.getObject(essential.ruler(arabicparts.getPart(arabicparts.PARS_MOTHER, chart).sign)),
chart.getObject(const.MOON),
chart.getObject(const.VENUS),
chart.getObject(const.SYZYGY),
])
# And many more...
for hyleg in TA_LIST:
row = newRow()
digInfo = essential.getInfo(hyleg.sign, hyleg.signlon)
# Add the scores of each planet where hyleg has dignities
for dignity in DIGNITY_LIST:
objID = digInfo[dignity]
if objID:
score = essential.SCORES[dignity]
row[objID]['string'] += '+%s' % score
row[objID]['score'] += score
almutems[hyleg.id] = row
# Compute scores
scores = newRow()
for _property, _list in almutems.items():
for objID, values in _list.items():
scores[objID]['string'] += values['string']
scores[objID]['score'] += values['score']
almutems['Score'] = scores
return almutems
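# A minimal usage sketch (date and location are hypothetical placeholders,
# following the flatlib Chart API):
#   from flatlib.chart import Chart
#   from flatlib.datetime import Datetime
#   from flatlib.geopos import GeoPos
#   chart = Chart(Datetime('2015/03/13', '17:00', '+00:00'),
#                 GeoPos('38n32', '8w54'))
#   table = computeTA(chart, 'TA_2H')
#   print(table['Score'])  # accumulated dignity scores per planet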
| mit |
phalax4/CarnotKE | jyhton/lib-python/2.7/unittest/test/test_suite.py | 111 | 12069 | import unittest
import sys
from .support import LoggingResult, TestEquality
### Support code for Test_TestSuite
################################################################
class Test(object):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def test_3(self): pass
def runTest(self): pass
def _mk_TestSuite(*names):
return unittest.TestSuite(Test.Foo(n) for n in names)
################################################################
class Test_TestSuite(unittest.TestCase, TestEquality):
### Set up attributes needed by inherited tests
################################################################
# Used by TestEquality.test_eq
eq_pairs = [(unittest.TestSuite(), unittest.TestSuite()),
(unittest.TestSuite(), unittest.TestSuite([])),
(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]
# Used by TestEquality.test_ne
ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1')),
(unittest.TestSuite([]), _mk_TestSuite('test_1')),
(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3')),
(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]
################################################################
### /Set up attributes needed by inherited tests
### Tests for TestSuite.__init__
################################################################
# "class TestSuite([tests])"
#
# The tests iterable should be optional
def test_init__tests_optional(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should deal with empty tests iterables by allowing the
# creation of an empty suite
def test_init__empty_tests(self):
suite = unittest.TestSuite([])
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should allow any iterable to provide tests
def test_init__tests_from_any_iterable(self):
def tests():
yield unittest.FunctionTestCase(lambda: None)
yield unittest.FunctionTestCase(lambda: None)
suite_1 = unittest.TestSuite(tests())
self.assertEqual(suite_1.countTestCases(), 2)
suite_2 = unittest.TestSuite(suite_1)
self.assertEqual(suite_2.countTestCases(), 2)
suite_3 = unittest.TestSuite(set(suite_1))
self.assertEqual(suite_3.countTestCases(), 2)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# Does TestSuite() also allow other TestSuite() instances to be present
# in the tests iterable?
def test_init__TestSuite_instances_in_tests(self):
def tests():
ftc = unittest.FunctionTestCase(lambda: None)
yield unittest.TestSuite([ftc])
yield unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite(tests())
self.assertEqual(suite.countTestCases(), 2)
################################################################
### /Tests for TestSuite.__init__
# Container types should support the iter protocol
def test_iter(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(list(suite), [test1, test2])
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite returns 0?
def test_countTestCases_zero_simple(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite (even if it contains other empty
# TestSuite instances) returns 0?
def test_countTestCases_zero_nested(self):
class Test1(unittest.TestCase):
def test(self):
pass
suite = unittest.TestSuite([unittest.TestSuite()])
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
def test_countTestCases_simple(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(suite.countTestCases(), 2)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Make sure this holds for nested TestSuite instances, too
def test_countTestCases_nested(self):
class Test1(unittest.TestCase):
def test1(self): pass
def test2(self): pass
test2 = unittest.FunctionTestCase(lambda: None)
test3 = unittest.FunctionTestCase(lambda: None)
child = unittest.TestSuite((Test1('test2'), test2))
parent = unittest.TestSuite((test3, child, Test1('test1')))
self.assertEqual(parent.countTestCases(), 4)
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
#
# And if there are no tests? What then?
def test_run__empty_suite(self):
events = []
result = LoggingResult(events)
suite = unittest.TestSuite()
suite.run(result)
self.assertEqual(events, [])
# "Note that unlike TestCase.run(), TestSuite.run() requires the
# "result object to be passed in."
def test_run__requires_result(self):
suite = unittest.TestSuite()
try:
suite.run()
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
def test_run(self):
events = []
result = LoggingResult(events)
class LoggingCase(unittest.TestCase):
def run(self, result):
events.append('run %s' % self._testMethodName)
def test1(self): pass
def test2(self): pass
tests = [LoggingCase('test1'), LoggingCase('test2')]
unittest.TestSuite(tests).run(result)
self.assertEqual(events, ['run test1', 'run test2'])
# "Add a TestCase ... to the suite"
def test_addTest__TestCase(self):
class Foo(unittest.TestCase):
def test(self): pass
test = Foo('test')
suite = unittest.TestSuite()
suite.addTest(test)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [test])
# "Add a ... TestSuite to the suite"
def test_addTest__TestSuite(self):
class Foo(unittest.TestCase):
def test(self): pass
suite_2 = unittest.TestSuite([Foo('test')])
suite = unittest.TestSuite()
suite.addTest(suite_2)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [suite_2])
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
def test_addTests(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
test_1 = Foo('test_1')
test_2 = Foo('test_2')
inner_suite = unittest.TestSuite([test_2])
def gen():
yield test_1
yield test_2
yield inner_suite
suite_1 = unittest.TestSuite()
suite_1.addTests(gen())
self.assertEqual(list(suite_1), list(gen()))
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
suite_2 = unittest.TestSuite()
for t in gen():
suite_2.addTest(t)
self.assertEqual(suite_1, suite_2)
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# What happens if it doesn't get an iterable?
def test_addTest__noniterable(self):
suite = unittest.TestSuite()
try:
suite.addTests(5)
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
def test_addTest__noncallable(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, 5)
def test_addTest__casesuiteclass(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, Test_TestSuite)
self.assertRaises(TypeError, suite.addTest, unittest.TestSuite)
def test_addTests__string(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTests, "foo")
def test_function_in_suite(self):
def f(_):
pass
suite = unittest.TestSuite()
suite.addTest(f)
# when the bug is fixed this line will not crash
suite.run(unittest.TestResult())
def test_basetestsuite(self):
class Test(unittest.TestCase):
wasSetUp = False
wasTornDown = False
@classmethod
def setUpClass(cls):
cls.wasSetUp = True
@classmethod
def tearDownClass(cls):
cls.wasTornDown = True
def testPass(self):
pass
def testFail(self):
fail  # undefined name: deliberately raises NameError so the run records an error
class Module(object):
wasSetUp = False
wasTornDown = False
@staticmethod
def setUpModule():
Module.wasSetUp = True
@staticmethod
def tearDownModule():
Module.wasTornDown = True
Test.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.BaseTestSuite()
suite.addTests([Test('testPass'), Test('testFail')])
self.assertEqual(suite.countTestCases(), 2)
result = unittest.TestResult()
suite.run(result)
self.assertFalse(Module.wasSetUp)
self.assertFalse(Module.wasTornDown)
self.assertFalse(Test.wasSetUp)
self.assertFalse(Test.wasTornDown)
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 2)
def test_overriding_call(self):
class MySuite(unittest.TestSuite):
called = False
def __call__(self, *args, **kw):
self.called = True
unittest.TestSuite.__call__(self, *args, **kw)
suite = MySuite()
result = unittest.TestResult()
wrapper = unittest.TestSuite()
wrapper.addTest(suite)
wrapper(result)
self.assertTrue(suite.called)
# reusing results should be permitted even if abominable
self.assertFalse(result._testRunEntered)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
kvar/ansible | lib/ansible/plugins/shell/csh.py | 118 | 1217 | # Copyright (c) 2014, Chris Church <chris@ninemoreminutes.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.shell import ShellBase
DOCUMENTATION = '''
name: csh
plugin_type: shell
version_added: ""
short_description: C shell (/bin/csh)
description:
- When you have no other option than to use csh
extends_documentation_fragment:
- shell_common
'''
class ShellModule(ShellBase):
# Common shell filenames that this plugin handles
COMPATIBLE_SHELLS = frozenset(('csh', 'tcsh'))
# Family of shells this plugin handles. Must match the filename without extension
SHELL_FAMILY = 'csh'
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\\\n'
_SHELL_REDIRECT_ALLNULL = '>& /dev/null'
_SHELL_AND = '&&'
_SHELL_OR = '||'
_SHELL_SUB_LEFT = '"`'
_SHELL_SUB_RIGHT = '`"'
_SHELL_GROUP_LEFT = '('
_SHELL_GROUP_RIGHT = ')'
def env_prefix(self, **kwargs):
return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
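# Illustrative only, not part of the plugin: with the attributes defined above,
# a grouped, silenced command with an environment prefix would render roughly as
#
#     env FOO=bar ( cmd1 && cmd2 ) >& /dev/null
#
# where FOO=bar, cmd1 and cmd2 are hypothetical placeholders.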
| gpl-3.0 |
kvar/ansible | lib/ansible/parsing/utils/yaml.py | 96 | 2809 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from yaml import YAMLError
from ansible.errors import AnsibleParserError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
from ansible.module_utils._text import to_native
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.parsing.ajson import AnsibleJSONDecoder
__all__ = ('from_yaml',)
def _handle_error(yaml_exc, file_name, show_content):
'''
Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
file name/position where a YAML exception occurred, and raises an AnsibleParserError
to display the syntax exception information.
'''
# if the YAML exception contains a problem mark, use it to construct
# an object the error class can use to display the faulty line
err_obj = None
if hasattr(yaml_exc, 'problem_mark'):
err_obj = AnsibleBaseYAMLObject()
err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
err_msg = getattr(yaml_exc, 'problem', '')
raise AnsibleParserError(YAML_SYNTAX_ERROR % to_native(err_msg), obj=err_obj, show_content=show_content, orig_exc=yaml_exc)
def _safe_load(stream, file_name=None, vault_secrets=None):
''' Implements yaml.safe_load(), except using our custom loader class. '''
loader = AnsibleLoader(stream, file_name, vault_secrets)
try:
return loader.get_single_data()
finally:
try:
loader.dispose()
except AttributeError:
pass # older versions of yaml don't have dispose function, ignore
def from_yaml(data, file_name='<string>', show_content=True, vault_secrets=None):
'''
Creates a python datastructure from the given data, which can be either
a JSON or YAML string.
'''
new_data = None
try:
# in case we have to deal with vaults
AnsibleJSONDecoder.set_secrets(vault_secrets)
# we first try to load this data as JSON.
# Fixes issues with extra vars json strings not being parsed correctly by the yaml parser
new_data = json.loads(data, cls=AnsibleJSONDecoder)
except Exception:
# must not be JSON, let the rest try
try:
new_data = _safe_load(data, file_name=file_name, vault_secrets=vault_secrets)
except YAMLError as yaml_exc:
_handle_error(yaml_exc, file_name, show_content)
return new_data
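# Minimal usage sketch (hypothetical input strings, not part of this module):
#
#     from_yaml('{"a": 1}')           # parsed by the JSON fast path
#     from_yaml('a: 1\nb: [2, 3]')    # JSON parsing fails, YAML loader takes over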
| gpl-3.0 |
husigeza/pycom-micropython-sigfox | tests/basics/string_partition.py | 16 | 1026 | try:
str.partition
except AttributeError:
print("SKIP")
import sys
sys.exit()
print("asdf".partition('g'))
print("asdf".partition('a'))
print("asdf".partition('s'))
print("asdf".partition('f'))
print("asdf".partition('d'))
print("asdf".partition('asd'))
print("asdf".partition('sdf'))
print("asdf".partition('as'))
print("asdf".partition('df'))
print("asdf".partition('asdf'))
print("asdf".partition('asdfa'))
print("asdf".partition('fasdf'))
print("asdf".partition('fasdfa'))
print("abba".partition('a'))
print("abba".partition('b'))
try:
print("asdf".partition(1))
except TypeError:
print("Raised TypeError")
else:
print("Did not raise TypeError")
try:
print("asdf".partition(''))
except ValueError:
print("Raised ValueError")
else:
print("Did not raise ValueError")
# Bytes
print(b"abba".partition(b'b'))
try:
print(b"abba".partition('b'))
except TypeError:
print("Raised TypeError")
try:
print("abba".partition(b'b'))
except TypeError:
print("Raised TypeError")
| mit |
Hasimir/brython | www/src/Lib/test/test_macpath.py | 101 | 4932 | import macpath
from test import support, test_genericpath
import unittest
class MacPathTestCase(unittest.TestCase):
def test_abspath(self):
self.assertEqual(macpath.abspath("xx:yy"), "xx:yy")
def test_isabs(self):
isabs = macpath.isabs
self.assertTrue(isabs("xx:yy"))
self.assertTrue(isabs("xx:yy:"))
self.assertTrue(isabs("xx:"))
self.assertFalse(isabs("foo"))
self.assertFalse(isabs(":foo"))
self.assertFalse(isabs(":foo:bar"))
self.assertFalse(isabs(":foo:bar:"))
self.assertTrue(isabs(b"xx:yy"))
self.assertTrue(isabs(b"xx:yy:"))
self.assertTrue(isabs(b"xx:"))
self.assertFalse(isabs(b"foo"))
self.assertFalse(isabs(b":foo"))
self.assertFalse(isabs(b":foo:bar"))
self.assertFalse(isabs(b":foo:bar:"))
def test_split(self):
split = macpath.split
self.assertEqual(split("foo:bar"),
('foo:', 'bar'))
self.assertEqual(split("conky:mountpoint:foo:bar"),
('conky:mountpoint:foo', 'bar'))
self.assertEqual(split(":"), ('', ''))
self.assertEqual(split(":conky:mountpoint:"),
(':conky:mountpoint', ''))
self.assertEqual(split(b"foo:bar"),
(b'foo:', b'bar'))
self.assertEqual(split(b"conky:mountpoint:foo:bar"),
(b'conky:mountpoint:foo', b'bar'))
self.assertEqual(split(b":"), (b'', b''))
self.assertEqual(split(b":conky:mountpoint:"),
(b':conky:mountpoint', b''))
def test_join(self):
join = macpath.join
self.assertEqual(join('a', 'b'), ':a:b')
self.assertEqual(join('', 'a:b'), 'a:b')
self.assertEqual(join('a:b', 'c'), 'a:b:c')
self.assertEqual(join('a:b', ':c'), 'a:b:c')
self.assertEqual(join('a', ':b', ':c'), ':a:b:c')
self.assertEqual(join(b'a', b'b'), b':a:b')
self.assertEqual(join(b'', b'a:b'), b'a:b')
self.assertEqual(join(b'a:b', b'c'), b'a:b:c')
self.assertEqual(join(b'a:b', b':c'), b'a:b:c')
self.assertEqual(join(b'a', b':b', b':c'), b':a:b:c')
def test_splitext(self):
splitext = macpath.splitext
self.assertEqual(splitext(":foo.ext"), (':foo', '.ext'))
self.assertEqual(splitext("foo:foo.ext"), ('foo:foo', '.ext'))
self.assertEqual(splitext(".ext"), ('.ext', ''))
self.assertEqual(splitext("foo.ext:foo"), ('foo.ext:foo', ''))
self.assertEqual(splitext(":foo.ext:"), (':foo.ext:', ''))
self.assertEqual(splitext(""), ('', ''))
self.assertEqual(splitext("foo.bar.ext"), ('foo.bar', '.ext'))
self.assertEqual(splitext(b":foo.ext"), (b':foo', b'.ext'))
self.assertEqual(splitext(b"foo:foo.ext"), (b'foo:foo', b'.ext'))
self.assertEqual(splitext(b".ext"), (b'.ext', b''))
self.assertEqual(splitext(b"foo.ext:foo"), (b'foo.ext:foo', b''))
self.assertEqual(splitext(b":foo.ext:"), (b':foo.ext:', b''))
self.assertEqual(splitext(b""), (b'', b''))
self.assertEqual(splitext(b"foo.bar.ext"), (b'foo.bar', b'.ext'))
def test_ismount(self):
ismount = macpath.ismount
self.assertEqual(ismount("a:"), True)
self.assertEqual(ismount("a:b"), False)
self.assertEqual(ismount("a:b:"), True)
self.assertEqual(ismount(""), False)
self.assertEqual(ismount(":"), False)
self.assertEqual(ismount(b"a:"), True)
self.assertEqual(ismount(b"a:b"), False)
self.assertEqual(ismount(b"a:b:"), True)
self.assertEqual(ismount(b""), False)
self.assertEqual(ismount(b":"), False)
def test_normpath(self):
normpath = macpath.normpath
self.assertEqual(normpath("a:b"), "a:b")
self.assertEqual(normpath("a"), ":a")
self.assertEqual(normpath("a:b::c"), "a:c")
self.assertEqual(normpath("a:b:c:::d"), "a:d")
self.assertRaises(macpath.norm_error, normpath, "a::b")
self.assertRaises(macpath.norm_error, normpath, "a:b:::c")
self.assertEqual(normpath(":"), ":")
self.assertEqual(normpath("a:"), "a:")
self.assertEqual(normpath("a:b:"), "a:b")
self.assertEqual(normpath(b"a:b"), b"a:b")
self.assertEqual(normpath(b"a"), b":a")
self.assertEqual(normpath(b"a:b::c"), b"a:c")
self.assertEqual(normpath(b"a:b:c:::d"), b"a:d")
self.assertRaises(macpath.norm_error, normpath, b"a::b")
self.assertRaises(macpath.norm_error, normpath, b"a:b:::c")
self.assertEqual(normpath(b":"), b":")
self.assertEqual(normpath(b"a:"), b"a:")
self.assertEqual(normpath(b"a:b:"), b"a:b")
class MacCommonTest(test_genericpath.CommonTest, unittest.TestCase):
pathmodule = macpath
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
40223102/w110519 | static/Brython3.1.1-20150328-091302/Lib/jqueryui/__init__.py | 603 | 3671 | """Wrapper around the jQuery UI library
Exposes a single object, jq, to manipulate the widgets designed in the library
This object supports :
- subscription : jq[elt_id] returns an object matching the element with the
specified id
- a method get(**kw). The keywords currently supported are "selector" and
"element". With "selector", the method returns a list of instances of the
class Element, each wrapping the elements matching the CSS selector passed
jq(selector="button") : returns instances of Element for all button tags
The value can be a list or tuple of CSS selector strings :
js(selector=("input[type=submit]","a")) : instances of Element for all
"input" tags with attribute "type" set to "submit" + "a" tags (anchors)
Instances of Element have the same interface as the selections made by the
jQuery function $, with the additional methods provided by jQuery UI. For
instance, to turn an element into a dialog :
jq[elt_id].dialog()
When jQuery UI methods expect a Javascript object, they can be passed as
key/value pairs :
jq['tags'].autocomplete(source=availableTags)
"""
from browser import html, document, window
import javascript
_path = __file__[:__file__.rfind('/')]+'/'
document <= html.LINK(rel="stylesheet",
href=_path+'css/smoothness/jquery-ui.css')
# The scripts must be loaded in blocking mode, by using the function
# load(script_url[, names]) in module javascript
# If we just add them to the document with script tags, eg :
#
# document <= html.SCRIPT(script_url)
# _jqui = window.jQuery.noConflict(True)
#
# the name "jQuery" is not in the Javascript namespace until the script is
# fully loaded in the page, so "window.jQuery" raises an exception
# Load jQuery and put name 'jQuery' in the global Javascript namespace
javascript.load(_path+'jquery-1.11.2.js', ['jQuery'])
javascript.load(_path+'jquery-ui.js')
_jqui = window.jQuery.noConflict(True)
_events = ['abort',
'beforeinput',
'blur',
'click',
'compositionstart',
'compositionupdate',
'compositionend',
'dblclick',
'error',
'focus',
'focusin',
'focusout',
'input',
'keydown',
'keyup',
'load',
'mousedown',
'mouseenter',
'mouseleave',
'mousemove',
'mouseout',
'mouseover',
'mouseup',
'resize',
'scroll',
'select',
'unload']
class JQFunction:
def __init__(self, func):
self.func = func
def __call__(self, *args, **kw):
if kw:
# keyword arguments are passed as a single Javascript object
return self.func(*args, kw)
else:
return self.func(*args)
class Element:
"""Wrapper around the objects returned by jQuery selections"""
def __init__(self, item):
self.item = item
def bind(self, event, callback):
"""Binds an event on the element to function callback"""
getattr(self.item, event)(callback)
def __getattr__(self, attr):
res = getattr(self.item, attr)
if attr in _events:
# elt.click(f) is handled like elt.bind('click', f)
return lambda f:self.bind(attr, f)
if callable(res):
res = JQFunction(res)
return res
class jq:
@staticmethod
def get(**selectors):
items = []
for k,v in selectors.items():
if k=='selector':
if isinstance(v, (list, tuple)):
values = v
else:
values = [v]
for value in values:
items.append(Element(_jqui(value)))
elif k=='element':
items = Element(_jqui(v))
return items
@staticmethod
def __getitem__(element_id):
return jq.get(selector='#'+element_id)[0]
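# Minimal usage sketch (element ids are hypothetical):
#
#     jq['tags'].autocomplete(source=availableTags)  # as in the docstring above
#     for elt in jq.get(selector='button'):
#         elt.click(lambda ev: print('clicked'))     # same as elt.bind('click', f)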
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_vpn_gateways_operations.py | 1 | 48948 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations:
"""VpnGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
gateway_name: str,
**kwargs
) -> "_models.VpnGateway":
"""Retrieves the details of a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.VpnGateway",
**kwargs
) -> "_models.VpnGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.VpnGateway",
**kwargs
) -> AsyncLROPoller["_models.VpnGateway"]:
"""Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
gateway.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2020_11_01.models.VpnGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_gateway_parameters=vpn_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
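# Minimal usage sketch (client construction elided; names are hypothetical):
#
#     poller = await client.vpn_gateways.begin_create_or_update(
#         'my_rg', 'my_gateway', gateway_model)
#     gateway = await poller.result()  # waits for the long-running operation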
async def _update_tags_initial(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.TagsObject",
**kwargs
) -> Optional["_models.VpnGateway"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.VpnGateway"]:
"""Updates virtual wan vpn gateway tags.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
:type vpn_gateway_parameters: ~azure.mgmt.network.v2020_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_gateway_parameters=vpn_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gateway_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gateway_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def _reset_initial(
self,
resource_group_name: str,
gateway_name: str,
**kwargs
) -> Optional["_models.VpnGateway"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._reset_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore
async def begin_reset(
self,
resource_group_name: str,
gateway_name: str,
**kwargs
) -> AsyncLROPoller["_models.VpnGateway"]:
"""Resets the primary of the vpn gateway in the specified resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.VpnGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore
async def _start_packet_capture_initial(
self,
resource_group_name: str,
gateway_name: str,
parameters: Optional["_models.VpnGatewayPacketCaptureStartParameters"] = None,
**kwargs
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._start_packet_capture_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'VpnGatewayPacketCaptureStartParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_start_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/startpacketcapture'} # type: ignore
async def begin_start_packet_capture(
self,
resource_group_name: str,
gateway_name: str,
parameters: Optional["_models.VpnGatewayPacketCaptureStartParameters"] = None,
**kwargs
) -> AsyncLROPoller[str]:
"""Starts packet capture on vpn gateway in the specified resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param parameters: Vpn gateway packet capture parameters supplied to start packet capture on
vpn gateway.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.VpnGatewayPacketCaptureStartParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_packet_capture_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/startpacketcapture'} # type: ignore
async def _stop_packet_capture_initial(
self,
resource_group_name: str,
gateway_name: str,
parameters: Optional["_models.VpnGatewayPacketCaptureStopParameters"] = None,
**kwargs
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._stop_packet_capture_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'VpnGatewayPacketCaptureStopParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_stop_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/stoppacketcapture'} # type: ignore
async def begin_stop_packet_capture(
self,
resource_group_name: str,
gateway_name: str,
parameters: Optional["_models.VpnGatewayPacketCaptureStopParameters"] = None,
**kwargs
) -> AsyncLROPoller[str]:
"""Stops packet capture on vpn gateway in the specified resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param parameters: Vpn gateway packet capture parameters supplied to stop packet capture on vpn
gateway.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.VpnGatewayPacketCaptureStopParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_packet_capture_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/stoppacketcapture'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
"""Lists all the VpnGateways in a resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'} # type: ignore
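    # A minimal paging sketch (illustration; "network_client" is an assumed client
    # instance). AsyncItemPaged drives prepare_request/extract_data/get_next for
    # the caller, who simply iterates the returned pager:
    #   async for gateway in network_client.vpn_gateways.list_by_resource_group("rg1"):
    #       print(gateway.name)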
def list(
self,
**kwargs
) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
"""Lists all the VpnGateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'} # type: ignore
| mit |
bogdandrutu/grpc | src/python/grpcio_health_checking/grpc/health/v1/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
trustcircleglobal/tcg-gae | docs/conf.py | 1 | 8403 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tcg-gae documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import tcg_gae  # "tcg-gae" is not a valid Python identifier; the importable module is presumably tcg_gae
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tcggae'
copyright = u'2015, Kien Nguyen Trung'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = tcg_gae.__version__
# The full version, including alpha/beta/rc tags.
release = tcg_gae.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tcg-gaedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'tcg-gae.tex',
u'tcggae Documentation',
u'Kien Nguyen Trung', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tcg-gae',
u'tcggae Documentation',
[u'Kien Nguyen Trung'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tcg-gae',
u'tcggae Documentation',
u'Kien Nguyen Trung',
'tcg-gae',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| isc |
guilhem/LGE975_G_Kitkat_Android_V20a_Kernel | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
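        # Each IA-64 bundle is 16 bytes and holds 3 instruction slots, hence the
        # factor below; e.g. a function spanning [0x0-0x40] is 4 bundles = 12 slots.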
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
tux-00/ansible | lib/ansible/modules/network/cloudengine/ce_link_status.py | 42 | 22114 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_link_status
version_added: "2.4"
short_description: Get interface link status on HUAWEI CloudEngine switches.
description:
- Get interface link status on HUAWEI CloudEngine switches.
author:
- Zhijin Zhou (@CloudEngine-Ansible)
notes:
- Current physical state shows an interface's physical status.
- Current link state shows an interface's link layer protocol status.
- Current IPv4 state shows an interface's IPv4 protocol status.
- Current IPv6 state shows an interface's IPv6 protocol status.
- Inbound octets(bytes) shows the number of bytes that an interface received.
- Inbound unicast(pkts) shows the number of unicast packets that an interface received.
- Inbound multicast(pkts) shows the number of multicast packets that an interface received.
- Inbound broadcast(pkts) shows the number of broadcast packets that an interface received.
- Inbound error(pkts) shows the number of error packets that an interface received.
- Inbound drop(pkts) shows the total number of packets that were sent to the interface but dropped by an interface.
- Inbound rate(byte/sec) shows the rate at which an interface receives bytes within an interval.
- Inbound rate(pkts/sec) shows the rate at which an interface receives packets within an interval.
- Outbound octets(bytes) shows the number of the bytes that an interface sent.
- Outbound unicast(pkts) shows the number of unicast packets that an interface sent.
- Outbound multicast(pkts) shows the number of multicast packets that an interface sent.
- Outbound broadcast(pkts) shows the number of broadcast packets that an interface sent.
- Outbound error(pkts) shows the total number of packets that an interface sent but dropped by the remote interface.
- Outbound drop(pkts) shows the number of dropped packets that an interface sent.
- Outbound rate(byte/sec) shows the rate at which an interface sends bytes within an interval.
- Outbound rate(pkts/sec) shows the rate at which an interface sends packets within an interval.
- Speed shows the rate for an Ethernet interface.
options:
interface:
description:
      - For the interface parameter, you can enter C(all) to display information about all interfaces,
an interface type such as C(40GE) to display information about interfaces of the specified type,
or full name of an interface such as C(40GE1/0/22) or C(vlanif10)
to display information about the specific interface.
required: true
'''
EXAMPLES = '''
- name: Link status test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Get specified interface link status information
ce_link_status:
interface: 40GE1/0/1
provider: "{{ cli }}"
- name: Get specified interface type link status information
ce_link_status:
interface: 40GE
provider: "{{ cli }}"
- name: Get all interface link status information
ce_link_status:
interface: all
provider: "{{ cli }}"
'''
RETURN = '''
result:
description: Interface link status information
returned: always
type: dict
sample: {
"40ge2/0/8": {
"Current IPv4 state": "down",
"Current IPv6 state": "down",
"Current link state": "up",
"Current physical state": "up",
"Inbound broadcast(pkts)": "0",
"Inbound drop(pkts)": "0",
"Inbound error(pkts)": "0",
"Inbound multicast(pkts)": "20151",
"Inbound octets(bytes)": "7314813",
"Inbound rate(byte/sec)": "11",
"Inbound rate(pkts/sec)": "0",
"Inbound unicast(pkts)": "0",
"Outbound broadcast(pkts)": "1",
"Outbound drop(pkts)": "0",
"Outbound error(pkts)": "0",
"Outbound multicast(pkts)": "20152",
"Outbound octets(bytes)": "7235021",
"Outbound rate(byte/sec)": "11",
"Outbound rate(pkts/sec)": "0",
"Outbound unicast(pkts)": "0",
"Speed": "40GE"
}
}
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import ce_argument_spec, get_nc_config
CE_NC_GET_PORT_SPEED = """
<filter type="subtree">
<devm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ports>
<port>
<position>%s</position>
<ethernetPort>
<speed></speed>
</ethernetPort>
</port>
</ports>
</devm>
</filter>
"""
CE_NC_GET_INT_STATISTICS = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<ifDynamicInfo>
<ifPhyStatus></ifPhyStatus>
<ifLinkStatus></ifLinkStatus>
<ifV4State></ifV4State>
<ifV6State></ifV6State>
</ifDynamicInfo>
<ifStatistics>
<receiveByte></receiveByte>
<sendByte></sendByte>
<rcvUniPacket></rcvUniPacket>
<rcvMutiPacket></rcvMutiPacket>
<rcvBroadPacket></rcvBroadPacket>
<sendUniPacket></sendUniPacket>
<sendMutiPacket></sendMutiPacket>
<sendBroadPacket></sendBroadPacket>
<rcvErrorPacket></rcvErrorPacket>
<rcvDropPacket></rcvDropPacket>
<sendErrorPacket></sendErrorPacket>
<sendDropPacket></sendDropPacket>
</ifStatistics>
<ifClearedStat>
<inByteRate></inByteRate>
<inPacketRate></inPacketRate>
<outByteRate></outByteRate>
<outPacketRate></outPacketRate>
</ifClearedStat>
</interface>
</interfaces>
</ifm>
</filter>
"""
INTERFACE_ALL = 1
INTERFACE_TYPE = 2
INTERFACE_FULL_NAME = 3
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-Port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
def is_ethernet_port(interface):
"""Judge whether it is ethernet port"""
ethernet_port = ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'meth']
if_type = get_interface_type(interface)
if if_type in ethernet_port:
return True
return False
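# Worked examples for the two helpers above (traced from the code, for illustration):
#   get_interface_type('40GE1/0/22') -> '40ge'
#   get_interface_type('Vlanif10') -> 'vlanif'
#   get_interface_type('Serial0') -> None (unrecognized prefix)
#   is_ethernet_port('40GE1/0/22') -> True; is_ethernet_port('Vlanif10') -> False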
class LinkStatus(object):
"""Get interface link status information"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# interface name
self.interface = self.module.params['interface']
self.interface = self.interface.replace(' ', '').lower()
self.param_type = None
self.if_type = None
# state
self.results = dict()
self.result = dict()
def check_params(self):
"""Check all input params"""
if not self.interface:
self.module.fail_json(msg='Error: Interface name cannot be empty.')
if self.interface and self.interface != 'all':
if not self.if_type:
self.module.fail_json(
msg='Error: Interface name of %s is error.' % self.interface)
def init_module(self):
"""Init module object"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def show_result(self):
"""Show result"""
self.results['result'] = self.result
self.module.exit_json(**self.results)
def get_intf_dynamic_info(self, dyn_info, intf_name):
"""Get interface dynamic information"""
if not intf_name:
return
if dyn_info:
for eles in dyn_info:
if eles.tag in ["ifPhyStatus", "ifV4State", "ifV6State", "ifLinkStatus"]:
if eles.tag == "ifPhyStatus":
self.result[intf_name][
'Current physical state'] = eles.text
elif eles.tag == "ifLinkStatus":
self.result[intf_name][
'Current link state'] = eles.text
elif eles.tag == "ifV4State":
self.result[intf_name][
'Current IPv4 state'] = eles.text
elif eles.tag == "ifV6State":
self.result[intf_name][
'Current IPv6 state'] = eles.text
def get_intf_statistics_info(self, stat_info, intf_name):
"""Get interface statistics information"""
if not intf_name:
return
if_type = get_interface_type(intf_name)
if if_type == 'fcoe-port' or if_type == 'nve' or if_type == 'tunnel' or \
if_type == 'vbdif' or if_type == 'vlanif':
return
if stat_info:
for eles in stat_info:
if eles.tag in ["receiveByte", "sendByte", "rcvUniPacket", "rcvMutiPacket", "rcvBroadPacket",
"sendUniPacket", "sendMutiPacket", "sendBroadPacket", "rcvErrorPacket",
"rcvDropPacket", "sendErrorPacket", "sendDropPacket"]:
if eles.tag == "receiveByte":
self.result[intf_name][
'Inbound octets(bytes)'] = eles.text
elif eles.tag == "rcvUniPacket":
self.result[intf_name][
'Inbound unicast(pkts)'] = eles.text
elif eles.tag == "rcvMutiPacket":
self.result[intf_name][
'Inbound multicast(pkts)'] = eles.text
elif eles.tag == "rcvBroadPacket":
self.result[intf_name][
'Inbound broadcast(pkts)'] = eles.text
elif eles.tag == "rcvErrorPacket":
self.result[intf_name][
'Inbound error(pkts)'] = eles.text
elif eles.tag == "rcvDropPacket":
self.result[intf_name][
'Inbound drop(pkts)'] = eles.text
elif eles.tag == "sendByte":
self.result[intf_name][
'Outbound octets(bytes)'] = eles.text
elif eles.tag == "sendUniPacket":
self.result[intf_name][
'Outbound unicast(pkts)'] = eles.text
elif eles.tag == "sendMutiPacket":
self.result[intf_name][
'Outbound multicast(pkts)'] = eles.text
elif eles.tag == "sendBroadPacket":
self.result[intf_name][
'Outbound broadcast(pkts)'] = eles.text
elif eles.tag == "sendErrorPacket":
self.result[intf_name][
'Outbound error(pkts)'] = eles.text
elif eles.tag == "sendDropPacket":
self.result[intf_name][
'Outbound drop(pkts)'] = eles.text
def get_intf_cleared_stat(self, clr_stat, intf_name):
"""Get interface cleared state information"""
if not intf_name:
return
if_type = get_interface_type(intf_name)
if if_type == 'fcoe-port' or if_type == 'nve' or if_type == 'tunnel' or \
if_type == 'vbdif' or if_type == 'vlanif':
return
if clr_stat:
for eles in clr_stat:
if eles.tag in ["inByteRate", "inPacketRate", "outByteRate", "outPacketRate"]:
if eles.tag == "inByteRate":
self.result[intf_name][
'Inbound rate(byte/sec)'] = eles.text
elif eles.tag == "inPacketRate":
self.result[intf_name][
'Inbound rate(pkts/sec)'] = eles.text
elif eles.tag == "outByteRate":
self.result[intf_name][
'Outbound rate(byte/sec)'] = eles.text
elif eles.tag == "outPacketRate":
self.result[intf_name][
'Outbound rate(pkts/sec)'] = eles.text
def get_all_interface_info(self, intf_type=None):
"""Get interface information all or by interface type"""
xml_str = CE_NC_GET_INT_STATISTICS % ''
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get link status information
root = ElementTree.fromstring(xml_str)
intfs_info = root.find("data/ifm/interfaces")
        # find() returns None on a miss; avoid truth-testing Elements here
        if intfs_info is None:
return
intf_name = ''
flag = False
for eles in intfs_info:
if eles.tag == "interface":
for ele in eles:
if ele.tag in ["ifName", "ifDynamicInfo", "ifStatistics", "ifClearedStat"]:
if ele.tag == "ifName":
intf_name = ele.text.lower()
if intf_type:
if get_interface_type(intf_name) != intf_type.lower():
break
else:
flag = True
self.init_interface_data(intf_name)
if is_ethernet_port(intf_name):
self.get_port_info(intf_name)
if ele.tag == "ifDynamicInfo":
self.get_intf_dynamic_info(ele, intf_name)
elif ele.tag == "ifStatistics":
self.get_intf_statistics_info(ele, intf_name)
elif ele.tag == "ifClearedStat":
self.get_intf_cleared_stat(ele, intf_name)
if intf_type and not flag:
self.module.fail_json(
msg='Error: %s interface type does not exist.' % intf_type.upper())
def get_interface_info(self):
"""Get interface information"""
xml_str = CE_NC_GET_INT_STATISTICS % self.interface.upper()
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
self.module.fail_json(
msg='Error: %s interface does not exist.' % self.interface.upper())
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get link status information
root = ElementTree.fromstring(xml_str)
intf_info = root.find("data/ifm/interfaces/interface")
        if intf_info is not None:
for eles in intf_info:
if eles.tag in ["ifDynamicInfo", "ifStatistics", "ifClearedStat"]:
if eles.tag == "ifDynamicInfo":
self.get_intf_dynamic_info(eles, self.interface)
elif eles.tag == "ifStatistics":
self.get_intf_statistics_info(eles, self.interface)
elif eles.tag == "ifClearedStat":
self.get_intf_cleared_stat(eles, self.interface)
def init_interface_data(self, intf_name):
"""Init interface data"""
# init link status data
self.result[intf_name] = dict()
self.result[intf_name]['Current physical state'] = 'down'
self.result[intf_name]['Current link state'] = 'down'
self.result[intf_name]['Current IPv4 state'] = 'down'
self.result[intf_name]['Current IPv6 state'] = 'down'
self.result[intf_name]['Inbound octets(bytes)'] = '--'
self.result[intf_name]['Inbound unicast(pkts)'] = '--'
self.result[intf_name]['Inbound multicast(pkts)'] = '--'
self.result[intf_name]['Inbound broadcast(pkts)'] = '--'
self.result[intf_name]['Inbound error(pkts)'] = '--'
self.result[intf_name]['Inbound drop(pkts)'] = '--'
self.result[intf_name]['Inbound rate(byte/sec)'] = '--'
self.result[intf_name]['Inbound rate(pkts/sec)'] = '--'
self.result[intf_name]['Outbound octets(bytes)'] = '--'
self.result[intf_name]['Outbound unicast(pkts)'] = '--'
self.result[intf_name]['Outbound multicast(pkts)'] = '--'
self.result[intf_name]['Outbound broadcast(pkts)'] = '--'
self.result[intf_name]['Outbound error(pkts)'] = '--'
self.result[intf_name]['Outbound drop(pkts)'] = '--'
self.result[intf_name]['Outbound rate(byte/sec)'] = '--'
self.result[intf_name]['Outbound rate(pkts/sec)'] = '--'
self.result[intf_name]['Speed'] = '--'
def get_port_info(self, interface):
"""Get port information"""
if_type = get_interface_type(interface)
if if_type == 'meth':
xml_str = CE_NC_GET_PORT_SPEED % interface.lower().replace('meth', 'MEth')
else:
xml_str = CE_NC_GET_PORT_SPEED % interface.upper()
con_obj = get_nc_config(self.module, xml_str)
if "<data/>" in con_obj:
return
xml_str = con_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get link status information
root = ElementTree.fromstring(xml_str)
port_info = root.find("data/devm/ports/port")
        if port_info is not None:
for eles in port_info:
if eles.tag == "ethernetPort":
for ele in eles:
if ele.tag == 'speed':
self.result[interface]['Speed'] = ele.text
def get_link_status(self):
"""Get link status information"""
if self.param_type == INTERFACE_FULL_NAME:
self.init_interface_data(self.interface)
self.get_interface_info()
if is_ethernet_port(self.interface):
self.get_port_info(self.interface)
elif self.param_type == INTERFACE_TYPE:
self.get_all_interface_info(self.interface)
else:
self.get_all_interface_info()
def get_intf_param_type(self):
"""Get the type of input interface parameter"""
if self.interface == 'all':
self.param_type = INTERFACE_ALL
return
if self.if_type == self.interface:
self.param_type = INTERFACE_TYPE
return
self.param_type = INTERFACE_FULL_NAME
def work(self):
"""Worker"""
self.if_type = get_interface_type(self.interface)
self.check_params()
self.get_intf_param_type()
self.get_link_status()
self.show_result()
def main():
"""Main function entry"""
argument_spec = dict(
interface=dict(required=True, type='str'),
)
argument_spec.update(ce_argument_spec)
linkstatus_obj = LinkStatus(argument_spec)
linkstatus_obj.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
N-Parsons/exercism-python | exercises/dnd-character/dnd_character_test.py | 3 | 2411 | import unittest
from dnd_character import Character, modifier
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0
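# The cases below are consistent with the standard D&D ability modifier formula,
# i.e. a sketch of what dnd_character.modifier is expected to compute:
#   modifier(score) == (score - 10) // 2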
class DnDCharacterTest(unittest.TestCase):
def test_modifier_for_score_3_is_n4(self):
self.assertEqual(modifier(3), -4)
def test_modifier_for_score_4_is_n3(self):
self.assertEqual(modifier(4), -3)
def test_modifier_for_score_5_is_n3(self):
self.assertEqual(modifier(5), -3)
def test_modifier_for_score_6_is_n2(self):
self.assertEqual(modifier(6), -2)
def test_modifier_for_score_7_is_n2(self):
self.assertEqual(modifier(7), -2)
def test_modifier_for_score_8_is_n1(self):
self.assertEqual(modifier(8), -1)
def test_modifier_for_score_9_is_n1(self):
self.assertEqual(modifier(9), -1)
def test_modifier_for_score_10_is_0(self):
self.assertEqual(modifier(10), 0)
def test_modifier_for_score_11_is_0(self):
self.assertEqual(modifier(11), 0)
def test_modifier_for_score_12_is_1(self):
self.assertEqual(modifier(12), 1)
def test_modifier_for_score_13_is_1(self):
self.assertEqual(modifier(13), 1)
def test_modifier_for_score_14_is_2(self):
self.assertEqual(modifier(14), 2)
def test_modifier_for_score_15_is_2(self):
self.assertEqual(modifier(15), 2)
def test_modifier_for_score_16_is_3(self):
self.assertEqual(modifier(16), 3)
def test_modifier_for_score_17_is_3(self):
self.assertEqual(modifier(17), 3)
def test_modifier_for_score_18_is_4(self):
self.assertEqual(modifier(18), 4)
def test_random_ability_is_within_range(self):
self.assertIn(Character().ability(), range(3, 19))
def test_random_character_is_valid(self):
Char = Character()
self.assertIn(Char.strength, range(3, 19))
self.assertIn(Char.dexterity, range(3, 19))
self.assertIn(Char.constitution, range(3, 19))
self.assertIn(Char.intelligence, range(3, 19))
self.assertIn(Char.wisdom, range(3, 19))
self.assertIn(Char.charisma, range(3, 19))
self.assertEqual(
Char.hitpoints,
10 + modifier(Char.constitution))
def test_each_ability_is_only_calculated_once(self):
Char = Character()
self.assertEqual(Char.strength, Char.strength)
if __name__ == '__main__':
unittest.main()
| mit |
Spredzy/dotfiles | vim/bundle/ultisnips/plugin/UltiSnips/__init__.py | 2 | 37546 | #!/usr/bin/env python
# encoding: utf-8
from functools import wraps
from collections import deque, defaultdict
import glob
import hashlib
import os
import re
import traceback
from UltiSnips.compatibility import as_unicode, byte2col
from UltiSnips._diff import diff, guess_edit
from UltiSnips.geometry import Position
from UltiSnips.text_objects import SnippetInstance
from UltiSnips.util import IndentUtil
import UltiSnips._vim as _vim
def err_to_scratch_buffer(f):
@wraps(f)
def wrapper(self, *args, **kwds):
try:
return f(self, *args, **kwds)
except:
s = \
"""An error occured. This is either a bug in UltiSnips or a bug in a
snippet definition. If you think this is a bug, please report it to
https://bugs.launchpad.net/ultisnips/+filebug.
Following is the full stack trace:
"""
s += traceback.format_exc()
self.leaving_buffer() # Vim sends no WinLeave msg here.
_vim.new_scratch_buffer(s)
return wrapper
class _SnippetDictionary(object):
def __init__(self, *args, **kwargs):
self._added = []
self.reset()
def add_snippet(self, s, fn=None):
if fn:
self._snippets.append(s)
if fn not in self.files:
self.addfile(fn)
else:
self._added.append(s)
def get_matching_snippets(self, trigger, potentially):
"""Returns all snippets matching the given trigger."""
if not potentially:
return [ s for s in self.snippets if s.matches(trigger) ]
else:
return [ s for s in self.snippets if s.could_match(trigger) ]
@property
def snippets(self):
return self._added + self._snippets
def clear_snippets(self, triggers=[]):
"""Remove all snippets that match each trigger in triggers.
When triggers is empty, removes all snippets.
"""
if triggers:
for t in triggers:
for s in self.get_matching_snippets(t, potentially=False):
if s in self._snippets:
self._snippets.remove(s)
if s in self._added:
self._added.remove(s)
else:
self._snippets = []
self._added = []
@property
def files(self):
return self._files
def reset(self):
self._snippets = []
self._extends = []
self._files = {}
def _hash(self, path):
if not os.path.isfile(path):
return False
return hashlib.sha1(open(path, "rb").read()).hexdigest()
def addfile(self, path):
self.files[path] = self._hash(path)
def needs_update(self):
for path, hash in self.files.items():
if not hash or hash != self._hash(path):
return True
return False
def extends():
def fget(self):
return self._extends
def fset(self, value):
self._extends = value
return locals()
extends = property(**extends())
class _SnippetsFileParser(object):
def __init__(self, ft, fn, snip_manager, file_data=None):
self._sm = snip_manager
self._ft = ft
self._fn = fn
self._globals = {}
if file_data is None:
self._lines = open(fn).readlines()
else:
self._lines = file_data.splitlines(True)
self._idx = 0
def _error(self, msg):
fn = _vim.eval("""fnamemodify(%s, ":~:.")""" % _vim.escape(self._fn))
self._sm._error("%s in %s(%d)" % (msg, fn, self._idx + 1))
def _line(self):
if self._idx < len(self._lines):
line = self._lines[self._idx]
else:
line = ""
return line
def _line_head_tail(self):
parts = re.split(r"\s+", self._line().rstrip(), maxsplit=1)
parts.append('')
return parts[:2]
def _line_head(self):
return self._line_head_tail()[0]
def _line_tail(self):
return self._line_head_tail()[1]
def _goto_next_line(self):
self._idx += 1
return self._line()
def _parse_first(self, line):
""" Parses the first line of the snippet definition. Returns the
snippet type, trigger, description, and options in a tuple in that
order.
"""
cdescr = ""
coptions = ""
cs = ""
# Ensure this is a snippet
snip = line.split()[0]
# Get and strip options if they exist
remain = line[len(snip):].strip()
words = remain.split()
if len(words) > 2:
# second to last word ends with a quote
if '"' not in words[-1] and words[-2][-1] == '"':
coptions = words[-1]
remain = remain[:-len(coptions) - 1].rstrip()
# Get and strip description if it exists
remain = remain.strip()
if len(remain.split()) > 1 and remain[-1] == '"':
left = remain[:-1].rfind('"')
if left != -1 and left != 0:
cdescr, remain = remain[left:], remain[:left]
# The rest is the trigger
cs = remain.strip()
if len(cs.split()) > 1 or "r" in coptions:
if cs[0] != cs[-1]:
self._error("Invalid multiword trigger: '%s'" % cs)
cs = ""
else:
cs = cs[1:-1]
return (snip, cs, cdescr, coptions)
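    # A worked example of the parse above (traced, for illustration):
    #   'snippet trig "Some description" b'
    #   -> ('snippet', 'trig', '"Some description"', 'b')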
def _parse_snippet(self):
line = self._line()
(snip, trig, desc, opts) = self._parse_first(line)
end = "end" + snip
cv = ""
while self._goto_next_line():
line = self._line()
if line.rstrip() == end:
cv = cv[:-1] # Chop the last newline
break
cv += line
else:
self._error("Missing 'endsnippet' for %r" % trig)
return None
if not trig:
# there was an error
return None
elif snip == "global":
# add snippet contents to file globals
if trig not in self._globals:
self._globals[trig] = []
self._globals[trig].append(cv)
elif snip == "snippet":
self._sm.add_snippet(trig, cv, desc, opts, self._ft, self._globals, fn=self._fn)
else:
self._error("Invalid snippet type: '%s'" % snip)
def parse(self):
while self._line():
head, tail = self._line_head_tail()
if head == "extends":
if tail:
self._sm.add_extending_info(self._ft,
[ p.strip() for p in tail.split(',') ])
else:
self._error("'extends' without file types")
elif head in ("snippet", "global"):
self._parse_snippet()
elif head == "clearsnippets":
self._sm.clear_snippets(tail.split(), self._ft)
elif head and not head.startswith('#'):
self._error("Invalid line %r" % self._line().rstrip())
break
self._goto_next_line()
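# A minimal file in the format parse() accepts (an illustration, not a snippet
# shipped with this plugin):
#   extends html, css
#   snippet req "require statement" b
#   var ${1:mod} = require('$1');
#   endsnippet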
class Snippet(object):
_INDENT = re.compile(r"^[ \t]*")
_TABS = re.compile(r"^\t*")
def __init__(self, trigger, value, descr, options, globals):
self._t = as_unicode(trigger)
self._v = as_unicode(value)
self._d = as_unicode(descr)
self._opts = options
self._matched = ""
self._last_re = None
self._globals = globals
def __repr__(self):
return "Snippet(%s,%s,%s)" % (self._t,self._d,self._opts)
def _words_for_line(self, before, num_words=None):
""" Gets the final num_words words from before.
If num_words is None, then use the number of words in
the trigger.
"""
words = ''
if not len(before):
return ''
if num_words is None:
num_words = len(self._t.split())
word_list = before.split()
if len(word_list) <= num_words:
return before.strip()
else:
before_words = before
for i in range(-1, -(num_words + 1), -1):
left = before_words.rfind(word_list[i])
before_words = before_words[:left]
return before[len(before_words):].strip()
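    # e.g. with a two-word trigger, _words_for_line("foo bar baz") returns
    # "bar baz" (traced from the loop above, for illustration)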
def _re_match(self, trigger):
""" Test if a the current regex trigger matches
`trigger`. If so, set _last_re and _matched.
"""
for match in re.finditer(self._t, trigger):
if match.end() != len(trigger):
continue
else:
self._matched = trigger[match.start():match.end()]
self._last_re = match
return match
return False
def has_option(self, opt):
""" Check if the named option is set """
return opt in self._opts
def matches(self, trigger):
# If user supplies both "w" and "i", it should perhaps be an
# error, but if permitted it seems that "w" should take precedence
# (since matching at word boundary and within a word == matching at word
# boundary).
self._matched = ""
# Don't expand on whitespace
if trigger and trigger.rstrip() != trigger:
return False
words = self._words_for_line(trigger)
if "r" in self._opts:
match = self._re_match(trigger)
elif "w" in self._opts:
words_len = len(self._t)
words_prefix = words[:-words_len]
words_suffix = words[-words_len:]
match = (words_suffix == self._t)
if match and words_prefix:
# Require a word boundary between prefix and suffix.
boundaryChars = words_prefix[-1:] + words_suffix[:1]
match = re.match(r'.\b.', boundaryChars)
elif "i" in self._opts:
match = words.endswith(self._t)
else:
match = (words == self._t)
# By default, we match the whole trigger
if match and not self._matched:
self._matched = self._t
        # Ensure the match was on a word boundary, if needed
if "b" in self._opts and match:
text_before = trigger.rstrip()[:-len(self._matched)]
if text_before.strip(" \t") != '':
self._matched = ""
return False
return match
def could_match(self, trigger):
self._matched = ""
# List all on whitespace.
if trigger and trigger[-1] in (" ", "\t"):
trigger = ""
        if trigger and trigger.rstrip() != trigger:  # compare by value, not identity
return False
words = self._words_for_line(trigger)
if "r" in self._opts:
# Test for full match only
match = self._re_match(trigger)
elif "w" in self._opts:
# Trim non-empty prefix up to word boundary, if present.
words_suffix = re.sub(r'^.+\b(.+)$', r'\1', words)
match = self._t.startswith(words_suffix)
self._matched = words_suffix
# TODO: list_snippets() function cannot handle partial-trigger
# matches yet, so for now fail if we trimmed the prefix.
if words_suffix != words:
match = False
elif "i" in self._opts:
# TODO: It is hard to define when a inword snippet could match,
# therefore we check only for full-word trigger.
match = self._t.startswith(words)
else:
match = self._t.startswith(words)
# By default, we match the words from the trigger
if match and not self._matched:
self._matched = words
        # Ensure the match was on a word boundary, if needed
if "b" in self._opts and match:
text_before = trigger.rstrip()[:-len(self._matched)]
if text_before.strip(" \t") != '':
self._matched = ""
return False
return match
@property
def overwrites_previous(self):
return "!" in self._opts
@property
def description(self):
return ("(%s) %s" % (self._t, self._d)).strip()
@property
def trigger(self):
return self._t
@property
def matched(self):
""" The last text that was matched. """
return self._matched
def launch(self, text_before, visual_content, parent, start, end):
indent = self._INDENT.match(text_before).group(0)
lines = (self._v + "\n").splitlines()
ind_util = IndentUtil()
# Replace leading tabs in the snippet definition via proper indenting
v = []
for line_num, line in enumerate(lines):
if "t" in self._opts:
tabs = 0
else:
tabs = len(self._TABS.match(line).group(0))
line_ind = ind_util.ntabs_to_proper_indent(tabs)
if line_num != 0:
line_ind = indent + line_ind
v.append(line_ind + line[tabs:])
v = '\n'.join(v)
si = SnippetInstance(self, parent, indent, v, start, end, visual_content,
last_re = self._last_re, globals = self._globals)
return si
class VisualContentPreserver(object):
def __init__(self):
self.reset()
def reset(self):
self._mode = ""
self._text = as_unicode("")
def conserve(self):
sl, sbyte = map(int, (_vim.eval("""line("'<")"""), _vim.eval("""col("'<")""")))
el, ebyte = map(int, (_vim.eval("""line("'>")"""), _vim.eval("""col("'>")""")))
sc = byte2col(sl, sbyte - 1)
ec = byte2col(el, ebyte - 1)
self._mode = _vim.eval("visualmode()")
def _vim_line_with_eol(ln):
return _vim.buf[ln] + '\n'
if sl == el:
text = _vim_line_with_eol(sl-1)[sc:ec+1]
else:
text = _vim_line_with_eol(sl-1)[sc:]
for cl in range(sl,el-1):
text += _vim_line_with_eol(cl)
text += _vim_line_with_eol(el-1)[:ec+1]
self._text = text
@property
def text(self):
return self._text
@property
def mode(self):
return self._mode
class _VimPosition(Position):
def __init__(self):
pos = _vim.buf.cursor
self._mode = _vim.eval("mode()")
self._visualmode = _vim.eval("visualmode()")
Position.__init__(self, pos.line, pos.col)
@property
def mode(self):
return self._mode
@property
def visualmode(self):
return self._visualmode
class VimState(object):
def __init__(self):
"""
This class caches some state information from Vim to better
guess what editing tasks the user might have done in the last step
"""
self._poss = deque(maxlen=5)
self._lvb = None
def remember_position(self):
self._poss.append(_VimPosition())
def remember_buffer(self, to):
self._lvb = _vim.buf[to.start.line:to.end.line+1]
self._lvb_len = len(_vim.buf)
self.remember_position()
@property
def diff_in_buffer_length(self):
return len(_vim.buf) - self._lvb_len
@property
def pos(self):
return self._poss[-1]
@property
def ppos(self):
return self._poss[-2]
@property
def remembered_buffer(self):
return self._lvb[:]
class SnippetManager(object):
def __init__(self):
self._supertab_keys = None
self._csnippets = []
self.reset()
@err_to_scratch_buffer
def reset(self, test_error=False):
self._vstate = VimState()
self._test_error = test_error
self._snippets = {}
self._filetypes = defaultdict(lambda: ['all'])
self._visual_content = VisualContentPreserver()
while len(self._csnippets):
self._current_snippet_is_done()
self._reinit()
@err_to_scratch_buffer
def jump_forwards(self):
_vim.command("let g:ulti_jump_forwards_res = 1")
if not self._jump():
_vim.command("let g:ulti_jump_forwards_res = 0")
return self._handle_failure(self.forward_trigger)
@err_to_scratch_buffer
def jump_backwards(self):
_vim.command("let g:ulti_jump_backwards_res = 1")
if not self._jump(True):
_vim.command("let g:ulti_jump_backwards_res = 0")
return self._handle_failure(self.backward_trigger)
@err_to_scratch_buffer
def expand(self):
_vim.command("let g:ulti_expand_res = 1")
if not self._try_expand():
_vim.command("let g:ulti_expand_res = 0")
self._handle_failure(self.expand_trigger)
@err_to_scratch_buffer
def list_snippets_dict(self):
before, after = _vim.buf.current_line_splitted
snippets = self._snips(before, True)
# Sort snippets alphabetically
snippets.sort(key=lambda x: x.trigger)
for snip in snippets:
description = snip.description[snip.description.find(snip.trigger) +
len(snip.trigger) + 2:]
key = as_unicode(snip.trigger)
description = as_unicode(description)
#remove surrounding "" or '' in snippet description if it exists
if len(description) > 2:
if description[0] == description[-1] and description[0] in ['"', "'"]:
description = description[1:-1]
_vim.command(as_unicode("let g:current_ulti_dict['{key}'] = '{val}'").format(
key=key.replace("'", "''"), val=description.replace("'", "''")))
@err_to_scratch_buffer
def list_snippets(self):
before, after = _vim.buf.current_line_splitted
snippets = self._snips(before, True)
if len(snippets) == 0:
self._handle_failure(self.backward_trigger)
return True
# Sort snippets alphabetically
snippets.sort(key=lambda x: x.trigger)
if not snippets:
return True
snippet = self._ask_snippets(snippets)
if not snippet:
return True
self._do_snippet(snippet, before, after)
return True
@err_to_scratch_buffer
def expand_or_jump(self):
"""
        This function is used for people who want to have the same trigger for
        expansion and forward jumping. It first tries to expand a snippet; if
        this fails, it tries to jump forward.
"""
_vim.command('let g:ulti_expand_or_jump_res = 1')
rv = self._try_expand()
if not rv:
_vim.command('let g:ulti_expand_or_jump_res = 2')
rv = self._jump()
if not rv:
_vim.command('let g:ulti_expand_or_jump_res = 0')
self._handle_failure(self.expand_trigger)
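    # In a vimrc this is commonly bound to one key via UltiSnips' public options
    # (shown as an illustration):
    #   let g:UltiSnipsExpandTrigger="<tab>"
    #   let g:UltiSnipsJumpForwardTrigger="<tab>"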
@err_to_scratch_buffer
def save_last_visual_selection(self):
"""
This is called when the expand trigger is pressed in visual mode.
Our job is to remember everything between '< and '> and pass it on to
        ${VISUAL} in case it is needed later.
"""
self._visual_content.conserve()
def snippet_dict(self, ft):
if ft not in self._snippets:
self._snippets[ft] = _SnippetDictionary()
return self._snippets[ft]
@err_to_scratch_buffer
def add_snippet(self, trigger, value, descr, options, ft = "all", globals = None, fn=None):
l = self.snippet_dict(ft).add_snippet(
Snippet(trigger, value, descr, options, globals or {}), fn
)
@err_to_scratch_buffer
def add_snippet_file(self, ft, path):
sd = self.snippet_dict(ft)
sd.addfile(path)
@err_to_scratch_buffer
def expand_anon(self, value, trigger="", descr="", options="", globals=None):
if globals is None:
globals = {}
before, after = _vim.buf.current_line_splitted
snip = Snippet(trigger, value, descr, options, globals)
if not trigger or snip.matches(before):
self._do_snippet(snip, before, after)
return True
else:
return False
@err_to_scratch_buffer
def clear_snippets(self, triggers = [], ft = "all"):
if ft in self._snippets:
self._snippets[ft].clear_snippets(triggers)
@err_to_scratch_buffer
def add_extending_info(self, ft, parents):
sd = self.snippet_dict(ft)
for p in parents:
if p in sd.extends:
continue
sd.extends.append(p)
@err_to_scratch_buffer
def cursor_moved(self):
self._vstate.remember_position()
if _vim.eval("mode()") not in 'in':
return
if self._ignore_movements:
self._ignore_movements = False
return
if self._csnippets:
cstart = self._csnippets[0].start.line
cend = self._csnippets[0].end.line + self._vstate.diff_in_buffer_length
ct = _vim.buf[cstart:cend + 1]
lt = self._vstate.remembered_buffer
pos = _vim.buf.cursor
lt_span = [0, len(lt)]
ct_span = [0, len(ct)]
initial_line = cstart
# Cut down on lines searched for changes. Start from behind and
# remove all equal lines. Then do the same from the front.
if lt and ct:
while (lt[lt_span[1]-1] == ct[ct_span[1]-1] and
self._vstate.ppos.line < initial_line + lt_span[1]-1 and pos.line < initial_line + ct_span[1]-1 and
(lt_span[0] < lt_span[1]) and
(ct_span[0] < ct_span[1])):
ct_span[1] -= 1
lt_span[1] -= 1
while (lt_span[0] < lt_span[1] and
ct_span[0] < ct_span[1] and
lt[lt_span[0]] == ct[ct_span[0]] and
self._vstate.ppos.line >= initial_line and pos.line >= initial_line):
ct_span[0] += 1
lt_span[0] += 1
initial_line += 1
ct_span[0] = max(0, ct_span[0] - 1)
lt_span[0] = max(0, lt_span[0] - 1)
initial_line = max(cstart, initial_line - 1)
lt = lt[lt_span[0]:lt_span[1]]
ct = ct[ct_span[0]:ct_span[1]]
try:
rv, es = guess_edit(initial_line, lt, ct, self._vstate)
if not rv:
lt = '\n'.join(lt)
ct = '\n'.join(ct)
es = diff(lt, ct, initial_line)
self._csnippets[0].replay_user_edits(es)
except IndexError:
                pass  # Rather do nothing than throw an error; this will be correct most of the time.
self._check_if_still_inside_snippet()
if self._csnippets:
self._csnippets[0].update_textobjects()
self._vstate.remember_buffer(self._csnippets[0])
def leaving_buffer(self):
"""
Called when the user switches tabs/windows/buffers. It basically means
that all snippets must be properly terminated
"""
while len(self._csnippets):
self._current_snippet_is_done()
self._reinit()
###################################
# Private/Protect Functions Below #
###################################
def _error(self, msg):
msg = _vim.escape("UltiSnips: " + msg)
if self._test_error:
msg = msg.replace('"', r'\"')
msg = msg.replace('|', r'\|')
_vim.command("let saved_pos=getpos('.')")
_vim.command("$:put =%s" % msg)
_vim.command("call setpos('.', saved_pos)")
elif False:
_vim.command("echohl WarningMsg")
_vim.command("echomsg %s" % msg)
_vim.command("echohl None")
else:
_vim.command("echoerr %s" % msg)
def _reinit(self):
self._ctab = None
self._ignore_movements = False
def _check_if_still_inside_snippet(self):
# Did we leave the snippet with this movement?
if self._cs and (
not self._cs.start <= _vim.buf.cursor <= self._cs.end
):
self._current_snippet_is_done()
self._reinit()
self._check_if_still_inside_snippet()
def _current_snippet_is_done(self):
self._csnippets.pop()
def _jump(self, backwards = False):
jumped = False
if self._cs:
self._ctab = self._cs.select_next_tab(backwards)
if self._ctab:
before, after = _vim.buf.current_line_splitted
if self._cs.snippet.has_option("s"):
if after == "":
m = re.match(r'(.*?)\s+$', before)
if m:
lineno = _vim.buf.cursor.line
_vim.text_to_vim(Position(lineno,0), Position(
lineno,len(before)+len(after)), m.group(1))
_vim.select(self._ctab.start, self._ctab.end)
jumped = True
if self._ctab.no == 0:
self._current_snippet_is_done()
else:
# This really shouldn't happen, because a snippet should
# have been popped when its final tabstop was used.
# Cleanup by removing current snippet and recursing.
self._current_snippet_is_done()
jumped = self._jump(backwards)
if jumped:
self._vstate.remember_position()
return jumped
def _handle_failure(self, trigger):
"""
Mainly make sure that we play well with SuperTab
"""
if trigger.lower() == "<tab>":
feedkey = "\\" + trigger
elif trigger.lower() == "<s-tab>":
feedkey = "\\" + trigger
else:
feedkey = None
mode = "n"
if not self._supertab_keys:
if _vim.eval("exists('g:SuperTabMappingForward')") != "0":
self._supertab_keys = (
_vim.eval("g:SuperTabMappingForward"),
_vim.eval("g:SuperTabMappingBackward"),
)
else:
self._supertab_keys = [ '', '' ]
for idx, sttrig in enumerate(self._supertab_keys):
if trigger.lower() == sttrig.lower():
if idx == 0:
feedkey= r"\<Plug>SuperTabForward"
mode = "n"
elif idx == 1:
feedkey = r"\<Plug>SuperTabBackward"
mode = "p"
# Use remap mode so SuperTab mappings will be invoked.
break
if feedkey == r"\<Plug>SuperTabForward" or feedkey == r"\<Plug>SuperTabBackward":
_vim.command("return SuperTab(%s)" % _vim.escape(mode))
elif feedkey:
_vim.command("return %s" % _vim.escape(feedkey))
def _snips(self, before, possible):
""" Returns all the snippets for the given text
before the cursor. If possible is True, then get all
possible matches.
"""
self._ensure_all_loaded()
filetypes = self._filetypes[_vim.buf.nr][::-1]
found_snippets = []
for ft in filetypes:
found_snippets += self._find_snippets(ft, before, possible)
# Search if any of the snippets overwrites the previous
# Dictionary allows O(1) access for easy overwrites
snippets = {}
for s in found_snippets:
if (s.trigger not in snippets) or s.overwrites_previous:
snippets[s.trigger] = []
snippets[s.trigger].append(s)
# Transform dictionary into flat list of snippets
selected_snippets = set([item for sublist in snippets.values() for item in sublist])
# Return snippets to their original order
snippets = [snip for snip in found_snippets if snip in selected_snippets]
return snippets
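# Illustrative sketch (hypothetical snippet objects, not part of this class)
# of the overwrite semantics above: two snippets share the trigger "for" and
# the second one has overwrites_previous set, so the dict entry is reset
# before appending and only the overwriting snippet survives:
#
#     found_snippets = [plain_for_snippet, overwriting_for_snippet]
#     # -> snippets == {"for": [overwriting_for_snippet]}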
def _ask_snippets(self, snippets):
""" Given a list of snippets, ask the user which one they
want to use, and return it.
"""
# Build a 1-indexed menu of snippet descriptions for inputlist()
display = [as_unicode("%i: %s") % (i + 1, s.description) for i, s in enumerate(snippets)]
try:
rv = _vim.eval("inputlist(%s)" % _vim.escape(display))
if rv is None or rv == '0':
return None
rv = int(rv)
if rv > len(snippets):
rv = len(snippets)
return snippets[rv-1]
except _vim.error:
# Likely "invalid expression", but the message might be translated. We
# have no way of knowing the exact error, so we ignore all errors silently.
return None
def _do_snippet(self, snippet, before, after):
""" Expands the given snippet, and handles everything
that needs to be done with it.
"""
# Adjust before, maybe the trigger is not the complete word
text_before = before
if snippet.matched:
text_before = before[:-len(snippet.matched)]
if self._cs:
start = Position(_vim.buf.cursor.line, len(text_before))
end = Position(_vim.buf.cursor.line, len(before))
# It could be that our trigger contains the content of TextObjects
# in our containing snippet. If this is indeed the case, we have to
# make sure that those are properly killed. We do this by
# pretending that the user deleted and retyped the text that our
# trigger matched.
edit_actions = [
("D", start.line, start.col, snippet.matched),
("I", start.line, start.col, snippet.matched),
]
self._csnippets[0].replay_user_edits(edit_actions)
si = snippet.launch(text_before, self._visual_content,
self._cs.find_parent_for_new_to(start), start, end)
else:
start = Position(_vim.buf.cursor.line, len(text_before))
end = Position(_vim.buf.cursor.line, len(before))
si = snippet.launch(text_before, self._visual_content, None, start, end)
self._visual_content.reset()
self._csnippets.append(si)
self._ignore_movements = True
self._vstate.remember_buffer(self._csnippets[0])
self._jump()
def _try_expand(self):
before, after = _vim.buf.current_line_splitted
if not before:
return False
snippets = self._snips(before, False)
if not snippets:
# No snippet found
return False
elif len(snippets) == 1:
snippet = snippets[0]
else:
snippet = self._ask_snippets(snippets)
if not snippet:
return True
self._do_snippet(snippet, before, after)
return True
@property
def _cs(self):
if not len(self._csnippets):
return None
return self._csnippets[-1]
def _parse_snippets(self, ft, fn, file_data=None):
self.add_snippet_file(ft, fn)
_SnippetsFileParser(ft, fn, self, file_data).parse()
def base_snippet_files_for(self, ft, default=True):
""" Returns a list of snippet files matching the given filetype (ft).
If default is set to false, it doesn't include shipped files.
Searches through each path in 'runtimepath' in reverse order,
in each of these, it searches each directory name listed in
'g:UltiSnipsSnippetDirectories' in order, then looks for files in these
directories called 'ft.snippets' or '*_ft.snippets' replacing ft with
the filetype.
"""
if _vim.eval("exists('b:UltiSnipsSnippetDirectories')") == "1":
snippet_dirs = _vim.eval("b:UltiSnipsSnippetDirectories")
else:
snippet_dirs = _vim.eval("g:UltiSnipsSnippetDirectories")
base_snippets = os.path.realpath(os.path.join(__file__, "../../../UltiSnips"))
ret = []
paths = _vim.eval("&runtimepath").split(',')
if _vim.eval("exists('g:UltiSnipsDontReverseSearchPath')") == "0" or \
_vim.eval("g:UltiSnipsDontReverseSearchPath") == "0":
paths = paths[::-1]
for rtp in paths:
for snippet_dir in snippet_dirs:
pth = os.path.realpath(os.path.expanduser(os.path.join(rtp, snippet_dir)))
patterns = ["%s.snippets", "%s_*.snippets", os.path.join("%s","*")]
if not default and pth == base_snippets:
patterns.remove("%s.snippets")
for pattern in patterns:
for fn in glob.glob(os.path.join(pth, pattern % ft)):
if fn not in ret:
ret.append(fn)
return ret
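# Example (illustrative paths): for ft == "python" and the default snippet
# directory "UltiSnips" on the runtimepath, the globs above match files like
#
#     ~/.vim/UltiSnips/python.snippets
#     ~/.vim/UltiSnips/python_django.snippets
#     ~/.vim/UltiSnips/python/anything      (files inside a per-ft directory)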
@property
def primary_filetype(self):
""" Property for the primary filetype. This filetype
will be edited when UltiSnipsEdit is called
without any arguments.
"""
return self._filetypes[_vim.buf.nr][0]
def file_to_edit(self, ft):
""" Gets a file to edit based on the given filetype.
If no filetype is given, uses the current filetype from Vim.
Checks 'g:UltiSnipsSnippetsDir' and uses it if it exists
If a non-shipped file already exists, it uses it.
Otherwise uses a file in ~/.vim/ or ~/vimfiles
"""
edit = None
existing = self.base_snippet_files_for(ft, False)
filename = ft + ".snippets"
if _vim.eval("exists('g:UltiSnipsSnippetsDir')") == "1":
snipdir = _vim.eval("g:UltiSnipsSnippetsDir")
edit = os.path.join(snipdir, filename)
elif existing:
edit = existing[-1] # last sourced/highest priority
else:
home = _vim.eval("$HOME")
rtp = [os.path.realpath(os.path.expanduser(p)) for p in _vim.eval("&rtp").split(",")]
snippet_dirs = ["UltiSnips"] + _vim.eval("g:UltiSnipsSnippetDirectories")
us = snippet_dirs[-1]
path = os.path.join(home, ".vim", us)
for dirname in [".vim", "vimfiles"]:
pth = os.path.join(home, dirname)
if pth in rtp:
path = os.path.join(pth, us)
if not os.path.isdir(path):
os.mkdir(path)
edit = os.path.join(path, filename)
return edit
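# Resolution order sketch (hypothetical call): g:UltiSnipsSnippetsDir wins if
# set, then the highest-priority existing non-shipped file, else a fresh file
# under ~/.vim (or ~/vimfiles), with the directory created on demand:
#
#     manager.file_to_edit("python")
#     # -> e.g. "~/.vim/UltiSnips/python.snippets"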
# Loading
def _load_snippets_for(self, ft):
self.snippet_dict(ft).reset()
for fn in self.base_snippet_files_for(ft):
self._parse_snippets(ft, fn)
# Now load for the parents
for p in self._snippets[ft].extends:
if p not in self._snippets:
self._load_snippets_for(p)
def _needs_update(self, ft):
do_hash = _vim.eval('exists("g:UltiSnipsDoHash")') == "0" \
or _vim.eval("g:UltiSnipsDoHash") != "0"
if ft not in self._snippets:
return True
elif do_hash and self.snippet_dict(ft).needs_update():
return True
elif do_hash:
cur_snips = set(self.base_snippet_files_for(ft))
old_snips = set(self.snippet_dict(ft).files)
if cur_snips - old_snips:
return True
return False
def _ensure_loaded(self, ft, checked=None):
if not checked:
checked = set([ft])
elif ft in checked:
return
else:
checked.add(ft)
if self._needs_update(ft):
self._load_snippets_for(ft)
for parent in self.snippet_dict(ft).extends:
self._ensure_loaded(parent, checked)
def _ensure_all_loaded(self):
for ft in self._filetypes[_vim.buf.nr]:
self._ensure_loaded(ft)
def reset_buffer_filetypes(self):
if _vim.buf.nr in self._filetypes:
del self._filetypes[_vim.buf.nr]
def add_buffer_filetypes(self, ft):
""" Checks for changes in the list of snippet files or the contents
of the snippet files and reloads them if necessary.
"""
buf_fts = self._filetypes[_vim.buf.nr]
idx = -1
for ft in ft.split("."):
ft = ft.strip()
if not ft: continue
try:
idx = buf_fts.index(ft)
except ValueError:
self._filetypes[_vim.buf.nr].insert(idx + 1, ft)
idx += 1
self._ensure_all_loaded()
def _find_snippets(self, ft, trigger, potentially=False, seen=None):
"""
Find snippets matching trigger
ft - file type to search
trigger - trigger to match against
potentially - also returns snippets that could potentially match, that
is, snippets whose triggers start with the current trigger
"""
snips = self._snippets.get(ft, None)
if not snips:
return []
if not seen:
seen = []
seen.append(ft)
parent_results = []
for p in snips.extends:
if p not in seen:
seen.append(p)
parent_results += self._find_snippets(p, trigger,
potentially, seen)
return parent_results + snips.get_matching_snippets(
trigger, potentially)
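# Illustrative extends-chain lookup (hypothetical filetypes): with "cpp"
# extending "c", matches from the parent table come first and the `seen`
# list guards against cycles in `extends` chains:
#
#     manager._find_snippets("cpp", "inc")
#     # -> snippets from "c" matching "inc", then snippets from "cpp"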
UltiSnips_Manager = SnippetManager()
| apache-2.0 |
JioCloud/nova | nova/virt/hyperv/constants.py | 31 | 2347 | # Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Constants used in ops classes
"""
from nova.compute import arch
from nova.compute import power_state
HYPERV_VM_STATE_ENABLED = 2
HYPERV_VM_STATE_DISABLED = 3
HYPERV_VM_STATE_SHUTTING_DOWN = 4
HYPERV_VM_STATE_REBOOT = 10
HYPERV_VM_STATE_PAUSED = 32768
HYPERV_VM_STATE_SUSPENDED = 32769
HYPERV_POWER_STATE = {
HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN,
HYPERV_VM_STATE_SHUTTING_DOWN: power_state.SHUTDOWN,
HYPERV_VM_STATE_ENABLED: power_state.RUNNING,
HYPERV_VM_STATE_PAUSED: power_state.PAUSED,
HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED
}
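# Illustrative lookup (a sketch; the real translation happens in the Hyper-V
# ops classes when reporting instance state back to Nova):
#
#     nova_state = HYPERV_POWER_STATE[HYPERV_VM_STATE_PAUSED]
#     # -> power_state.PAUSED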
WMI_WIN32_PROCESSOR_ARCHITECTURE = {
0: arch.I686,
1: arch.MIPS,
2: arch.ALPHA,
3: arch.PPC,
5: arch.ARMV7,
6: arch.IA64,
9: arch.X86_64,
}
PROCESSOR_FEATURE = {
7: '3dnow',
3: 'mmx',
12: 'nx',
9: 'pae',
8: 'rdtsc',
20: 'slat',
13: 'sse3',
21: 'vmx',
6: 'sse',
10: 'sse2',
17: 'xsave',
}
WMI_JOB_STATUS_STARTED = 4096
WMI_JOB_STATE_RUNNING = 4
WMI_JOB_STATE_COMPLETED = 7
VM_SUMMARY_NUM_PROCS = 4
VM_SUMMARY_ENABLED_STATE = 100
VM_SUMMARY_MEMORY_USAGE = 103
VM_SUMMARY_UPTIME = 105
CTRL_TYPE_IDE = "IDE"
CTRL_TYPE_SCSI = "SCSI"
DISK = "VHD"
DISK_FORMAT = DISK
DVD = "DVD"
DVD_FORMAT = "ISO"
DISK_FORMAT_MAP = {
DISK_FORMAT.lower(): DISK,
DVD_FORMAT.lower(): DVD
}
DISK_FORMAT_VHD = "VHD"
DISK_FORMAT_VHDX = "VHDX"
VHD_TYPE_FIXED = 2
VHD_TYPE_DYNAMIC = 3
SCSI_CONTROLLER_SLOTS_NUMBER = 64
HOST_POWER_ACTION_SHUTDOWN = "shutdown"
HOST_POWER_ACTION_REBOOT = "reboot"
HOST_POWER_ACTION_STARTUP = "startup"
IMAGE_PROP_VM_GEN = "hw_machine_type"
IMAGE_PROP_VM_GEN_1 = "hyperv-gen1"
IMAGE_PROP_VM_GEN_2 = "hyperv-gen2"
VM_GEN_1 = 1
VM_GEN_2 = 2
| apache-2.0 |
FireCARES/data | sources/nfirs/scripts/jupyter_notebook_config.py | 1 | 25267 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = u''
## Specify a config file to load.
#c.JupyterApp.config_file_name = u''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = u''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = u''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = u''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = ''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = u''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Whether to use the minified JS file or not; mainly used during
# development to avoid JS recompilation.
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# it is limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
c.NotebookApp.ip = '192.168.33.15'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = u''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values
# can be used to enable and disable the loading of the extensions. The
# extensions will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = u''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = u''
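## For example (the hash below is an illustrative placeholder only, not a
# real credential; generate your own with the snippet above and paste it in):
#
# c.NotebookApp.password = u'sha1:67c9e60bb8b6:9ffede0825894254b2e042ea597d771089e11aed'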
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure,
# since any user can connect to it via ssh.
c.NotebookApp.password_required = False
## The port the notebook server will listen on.
c.NotebookApp.port = 8007
## The number of additional ports to try if the specified port is not available.
c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify where to open the notebook on startup. This is the
# `new` argument passed to the standard library method `webbrowser.open`.
# The behaviour is not guaranteed, but depends on browser support. Valid
# values are:
# 2 opens a new tab,
# 1 opens a new window,
# 0 opens in an existing window.
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = u''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = ''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = u''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = u'vagrant'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python2'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout is not 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout is not 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. The minimum timeout is 300
# seconds (5 minutes). Positive values less than the minimum value will be set
# to the minimum.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
##
#c.MappingKernelManager.root_dir = u''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = [u'__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note
# ----
# Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default, notebooks are saved on disk to a temporary file which, if
# successfully written, then replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = u''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = u''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = ''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = u''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set([])
| mit |
annegentle/magnum | magnum/api/controllers/v1/replicationcontroller.py | 7 | 14010 | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
from magnum.api.controllers import link
from magnum.api.controllers.v1 import base as v1_base
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api.controllers.v1 import utils as api_utils
from magnum.api import expose
from magnum.api import validation
from magnum.common import exception
from magnum.common import k8s_manifest
from magnum.common import policy
from magnum import objects
class ReplicationControllerPatchType(v1_base.K8sPatchType):
@staticmethod
def internal_attrs():
defaults = v1_base.K8sPatchType.internal_attrs()
return defaults + ['/replicas']
class ReplicationController(v1_base.K8sResourceBase):
"""API representation of a ReplicationController.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of a
ReplicationController.
"""
uuid = types.uuid
"""Unique UUID for this ReplicationController"""
images = [wtypes.text]
"""A list of images used by containers in this ReplicationController."""
replicas = wsme.wsattr(wtypes.IntegerType(), readonly=True)
"""Replicas of this ReplicationController"""
links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated rc links"""
def __init__(self, **kwargs):
super(ReplicationController, self).__init__()
self.fields = []
for field in objects.ReplicationController.fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
@staticmethod
def _convert_with_links(rc, url, expand=True):
if not expand:
rc.unset_fields_except(['uuid', 'name', 'images', 'bay_uuid',
'labels', 'replicas'])
rc.links = [link.Link.make_link('self', url,
'rcs', rc.uuid),
link.Link.make_link('bookmark', url,
'rcs', rc.uuid,
bookmark=True)]
return rc
@classmethod
def convert_with_links(cls, rpc_rc, expand=True):
rc = ReplicationController(**rpc_rc.as_dict())
return cls._convert_with_links(rc, pecan.request.host_url, expand)
@classmethod
def sample(cls, expand=True):
sample = cls(uuid='f978db47-9a37-4e9f-8572-804a10abc0aa',
name='MyReplicationController',
images=['MyImage'],
bay_uuid='f978db47-9a37-4e9f-8572-804a10abc0ab',
labels={'name': 'foo'},
replicas=2,
manifest_url='file:///tmp/rc.yaml',
manifest='''{
"metadata": {
"name": "name_of_rc"
},
"spec":{
"replicas":2,
"selector":{
"name":"frontend"
},
"template":{
"metadata":{
"labels":{
"name":"frontend"
}
},
"spec":{
"containers":[
{
"name":"test-redis",
"image":"steak/for-dinner",
"ports":[
{
"containerPort":80,
"protocol":"TCP"
}
]
}
]
}
}
}
}''',
created_at=datetime.datetime.utcnow(),
updated_at=datetime.datetime.utcnow())
return cls._convert_with_links(sample, 'http://localhost:9511', expand)
def parse_manifest(self):
try:
manifest = k8s_manifest.parse(self._get_manifest())
except ValueError as e:
raise exception.InvalidParameterValue(message=str(e))
try:
self.name = manifest["metadata"]["name"]
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field metadata['name'] can't be empty in manifest.")
try:
self.replicas = manifest["spec"]["replicas"]
except (KeyError, TypeError):
pass
try:
self.selector = manifest["spec"]["selector"]
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field spec['selector'] can't be empty in manifest.")
try:
self.labels = manifest["spec"]["template"]["metadata"]["labels"]
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field spec['template']['metadata']['labels'] "
"can't be empty in manifest.")
try:
images = []
for cont in manifest["spec"]["template"]["spec"]["containers"]:
images.append(cont["image"])
self.images = images
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field spec['template']['spec']['containers'] "
"can't be empty in manifest.")
class ReplicationControllerCollection(collection.Collection):
"""API representation of a collection of ReplicationControllers."""
rcs = [ReplicationController]
"""A list containing ReplicationController objects"""
def __init__(self, **kwargs):
self._type = 'rcs'
@staticmethod
def convert_with_links(rpc_rcs, limit, url=None, expand=False, **kwargs):
collection = ReplicationControllerCollection()
collection.rcs = [ReplicationController.convert_with_links(p, expand)
for p in rpc_rcs]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
@classmethod
def sample(cls):
sample = cls()
sample.rcs = [ReplicationController.sample(expand=False)]
return sample
class ReplicationControllersController(rest.RestController):
"""REST controller for ReplicationControllers."""
def __init__(self):
super(ReplicationControllersController, self).__init__()
_custom_actions = {
'detail': ['GET'],
}
def _get_rcs_collection(self, marker, limit,
sort_key, sort_dir, expand=False,
resource_url=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.ReplicationController.get_by_uuid(
pecan.request.context,
marker)
rcs = pecan.request.rpcapi.rc_list(
pecan.request.context, limit,
marker_obj, sort_key=sort_key,
sort_dir=sort_dir)
return ReplicationControllerCollection.convert_with_links(
rcs, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@policy.enforce_wsgi("rc")
@expose.expose(ReplicationControllerCollection, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, rc_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of ReplicationControllers.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
return self._get_rcs_collection(marker, limit, sort_key,
sort_dir)
@policy.enforce_wsgi("rc")
@expose.expose(ReplicationControllerCollection, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def detail(self, rc_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of ReplicationControllers with detail.
:param rc_uuid: UUID of a ReplicationController, to list only that
ReplicationController.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
# NOTE(jay-lau-513): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "rcs":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['rcs', 'detail'])
return self._get_rcs_collection(marker, limit,
sort_key, sort_dir, expand,
resource_url)
@policy.enforce_wsgi("rc", "get")
@expose.expose(ReplicationController, types.uuid_or_name)
def get_one(self, rc_ident):
"""Retrieve information about the given ReplicationController.
:param rc_ident: UUID or logical name of a ReplicationController.
"""
rpc_rc = api_utils.get_rpc_resource('ReplicationController', rc_ident)
return ReplicationController.convert_with_links(rpc_rc)
@policy.enforce_wsgi("rc", "create")
@expose.expose(ReplicationController, body=ReplicationController,
status_code=201)
@validation.enforce_bay_types('kubernetes')
def post(self, rc):
"""Create a new ReplicationController.
:param rc: a ReplicationController within the request body.
"""
rc.parse_manifest()
rc_dict = rc.as_dict()
context = pecan.request.context
rc_dict['project_id'] = context.project_id
rc_dict['user_id'] = context.user_id
rc_obj = objects.ReplicationController(context, **rc_dict)
new_rc = pecan.request.rpcapi.rc_create(rc_obj)
if not new_rc:
raise exception.InvalidState()
# Set the HTTP Location Header
pecan.response.location = link.build_url('rcs', new_rc.uuid)
return ReplicationController.convert_with_links(new_rc)
@policy.enforce_wsgi("rc", "update")
@wsme.validate(types.uuid, [ReplicationControllerPatchType])
@expose.expose(ReplicationController, types.uuid_or_name,
body=[ReplicationControllerPatchType])
def patch(self, rc_ident, patch):
"""Update an existing rc.
:param rc_ident: UUID or logical name of a ReplicationController.
:param patch: a json PATCH document to apply to this rc.
"""
rpc_rc = api_utils.get_rpc_resource('ReplicationController', rc_ident)
# Init the manifest and manifest_url fields because we don't store them
# in the database.
rpc_rc['manifest'] = None
rpc_rc['manifest_url'] = None
try:
rc_dict = rpc_rc.as_dict()
rc = ReplicationController(**api_utils.apply_jsonpatch(rc_dict,
patch))
if rc.manifest or rc.manifest_url:
rc.parse_manifest()
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Update only the fields that have changed
for field in objects.ReplicationController.fields:
try:
patch_val = getattr(rc, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if rpc_rc[field] != patch_val:
rpc_rc[field] = patch_val
if rc.manifest or rc.manifest_url:
pecan.request.rpcapi.rc_update(rpc_rc)
else:
rpc_rc.save()
return ReplicationController.convert_with_links(rpc_rc)
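# Illustrative JSON PATCH body for this endpoint (RFC 6902 style, as handled
# by api_utils.apply_jsonpatch; the manifest value shown is a placeholder):
#
#     [{"op": "replace", "path": "/manifest", "value": "<new manifest>"}]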
@policy.enforce_wsgi("rc")
@expose.expose(None, types.uuid_or_name, status_code=204)
def delete(self, rc_ident):
"""Delete a ReplicationController.
:param rc_ident: UUID or logical name of a ReplicationController.
"""
rpc_rc = api_utils.get_rpc_resource('ReplicationController', rc_ident)
pecan.request.rpcapi.rc_delete(rpc_rc.uuid)
| apache-2.0 |
Krossom/python-for-android | python3-alpha/python3-src/Lib/urllib/error.py | 48 | 2264 | """Exception classes raised by urllib.
The base exception class is URLError, which inherits from IOError. It
doesn't define any behavior of its own, but is the base class for all
exceptions defined in this package.
HTTPError is an exception class that is also a valid HTTP response
instance. It behaves this way because HTTP protocol errors are valid
responses, with a status code, headers, and a body. In some contexts,
an application may want to handle an exception like a regular
response.
"""
import urllib.response
# do these error classes make sense?
# make sure all of the IOError stuff is overridden. we just want to be
# subtypes.
class URLError(IOError):
# URLError is a sub-type of IOError, but it doesn't share any of
# the implementation. need to override __init__ and __str__.
# It sets self.args for compatibility with other EnvironmentError
# subclasses, but args doesn't have the typical format with errno in
# slot 0 and strerror in slot 1. This may be better than nothing.
def __init__(self, reason, filename=None):
self.args = reason,
self.reason = reason
if filename is not None:
self.filename = filename
def __str__(self):
return '<urlopen error %s>' % self.reason
class HTTPError(URLError, urllib.response.addinfourl):
"""Raised when HTTP error occurs, but also acts like non-error return"""
__super_init = urllib.response.addinfourl.__init__
def __init__(self, url, code, msg, hdrs, fp):
self.code = code
self.msg = msg
self.hdrs = hdrs
self.fp = fp
self.filename = url
# The addinfourl classes depend on fp being a valid file
# object. In some cases, the HTTPError may not have a valid
# file object. If this happens, the simplest workaround is to
# not initialize the base classes.
if fp is not None:
self.__super_init(fp, hdrs, url, code)
def __str__(self):
return 'HTTP Error %s: %s' % (self.code, self.msg)
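# Example (illustrative; assumes urllib.request is imported by the caller):
# an HTTPError doubles as a response object, so error pages can be read:
#
#     try:
#         urllib.request.urlopen("http://www.example.com/missing")
#     except HTTPError as err:
#         print(err.code, err.msg)   # e.g. 404 Not Found
#         body = err.read()          # the error page body, if any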
# exception raised when downloaded size does not match content-length
class ContentTooShortError(URLError):
def __init__(self, message, content):
URLError.__init__(self, message)
self.content = content
| apache-2.0 |
laiqiqi886/kbengine | kbe/src/lib/python/Lib/test/test_sched.py | 92 | 6588 | import queue
import sched
import time
import unittest
from test import support
try:
import threading
except ImportError:
threading = None
TIMEOUT = 10
class Timer:
def __init__(self):
self._cond = threading.Condition()
self._time = 0
self._stop = 0
def time(self):
with self._cond:
return self._time
# increase the time but not beyond the established limit
def sleep(self, t):
assert t >= 0
with self._cond:
t += self._time
while self._stop < t:
self._time = self._stop
self._cond.wait()
self._time = t
# advance time limit for user code
def advance(self, t):
assert t >= 0
with self._cond:
self._stop += t
self._cond.notify_all()
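# Illustrative use of the fake Timer above: sleep() blocks until the main
# thread grants time via advance(), which makes scheduler runs deterministic:
#
#     timer = Timer()
#     scheduler = sched.scheduler(timer.time, timer.sleep)
#     # a worker thread calling scheduler.run() now only makes progress
#     # when the test calls timer.advance(t)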
class TestCase(unittest.TestCase):
def test_enter(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.5, 0.4, 0.3, 0.2, 0.1]:
z = scheduler.enter(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.1, 0.2, 0.3, 0.4, 0.5])
def test_enterabs(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_enter_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
scheduler.enter(1, 1, fun, (1,))
scheduler.enter(3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
for x in [4, 5, 2]:
z = scheduler.enter(x - 1, 1, fun, (x,))
timer.advance(2)
self.assertEqual(q.get(timeout=TIMEOUT), 2)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 5)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 5)
def test_priority(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for priority in [1, 2, 3, 4, 5]:
z = scheduler.enterabs(0.01, priority, fun, (priority,))
scheduler.run()
self.assertEqual(l, [1, 2, 3, 4, 5])
def test_cancel(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
event1 = scheduler.enterabs(now + 0.01, 1, fun, (0.01,))
event2 = scheduler.enterabs(now + 0.02, 1, fun, (0.02,))
event3 = scheduler.enterabs(now + 0.03, 1, fun, (0.03,))
event4 = scheduler.enterabs(now + 0.04, 1, fun, (0.04,))
event5 = scheduler.enterabs(now + 0.05, 1, fun, (0.05,))
scheduler.cancel(event1)
scheduler.cancel(event5)
scheduler.run()
self.assertEqual(l, [0.02, 0.03, 0.04])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_cancel_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
now = timer.time()
event1 = scheduler.enterabs(now + 1, 1, fun, (1,))
event2 = scheduler.enterabs(now + 2, 1, fun, (2,))
event4 = scheduler.enterabs(now + 4, 1, fun, (4,))
event5 = scheduler.enterabs(now + 5, 1, fun, (5,))
event3 = scheduler.enterabs(now + 3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
scheduler.cancel(event2)
scheduler.cancel(event5)
timer.advance(1)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 4)
def test_empty(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
self.assertTrue(scheduler.empty())
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
self.assertFalse(scheduler.empty())
scheduler.run()
self.assertTrue(scheduler.empty())
def test_queue(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
e5 = scheduler.enterabs(now + 0.05, 1, fun)
e1 = scheduler.enterabs(now + 0.01, 1, fun)
e2 = scheduler.enterabs(now + 0.02, 1, fun)
e4 = scheduler.enterabs(now + 0.04, 1, fun)
e3 = scheduler.enterabs(now + 0.03, 1, fun)
# The queue property is supposed to return an ordered list of
# upcoming events
self.assertEqual(scheduler.queue, [e1, e2, e3, e4, e5])
def test_args_kwargs(self):
flag = []
def fun(*a, **b):
flag.append(None)
self.assertEqual(a, (1,2,3))
self.assertEqual(b, {"foo":1})
scheduler = sched.scheduler(time.time, time.sleep)
z = scheduler.enterabs(0.01, 1, fun, argument=(1,2,3), kwargs={"foo":1})
scheduler.run()
self.assertEqual(flag, [None])
def test_run_non_blocking(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [10, 9, 8, 7, 6]:
scheduler.enter(x, 1, fun, (x,))
scheduler.run(blocking=False)
self.assertEqual(l, [])
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
mit-ll/python-keylime | keylime/ca_impl_cfssl.py | 1 | 6870 | '''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import base64
import os
import subprocess
import socket
import time
import requests
import shutil
import sys
import simplejson as json
from keylime import config
from keylime import keylime_logging
from keylime import secure_mount
from M2Crypto import EVP, X509
logger = keylime_logging.init_logging('ca_impl_cfssl')
cfssl_ip = config.get('ca', 'cfssl_ip')
cfssl_port = config.get('ca', 'cfssl_port')
cfsslproc = None
def post_cfssl(params, data):
"""POST `data` as JSON to the local cfssl server, retrying while the
connection is refused (e.g. while cfssl is still starting up)."""
numtries = 0
maxr = 10
retry = 0.2
while True:
try:
response = requests.post(
"http://%s:%s/%s" % (cfssl_ip, cfssl_port, params), json=data, timeout=1)
break
except requests.exceptions.ConnectionError as e:
numtries += 1
if numtries >= maxr:
logger.error(
"Quiting after max number of retries to connect to cfssl server")
raise e
logger.info(
"Connection to cfssl refused %d/%d times, trying again in %f seconds..." % (numtries, maxr, retry))
time.sleep(retry)
continue
if response.status_code != 200:
raise Exception("Unable to issue CFSSL API command %s: %s" %
(params, response.text))
return response.json()
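# Illustrative call (a sketch; 'api/v1/cfssl/info' is a standard CFSSL API
# endpoint, and the label value here is a placeholder):
#
#     body = post_cfssl('api/v1/cfssl/info', {"label": "primary"})
#     if body['success']:
#         ca_cert_pem = body['result']['certificate']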
def start_cfssl(cmdline=""):
if shutil.which("cfssl") is None:
logger.error(
"cfssl binary not found in the path. Please install cfssl or change the setting \"ca_implementation\" in keylime.conf")
sys.exit(1)
global cfsslproc
cmd = "cfssl serve -loglevel=1 %s " % cmdline
env = os.environ.copy()
env['PATH'] = env['PATH'] + ":/usr/local/bin"
# make sure cfssl isn't running
os.system('pkill -f cfssl')
cfsslproc = subprocess.Popen(cmd, env=env, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True)
if cfsslproc.returncode is not None:
raise Exception("Unable to launch %s: failed with code %d" %
(cmd, cfsslproc.returncode))
logger.debug("Waiting for cfssl to start...")
while True:
line = cfsslproc.stdout.readline()
if "Now listening on" in line:
break
time.sleep(0.2) # give cfssl a little more time to get started
logger.debug("cfssl started successfully")
def stop_cfssl():
global cfsslproc
if cfsslproc is not None:
cfsslproc.kill()
os.system("pkill -f cfssl")
cfsslproc = None
def mk_cacert():
csr = {"CN": config.get('ca', 'cert_ca_name'),
"key": {
"algo": "rsa",
"size": config.getint('ca', 'cert_bits')
},
"names": [
{
"C": config.get('ca', 'cert_country'),
"L": config.get('ca', 'cert_locality'),
"O": config.get('ca', 'cert_organization'),
"OU": config.get('ca', 'cert_org_unit'),
"ST": config.get('ca', 'cert_state')
}
]
}
try:
start_cfssl()
body = post_cfssl('api/v1/cfssl/init_ca', csr)
finally:
stop_cfssl()
if body['success']:
pk_str = body['result']['private_key']
pk = EVP.load_key_string(body['result']['private_key'].encode('utf-8'))
cert = X509.load_cert_string(
body['result']['certificate'].encode('utf-8'))
pkey = cert.get_pubkey()
return pk_str, cert, pk, pkey
else:
raise Exception("Unable to create CA")
def mk_signed_cert(cacert, ca_pk, name, serialnum):
csr = {"request": {
"CN": name,
"hosts": [
name,
],
"key": {
"algo": "rsa",
"size": config.getint('ca', 'cert_bits')
},
"names": [
{
"C": config.get('ca', 'cert_country'),
"L": config.get('ca', 'cert_locality'),
"O": config.get('ca', 'cert_organization'),
"OU": config.get('ca', 'cert_org_unit'),
"ST": config.get('ca', 'cert_state')
}
]
}
}
# check CRL distribution point
disturl = config.get('ca', 'cert_crl_dist')
if disturl == 'default':
disturl = "http://%s:%s/crl.der" % (socket.getfqdn(), config.CRL_PORT)
# set up config for cfssl server
cfsslconfig = {
"signing": {
"default": {
"usages": ["client auth", "server auth", "key agreement", "key encipherment", "signing", "digital signature", "data encipherment"],
"expiry": "8760h",
"crl_url": disturl,
}
}
}
secdir = secure_mount.mount()
try:
# need to temporarily write out the private key with no password
# to tmpfs
ca_pk.save_key('%s/ca-key.pem' % secdir, None)
with open('%s/cfsslconfig.yml' % secdir, 'w') as f:
json.dump(cfsslconfig, f)
cmdline = "-config=%s/cfsslconfig.yml" % secdir
priv_key = os.path.abspath("%s/ca-key.pem" % secdir)
cmdline += " -ca-key %s -ca cacert.crt" % (priv_key)
start_cfssl(cmdline)
body = post_cfssl('api/v1/cfssl/newcert', csr)
finally:
stop_cfssl()
os.remove('%s/ca-key.pem' % secdir)
os.remove('%s/cfsslconfig.yml' % secdir)
if body['success']:
pk = EVP.load_key_string(body['result']['private_key'].encode('utf-8'))
cert = X509.load_cert_string(
body['result']['certificate'].encode("utf-8"))
return cert, pk
else:
raise Exception("Unable to create cert for %s" % name)
def gencrl(serials, cert, ca_pk):
request = {"certificate": cert,
"serialNumber": serials,
"issuingKey": ca_pk,
"expireTime": ""
}
secdir = secure_mount.mount()
try:
# need to temporarily write out the private key with no password
# to tmpfs
priv_key = os.path.abspath("%s/ca-key.pem" % secdir)
with open(priv_key, 'w') as f:
f.write(ca_pk)
cmdline = " -ca-key %s -ca cacert.crt" % (priv_key)
start_cfssl(cmdline)
body = post_cfssl('api/v1/cfssl/gencrl', request)
finally:
stop_cfssl()
        # TODO: replace os.remove with a secure delete such as srm
os.remove('%s/ca-key.pem' % secdir)
if body['success']:
retval = base64.b64decode(body['result'])
else:
raise Exception("Unable to create crl for cert serials %s. Error: %s" % (
serials, body['errors']))
return retval
# ./cfssl gencrl revoke ca.pem ca-key.pem | base64 -D > mycrl.der
# mk_cacert()
# mk_signed_cert("", "", "hello", None)
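# A hedged end-to-end sketch (not part of this module; assumes a configured
# keylime.conf, a cfssl binary on the PATH, and a cacert.crt already written to
# the working directory, as the -ca flag in mk_signed_cert/gencrl expects).
# The name "node0" and serial 1 are placeholder values:
#
#   pk_str, cacert, ca_pk, _ = mk_cacert()
#   node_cert, node_pk = mk_signed_cert(cacert, ca_pk, "node0", 1)
#   crl_der = gencrl([], cacert.as_pem().decode('utf-8'), pk_str)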
| bsd-2-clause |
Aegeaner/spark | python/pyspark/sql/functions.py | 1 | 134163 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collection of builtin functions
"""
import sys
import functools
import warnings
if sys.version < "3":
from itertools import imap as map
if sys.version >= '3':
basestring = str
from pyspark import since, SparkContext
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf
def _create_name_function(name, doc=""):
""" Create a function that takes a column name argument, by name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_function(name, doc=""):
""" Create a function that takes a Column object, by name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(_to_java_column(col))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings"""
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
return functools.wraps(func)(_)
def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# users might write ints for simplicity. This would throw an error on the JVM side.
jc = getattr(sc._jvm.functions, name)(col1._jc if isinstance(col1, Column) else float(col1),
col2._jc if isinstance(col2, Column) else float(col2))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_window_function(name, doc=''):
""" Create a window function by name """
def _():
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)()
return Column(jc)
_.__name__ = name
_.__doc__ = 'Window function: ' + doc
return _
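# A hedged illustration (not part of the original module) of what the factories
# above produce: each generated function is a thin wrapper that forwards to the
# JVM-side function of the same name and wraps the result in a Column, e.g.
#
#   upper = _create_function('upper', 'Converts a string expression to upper case.')
#   df.select(upper(df.name))   # calls functions.upper(...) on the JVM side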
_lit_doc = """
Creates a :class:`Column` of literal value.
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
_name_functions = {
# name functions take a column name as their argument
'lit': _lit_doc,
'col': 'Returns a :class:`Column` based on the given column name.',
'column': 'Returns a :class:`Column` based on the given column name.',
'asc': 'Returns a sort expression based on the ascending order of the given column name.',
'desc': 'Returns a sort expression based on the descending order of the given column name.',
}
_functions = {
'upper': 'Converts a string expression to upper case.',
'lower': 'Converts a string expression to lower case.',
'sqrt': 'Computes the square root of the specified float value.',
'abs': 'Computes the absolute value.',
'max': 'Aggregate function: returns the maximum value of the expression in a group.',
'min': 'Aggregate function: returns the minimum value of the expression in a group.',
'count': 'Aggregate function: returns the number of items in a group.',
'sum': 'Aggregate function: returns the sum of all values in the expression.',
'avg': 'Aggregate function: returns the average of the values in a group.',
'mean': 'Aggregate function: returns the average of the values in a group.',
'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4 = {
# unary math functions
'acos': ':return: inverse cosine of `col`, as if computed by `java.lang.Math.acos()`',
'asin': ':return: inverse sine of `col`, as if computed by `java.lang.Math.asin()`',
'atan': ':return: inverse tangent of `col`, as if computed by `java.lang.Math.atan()`',
'cbrt': 'Computes the cube-root of the given value.',
'ceil': 'Computes the ceiling of the given value.',
'cos': """:param col: angle in radians
:return: cosine of the angle, as if computed by `java.lang.Math.cos()`.""",
'cosh': """:param col: hyperbolic angle
:return: hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`""",
'exp': 'Computes the exponential of the given value.',
'expm1': 'Computes the exponential of the given value minus one.',
'floor': 'Computes the floor of the given value.',
'log': 'Computes the natural logarithm of the given value.',
'log10': 'Computes the logarithm of the given value in Base 10.',
'log1p': 'Computes the natural logarithm of the given value plus one.',
'rint': 'Returns the double value that is closest in value to the argument and' +
' is equal to a mathematical integer.',
'signum': 'Computes the signum of the given value.',
'sin': """:param col: angle in radians
:return: sine of the angle, as if computed by `java.lang.Math.sin()`""",
'sinh': """:param col: hyperbolic angle
:return: hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`""",
'tan': """:param col: angle in radians
:return: tangent of the given value, as if computed by `java.lang.Math.tan()`""",
'tanh': """:param col: hyperbolic angle
:return: hyperbolic tangent of the given value,
as if computed by `java.lang.Math.tanh()`""",
'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.',
'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.',
'bitwiseNOT': 'Computes bitwise not.',
}
_name_functions_2_4 = {
'asc_nulls_first': 'Returns a sort expression based on the ascending order of the given' +
                       ' column name, and null values appear before non-null values.',
'asc_nulls_last': 'Returns a sort expression based on the ascending order of the given' +
' column name, and null values appear after non-null values.',
'desc_nulls_first': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear before non-null values.',
'desc_nulls_last': 'Returns a sort expression based on the descending order of the given' +
                      ' column name, and null values appear after non-null values.',
}
_collect_list_doc = """
Aggregate function: returns a list of objects with duplicates.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
_collect_set_doc = """
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
_functions_1_6 = {
# unary math functions
'stddev': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_pop': 'Aggregate function: returns population standard deviation of' +
' the expression in a group.',
'variance': 'Aggregate function: alias for var_samp.',
'var_samp': 'Aggregate function: returns the unbiased variance of the values in a group.',
'var_pop': 'Aggregate function: returns the population variance of the values in a group.',
'skewness': 'Aggregate function: returns the skewness of the values in a group.',
'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.',
'collect_list': _collect_list_doc,
'collect_set': _collect_set_doc
}
_functions_2_1 = {
# unary math functions
'degrees': """
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
:param col: angle in radians
:return: angle in degrees, as if computed by `java.lang.Math.toDegrees()`
""",
'radians': """
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
:param col: angle in degrees
:return: angle in radians, as if computed by `java.lang.Math.toRadians()`
""",
}
# math functions that take two arguments as input
_binary_mathfunctions = {
'atan2': """
:param col1: coordinate on y-axis
:param col2: coordinate on x-axis
:return: the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
""",
'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.',
'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
'row_number':
"""returns a sequential number starting at 1 within a window partition.""",
'dense_rank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give you sequential numbers, making
        the person that came in third place (after the ties) register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give you sequential numbers, making
        the person that came in third place (after the ties) register as coming in fifth.
This is equivalent to the RANK function in SQL.""",
'cume_dist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.""",
'percent_rank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
# Wraps deprecated functions (keys) with the messages (values).
_functions_deprecated = {
}
for _name, _doc in _name_functions.items():
globals()[_name] = since(1.3)(_create_name_function(_name, _doc))
for _name, _doc in _functions.items():
globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_1_4.items():
globals()[_name] = since(1.4)(_create_function(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
globals()[_name] = since(1.6)(_create_window_function(_name, _doc))
for _name, _doc in _functions_1_6.items():
globals()[_name] = since(1.6)(_create_function(_name, _doc))
for _name, _doc in _functions_2_1.items():
globals()[_name] = since(2.1)(_create_function(_name, _doc))
for _name, _message in _functions_deprecated.items():
globals()[_name] = _wrap_deprecated_function(globals()[_name], _message)
for _name, _doc in _name_functions_2_4.items():
globals()[_name] = since(2.4)(_create_name_function(_name, _doc))
del _name, _doc
@since(2.1)
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc)
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
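# Hedged usage sketch (assumes DataFrames `large` and `small` that share a
# "key" column): the broadcast hint marks the smaller side of a join so the
# planner can pick a broadcast join:
#
#   large.join(broadcast(small), "key")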
@since(1.4)
def coalesce(*cols):
"""Returns the first column that is not null.
>>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
>>> cDf.show()
+----+----+
| a| b|
+----+----+
|null|null|
| 1|null|
|null| 2|
+----+----+
>>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
+--------------+
|coalesce(a, b)|
+--------------+
| null|
| 1|
| 2|
+--------------+
>>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
+----+----+----------------+
| a| b|coalesce(a, 0.0)|
+----+----+----------------+
|null|null| 0.0|
| 1|null| 1.0|
|null| 2| 0.0|
+----+----+----------------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def corr(col1, col2):
"""Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
and ``col2``.
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_pop(col1, col2):
"""Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_samp(col1, col2):
"""Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
@since(1.3)
def countDistinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
>>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
[Row(c=2)]
>>> df.agg(countDistinct("age", "name").alias('c')).collect()
[Row(c=2)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.3)
def first(col, ignorenulls=False):
"""Aggregate function: returns the first value in a group.
The function by default returns the first values it sees. It will return the first non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. note:: The function is non-deterministic because its results depends on order of rows which
may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
return Column(jc)
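# Hedged sketch (assumes a DataFrame `df` with a nullable 'age' column):
#
#   df.agg(first('age')).collect()                    # may pick up a null
#   df.agg(first('age', ignorenulls=True)).collect()  # skips leading nulls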
@since(2.0)
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
@since(2.0)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. note:: The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
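# Hedged sketch (the path is a placeholder): the column is populated when rows
# originate from files, e.g.
#
#   spark.read.text('/tmp/input.txt').select(input_file_name()).first()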
@since(1.6)
def isnan(col):
"""An expression that returns true iff the column is NaN.
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
@since(1.6)
def isnull(col):
"""An expression that returns true iff the column is null.
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
@since(1.3)
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
The function by default returns the last values it sees. It will return the last non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. note:: The function is non-deterministic because its results depends on order of rows
which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
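# Hedged sketch, mirroring `first` above (assumes the same DataFrame `df`):
#
#   df.agg(last('age', ignorenulls=True)).collect()  # skips trailing nulls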
@since(1.6)
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
The current implementation puts the partition ID in the upper 31 bits, and the record number
within each partition in the lower 33 bits. The assumption is that the data frame has
less than 1 billion partitions, and each partition has less than 8 billion records.
.. note:: The function is non-deterministic because its result depends on partition IDs.
As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
This expression would return the following IDs:
0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
>>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
>>> df0.select(monotonically_increasing_id().alias('id')).collect()
[Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.monotonically_increasing_id())
@since(1.6)
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(1.4)
def rand(seed=None):
"""Generates a random column with independent and identically distributed (i.i.d.) samples
from U[0.0, 1.0].
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('rand', rand(seed=42) * 3).collect()
[Row(age=2, name=u'Alice', rand=1.1568609015300986),
Row(age=5, name=u'Bob', rand=1.403379671529166)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.rand(seed)
else:
jc = sc._jvm.functions.rand()
return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def randn(seed=None):
"""Generates a column with independent and identically distributed (i.i.d.) samples from
the standard normal distribution.
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('randn', randn(seed=42)).collect()
[Row(age=2, name=u'Alice', randn=-0.7556247885860078),
Row(age=5, name=u'Bob', randn=-0.0861619008451133)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.randn(seed)
else:
jc = sc._jvm.functions.randn()
return Column(jc)
@since(1.5)
def round(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
[Row(r=3.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.round(_to_java_column(col), scale))
@since(2.0)
def bround(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
[Row(r=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.bround(_to_java_column(col), scale))
@since(1.5)
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
@since(1.5)
def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
return Column(jc)
@since(1.5)
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
@since(1.6)
def spark_partition_id():
"""A column for partition ID.
    .. note:: This is non-deterministic because it depends on data partitioning and task scheduling.
>>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
[Row(pid=0), Row(pid=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.spark_partition_id())
@since(1.5)
def expr(str):
"""Parses the expression string into the column that it represents
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str))
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
"""Creates a new struct column.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> df.select(struct('age', 'name').alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
>>> df.select(struct([df.age, df.name]).alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def greatest(*cols):
"""
Returns the greatest value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
[Row(greatest=4)]
"""
if len(cols) < 2:
raise ValueError("greatest should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column)))
@since(1.5)
def least(*cols):
"""
Returns the least value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
[Row(least=1)]
"""
if len(cols) < 2:
raise ValueError("least should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column)))
@since(1.4)
def when(condition, value):
"""Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
[Row(age=3), Row(age=4)]
>>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
[Row(age=3), Row(age=None)]
"""
sc = SparkContext._active_spark_context
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = sc._jvm.functions.when(condition._jc, v)
return Column(jc)
@since(1.5)
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
"""
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc)
@since(1.5)
def log2(col):
"""Returns the base-2 logarithm of the argument.
>>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
[Row(log2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.log2(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
@since(1.5)
def factorial(col):
"""
Computes the factorial of the given value.
>>> df = spark.createDataFrame([(5,)], ['n'])
>>> df.select(factorial(df.n).alias('f')).collect()
[Row(f=120)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.factorial(_to_java_column(col)))
# --------------- Window functions ------------------------
@since(1.4)
def lag(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows before the current row, and
    `default` if there are fewer than `offset` rows before the current row. For example,
an `offset` of one will return the previous row at any given point in the window partition.
This is equivalent to the LAG function in SQL.
:param col: name of column or expression
    :param offset: number of rows to look back
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lag(_to_java_column(col), offset, default))
@since(1.4)
def lead(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows after the current row, and
    `default` if there are fewer than `offset` rows after the current row. For example,
an `offset` of one will return the next row at any given point in the window partition.
This is equivalent to the LEAD function in SQL.
:param col: name of column or expression
    :param offset: number of rows to look ahead
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lead(_to_java_column(col), offset, default))
@since(1.4)
def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE function in SQL.
:param n: an integer
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.ntile(int(n)))
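# Hedged usage sketch for the three window functions above (assumes a DataFrame
# `df` with 'name' and 'age' columns); each must be applied with .over():
#
#   from pyspark.sql.window import Window
#   w = Window.partitionBy('name').orderBy('age')
#   df.select(lag('age', 1).over(w).alias('prev'),
#             lead('age', 1).over(w).alias('next'),
#             ntile(2).over(w).alias('bucket'))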
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
"""
Returns the current date as a :class:`DateType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_date())
@since(1.5)
def current_timestamp():
"""
Returns the current timestamp as a :class:`TimestampType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_timestamp())
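# Hedged sketch: both columns are evaluated once at query start, so every row
# in the same query sees the same value:
#
#   spark.range(2).select(current_date(), current_timestamp()).collect()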
@ignore_unicode_prefix
@since(1.5)
def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of the Java class `java.time.format.DateTimeFormatter` can be used.
    .. note:: Whenever possible, use specialized functions like `year`, since they benefit from a
        specialized implementation.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date=u'04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format))
@since(1.5)
def year(col):
"""
Extract the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(year('dt').alias('year')).collect()
[Row(year=2015)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.year(_to_java_column(col)))
@since(1.5)
def quarter(col):
"""
Extract the quarter of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(quarter('dt').alias('quarter')).collect()
[Row(quarter=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.quarter(_to_java_column(col)))
@since(1.5)
def month(col):
"""
Extract the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(month('dt').alias('month')).collect()
[Row(month=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.month(_to_java_column(col)))
@since(2.3)
def dayofweek(col):
"""
Extract the day of the week of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofweek('dt').alias('day')).collect()
[Row(day=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofweek(_to_java_column(col)))
@since(1.5)
def dayofmonth(col):
"""
Extract the day of the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofmonth('dt').alias('day')).collect()
[Row(day=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofmonth(_to_java_column(col)))
@since(1.5)
def dayofyear(col):
"""
Extract the day of the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofyear('dt').alias('day')).collect()
[Row(day=98)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofyear(_to_java_column(col)))
@since(1.5)
def hour(col):
"""
Extract the hours of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(hour('ts').alias('hour')).collect()
[Row(hour=13)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hour(_to_java_column(col)))
@since(1.5)
def minute(col):
"""
Extract the minutes of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(minute('ts').alias('minute')).collect()
[Row(minute=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.minute(_to_java_column(col)))
@since(1.5)
def second(col):
"""
Extract the seconds of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(second('ts').alias('second')).collect()
[Row(second=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.second(_to_java_column(col)))
@since(1.5)
def weekofyear(col):
"""
Extract the week number of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(weekofyear(df.dt).alias('week')).collect()
[Row(week=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.weekofyear(_to_java_column(col)))
@since(1.5)
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
@since(1.5)
def date_sub(start, days):
"""
Returns the date that is `days` days before `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
[Row(prev_date=datetime.date(2015, 4, 7))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_sub(_to_java_column(start), days))
@since(1.5)
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
@since(1.5)
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
@since(1.5)
def months_between(date1, date2, roundOff=True):
"""
Returns number of months between dates date1 and date2.
If date1 is later than date2, then the result is positive.
If date1 and date2 are on the same day of month, or both are the last day of month,
returns an integer (time of day will be ignored).
The result is rounded off to 8 digits unless `roundOff` is set to `False`.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
>>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
[Row(months=3.94959677)]
>>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
[Row(months=3.9495967741935485)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months_between(
_to_java_column(date1), _to_java_column(date2), roundOff))
@since(2.2)
def to_date(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted (equivalent to ``col.cast("date")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_date(_to_java_column(col))
else:
jc = sc._jvm.functions.to_date(_to_java_column(col), format)
return Column(jc)
@since(2.2)
def to_timestamp(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted (equivalent to ``col.cast("timestamp")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
else:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
return Column(jc)
@since(1.5)
def trunc(date, format):
"""
Returns date truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
>>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
>>> df.select(trunc(df.d, 'year').alias('year')).collect()
[Row(year=datetime.date(1997, 1, 1))]
>>> df.select(trunc(df.d, 'mon').alias('month')).collect()
[Row(month=datetime.date(1997, 2, 1))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.trunc(_to_java_column(date), format))
@since(2.3)
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
@since(1.5)
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column.
Day of the week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
@since(1.5)
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date)))
@ignore_unicode_prefix
@since(1.5)
def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"):
"""
Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
representing the timestamp of that moment in the current system time zone in the given
format.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
>>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
[Row(ts=u'2015-04-08 00:00:00')]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format))
@since(1.5)
def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
"""
Convert time string with given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to Unix time stamp (in seconds), using the default timezone and the default
    locale, returning null on failure.
    If `timestamp` is None, then it returns the current timestamp.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
[Row(unix_time=1428476400)]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
if timestamp is None:
return Column(sc._jvm.functions.unix_timestamp())
return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format))
@since(1.5)
def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
    However, a timestamp in Spark represents a number of microseconds from the Unix epoch, which is
    not timezone-agnostic. So in Spark this function just shifts the timestamp value from the UTC
    time zone to the given time zone.
    This function may return a confusing result if the input is a string with a timezone, e.g.
    '2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
    according to the timezone in the string, and finally displays the result by converting the
    timestamp to a string according to the session-local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
@since(1.5)
def to_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given
timezone, and renders that timestamp as a timestamp in UTC.
    However, a timestamp in Spark represents a number of microseconds from the Unix epoch, which is
    not timezone-agnostic. So in Spark this function just shifts the timestamp value from the given
    time zone to the UTC time zone.
    This function may return a confusing result if the input is a string with a timezone, e.g.
    '2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
    according to the timezone in the string, and finally displays the result by converting the
    timestamp to a string according to the session-local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
>>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz))
@since(2.0)
@ignore_unicode_prefix
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
"""Bucketize rows into one or more time windows given a timestamp specifying column. Window
starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
[12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
the order of months are not supported.
The time column must be of :class:`pyspark.sql.types.TimestampType`.
Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
If the ``slideDuration`` is not provided, the windows will be tumbling windows.
The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
The output column will be a struct called 'window' by default with the nested columns 'start'
and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
>>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
>>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
>>> w.select(w.window.start.cast("string").alias("start"),
... w.window.end.cast("string").alias("end"), "sum").collect()
[Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)]
"""
def check_string_field(field, fieldName):
if not field or type(field) is not str:
raise TypeError("%s should be provided as a string" % fieldName)
sc = SparkContext._active_spark_context
time_col = _to_java_column(timeColumn)
check_string_field(windowDuration, "windowDuration")
if slideDuration and startTime:
check_string_field(slideDuration, "slideDuration")
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
elif slideDuration:
check_string_field(slideDuration, "slideDuration")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration)
elif startTime:
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
else:
res = sc._jvm.functions.window(time_col, windowDuration)
return Column(res)
# ---------------------------- misc functions ----------------------------------
@since(1.5)
@ignore_unicode_prefix
def crc32(col):
"""
Calculates the cyclic redundancy check value (CRC32) of a binary column and
returns the value as a bigint.
>>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
[Row(crc32=2743272264)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.crc32(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def md5(col):
"""Calculates the MD5 digest and returns the value as a 32 character hex string.
>>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
[Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.md5(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha1(col):
"""Returns the hex string result of SHA-1.
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
[Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha1(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha2(col, numBits):
"""Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
and SHA-512). The numBits indicates the desired bit length of the result, which must have a
value of 224, 256, 384, 512, or 0 (which is equivalent to 256).
>>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
>>> digests[0]
Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
>>> digests[1]
Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
return Column(jc)
@since(2.0)
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc)
# ---------------------- String/Binary functions ------------------------------
_string_functions = {
'ascii': 'Computes the numeric value of the first character of the string column.',
'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.',
'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.',
'ltrim': 'Trim the spaces from left end for the specified string value.',
'rtrim': 'Trim the spaces from right end for the specified string value.',
'trim': 'Trim the spaces from both ends for the specified string column.',
}
for _name, _doc in _string_functions.items():
globals()[_name] = since(1.5)(_create_function(_name, _doc))
del _name, _doc
@since(1.5)
@ignore_unicode_prefix
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s=u'abcd-123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
"""
Computes the first argument into a string from a binary using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
"""
Computes the first argument into a binary from a string using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.encode(_to_java_column(col), charset))
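# Hedged round-trip sketch (assumes a DataFrame `df` with a string column 's'):
#
#   df.select(decode(encode('s', 'UTF-8'), 'UTF-8').alias('roundtrip'))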
@ignore_unicode_prefix
@since(1.5)
def format_number(col, d):
"""
    Formats the number X to a format like '#,###,###.##', rounded to d decimal places
    with HALF_EVEN round mode, and returns the result as a string.
    :param col: the column name of the numeric value to be formatted
    :param d: the number of decimal places
>>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
[Row(v=u'5.0000')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
@ignore_unicode_prefix
@since(1.5)
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
    :param format: string that can contain embedded format tags and used as result column's value
    :param cols: list of column names (string) or list of :class:`Column` expressions to
        be used in formatting
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v=u'5 hello')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def instr(str, substr):
"""
Locate the position of the first occurrence of substr column in the given string.
    Returns null if either of the arguments is null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
@since(1.5)
@ignore_unicode_prefix
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
@since(1.5)
@ignore_unicode_prefix
def substring_index(str, delim, count):
"""
Returns the substring from string str before count occurrences of the delimiter delim.
    If count is positive, everything to the left of the final delimiter (counting from the left) is
    returned. If count is negative, everything to the right of the final delimiter (counting from
    the right) is returned. substring_index performs a case-sensitive match when searching for delim.
>>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
>>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
[Row(s=u'a.b')]
>>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
[Row(s=u'b.c.d')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count))
@ignore_unicode_prefix
@since(1.5)
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
@since(1.5)
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :class:`pyspark.sql.types.StringType`
    :param pos: start position (1 based)
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
@since(1.5)
@ignore_unicode_prefix
def lpad(col, len, pad):
"""
Left-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'##abcd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def rpad(col, len, pad):
"""
Right-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'abcd##')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
@since(1.5)
@ignore_unicode_prefix
def split(str, pattern, limit=-1):
"""
Splits str around matches of the given pattern.
:param str: a string expression to split
:param pattern: a string representing a regular expression. The regex string should be
a Java regular expression.
:param limit: an integer which controls the number of times `pattern` is applied.
* ``limit > 0``: The resulting array's length will not be more than `limit`, and the
resulting array's last entry will contain all input beyond the last
matched pattern.
* ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
array can be of any size.
.. versionchanged:: 3.0
`split` now takes an optional `limit` field. If not provided, default limit value is -1.
>>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
>>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
[Row(s=[u'one', u'twoBthreeC'])]
>>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
[Row(s=[u'one', u'two', u'three', u''])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
@ignore_unicode_prefix
@since(1.5)
def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d=u'100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d=u'')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d=u'')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d=u'-----')]
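Java-style group references can be used in the replacement string:
>>> df.select(regexp_replace('str', r'(\d+)-(\d+)', '$2-$1').alias('d')).collect()
[Row(d=u'200-100')]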
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def initcap(col):
"""Translate the first letter of each word to upper case in the sentence.
>>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
[Row(v=u'Ab Cd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.initcap(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def soundex(col):
"""
Returns the SoundEx encoding for a string.
>>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
>>> df.select(soundex(df.name).alias("soundex")).collect()
[Row(soundex=u'P362'), Row(soundex=u'U612')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.soundex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def bin(col):
"""Returns the string representation of the binary value of the given column.
>>> df.select(bin(df.age).alias('c')).collect()
[Row(c=u'10'), Row(c=u'101')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.bin(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def hex(col):
"""Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
:class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
:class:`pyspark.sql.types.LongType`.
>>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
[Row(hex(a)=u'414243', hex(b)=u'3')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hex(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def unhex(col):
"""Inverse of hex. Interprets each pair of characters as a hexadecimal number
and converts to the byte representation of number.
>>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
[Row(unhex(a)=bytearray(b'ABC'))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unhex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def length(col):
"""Computes the character length of string data or number of bytes of binary data.
The length of character data includes the trailing spaces. The length of binary data
includes binary zeros.
>>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
[Row(length=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.length(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
@ignore_unicode_prefix
@since(2.0)
def create_map(*cols):
"""Creates a new map column.
:param cols: list of column names (string) or list of :class:`Column` expressions that are
grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).
>>> df.select(create_map('name', 'age').alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
>>> df.select(create_map([df.name, df.age]).alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def map_from_arrays(col1, col2):
"""Creates a new map from two arrays.
:param col1: name of column containing a set of keys. All elements should not be null
:param col2: name of column containing a set of values
>>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
>>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
+----------------+
| map|
+----------------+
|[2 -> a, 5 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def array(*cols):
"""Creates a new array column.
:param cols: list of column names (string) or list of :class:`Column` expressions that have
the same data type.
>>> df.select(array('age', 'age').alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
>>> df.select(array([df.age, df.age]).alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def array_contains(col, value):
"""
Collection function: returns null if the array is null, true if the array contains the
given value, and false otherwise.
:param col: name of column containing array
:param value: value to check for in array
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(array_contains(df.data, "a")).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_contains(_to_java_column(col), value))
@since(2.4)
def arrays_overlap(a1, a2):
"""
Collection function: returns true if the arrays contain any common non-null element; if not,
returns null if both the arrays are non-empty and any of them contains a null element; returns
false otherwise.
>>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
>>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
[Row(overlap=True), Row(overlap=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
@since(2.4)
def slice(x, start, length):
"""
Collection function: returns an array containing all the elements in `x` from index `start`
(or starting from the end if `start` is negative) with the specified `length`.
>>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
>>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
[Row(sliced=[2, 3]), Row(sliced=[5])]
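A negative `start` counts from the end of each array:
>>> df.select(slice(df.x, -2, 2).alias("sliced")).collect()
[Row(sliced=[2, 3]), Row(sliced=[4, 5])]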
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.slice(_to_java_column(x), start, length))
@ignore_unicode_prefix
@since(2.4)
def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a')]
>>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
"""
sc = SparkContext._active_spark_context
if null_replacement is None:
return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
else:
return Column(sc._jvm.functions.array_join(
_to_java_column(col), delimiter, null_replacement))
@since(1.5)
@ignore_unicode_prefix
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
The function works with strings, binary and compatible array columns.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s=u'abcd123')]
>>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
>>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
[Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def array_position(col, value):
"""
Collection function: Locates the position of the first occurrence of the given value
in the given array. Returns null if either of the arguments is null.
.. note:: The position is not zero-based, but 1-based. Returns 0 if the given
value could not be found in the array.
>>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
>>> df.select(array_position(df.data, "a")).collect()
[Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_position(_to_java_column(col), value))
@ignore_unicode_prefix
@since(2.4)
def element_at(col, extraction):
"""
Collection function: Returns element of array at given index in extraction if col is array.
Returns value for the given key in extraction if col is map.
:param col: name of column containing array or map
:param extraction: index to check for in array or key to check for in map
.. note:: The position is not zero-based, but 1-based.
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(element_at(df.data, 1)).collect()
[Row(element_at(data, 1)=u'a'), Row(element_at(data, 1)=None)]
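A negative index accesses elements from the end of the array:
>>> df.select(element_at(df.data, -1)).collect()
[Row(element_at(data, -1)=u'c'), Row(element_at(data, -1)=None)]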
>>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
>>> df.select(element_at(df.data, "a")).collect()
[Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.element_at(_to_java_column(col), extraction))
@since(2.4)
def array_remove(col, element):
"""
Collection function: removes all elements equal to `element` from the given array.
:param col: name of column containing array
:param element: element to be removed from the array
>>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
>>> df.select(array_remove(df.data, 1)).collect()
[Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_remove(_to_java_column(col), element))
@since(2.4)
def array_distinct(col):
"""
Collection function: removes duplicate values from the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
>>> df.select(array_distinct(df.data)).collect()
[Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_distinct(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_intersect(col1, col2):
"""
Collection function: returns an array of the elements in the intersection of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_intersect(df.c1, df.c2)).collect()
[Row(array_intersect(c1, c2)=[u'a', u'c'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_union(col1, col2):
"""
Collection function: returns an array of the elements in the union of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_union(df.c1, df.c2)).collect()
[Row(array_union(c1, c2)=[u'b', u'a', u'c', u'd', u'f'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_except(col1, col2):
"""
Collection function: returns an array of the elements in col1 but not in col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_except(df.c1, df.c2)).collect()
[Row(array_except(c1, c2)=[u'b'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def explode(col):
"""Returns a new row for each element in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc)
@since(2.1)
def posexplode(col):
"""Returns a new row for each element with position in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(posexplode(eDF.intlist)).collect()
[Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
>>> eDF.select(posexplode(eDF.mapfield)).show()
+---+---+-----+
|pos|key|value|
+---+---+-----+
| 0| a| b|
+---+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode(_to_java_column(col))
return Column(jc)
@since(2.3)
def explode_outer(col):
"""Returns a new row for each element in the given array or map.
Unlike explode, if the array/map is null or empty then null is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", explode_outer("a_map")).show()
+---+----------+----+-----+
| id| an_array| key|value|
+---+----------+----+-----+
| 1|[foo, bar]| x| 1.0|
| 2| []|null| null|
| 3| null|null| null|
+---+----------+----+-----+
>>> df.select("id", "a_map", explode_outer("an_array")).show()
+---+----------+----+
| id| a_map| col|
+---+----------+----+
| 1|[x -> 1.0]| foo|
| 1|[x -> 1.0]| bar|
| 2| []|null|
| 3| null|null|
+---+----------+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode_outer(_to_java_column(col))
return Column(jc)
@since(2.3)
def posexplode_outer(col):
"""Returns a new row for each element with position in the given array or map.
Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", posexplode_outer("a_map")).show()
+---+----------+----+----+-----+
| id| an_array| pos| key|value|
+---+----------+----+----+-----+
| 1|[foo, bar]| 0| x| 1.0|
| 2| []|null|null| null|
| 3| null|null|null| null|
+---+----------+----+----+-----+
>>> df.select("id", "a_map", posexplode_outer("an_array")).show()
+---+----------+----+----+
| id| a_map| pos| col|
+---+----------+----+----+
| 1|[x -> 1.0]| 0| foo|
| 1|[x -> 1.0]| 1| bar|
| 2| []|null|null|
| 3| null|null|null|
+---+----------+----+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode_outer(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def get_json_object(col, path):
"""
Extracts json object from a json string based on json path specified, and returns json string
of the extracted json object. It will return null if the input json string is invalid.
:param col: string column in json format
:param path: path to the json object to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
:param col: string column in json format
:param fields: list of fields to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def from_json(col, schema, options={}):
"""
Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
as keys type, a :class:`StructType`, or an :class:`ArrayType` with
the specified schema. Returns `null` in the case of an unparseable string.
:param col: string column in json format
:param schema: a StructType or ArrayType of StructType to use when parsing the json column.
:param options: options to control parsing. accepts the same options as the json datasource
.. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also
supported for ``schema``.
>>> from pyspark.sql.types import *
>>> data = [(1, '''{"a": 1}''')]
>>> schema = StructType([StructField("a", IntegerType())])
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "a INT").alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
[Row(json={u'a': 1})]
>>> data = [(1, '''[{"a": 1}]''')]
>>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[Row(a=1)])]
>>> schema = schema_of_json(lit('''{"a": 0}'''))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=None))]
>>> data = [(1, '''[1, 2, 3]''')]
>>> schema = ArrayType(IntegerType())
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[1, 2, 3])]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, DataType):
schema = schema.json()
elif isinstance(schema, Column):
schema = _to_java_column(schema)
jc = sc._jvm.functions.from_json(_to_java_column(col), schema, options)
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def to_json(col, options={}):
"""
Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
into a JSON string. Throws an exception in the case of an unsupported type.
:param col: name of column containing a struct, an array or a map.
:param options: options to control converting. accepts the same options as the JSON datasource.
Additionally the function supports the `pretty` option which enables
pretty JSON generation.
>>> from pyspark.sql import Row
>>> from pyspark.sql.types import *
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"age":2,"name":"Alice"}')]
>>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
>>> data = [(1, {"name": "Alice"})]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"name":"Alice"}')]
>>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')]
>>> data = [(1, ["Alice", "Bob"])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'["Alice","Bob"]')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_json(_to_java_column(col), options)
return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def schema_of_json(json, options={}):
"""
Parses a JSON string and infers its schema in DDL format.
:param json: a JSON string or a string literal containing a JSON string.
:param options: options to control parsing. accepts the same options as the JSON datasource
.. versionchanged:: 3.0
It accepts `options` parameter to control schema inferring.
>>> df = spark.range(1)
>>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
>>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
>>> df.select(schema.alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
"""
if isinstance(json, basestring):
col = _create_column_from_literal(json)
elif isinstance(json, Column):
col = _to_java_column(json)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_json(col, options)
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def schema_of_csv(csv, options={}):
"""
Parses a CSV string and infers its schema in DDL format.
:param csv: a CSV string or a string literal containing a CSV string.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> df = spark.range(1)
>>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
>>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
"""
if isinstance(csv, basestring):
col = _create_column_from_literal(csv)
elif isinstance(csv, Column):
col = _to_java_column(csv)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_csv(col, options)
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def to_csv(col, options={}):
"""
Converts a column containing a :class:`StructType` into a CSV string.
Throws an exception in the case of an unsupported type.
:param col: name of column containing a struct.
:param options: options to control converting. accepts the same options as the CSV datasource.
>>> from pyspark.sql import Row
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_csv(df.value).alias("csv")).collect()
[Row(csv=u'2,Alice')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_csv(_to_java_column(col), options)
return Column(jc)
@since(1.5)
def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
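It also counts the entries of a map:
>>> df2 = spark.sql("SELECT map(1, 'a', 2, 'b') as m")
>>> df2.select(size(df2.m)).collect()
[Row(size(m)=2)]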
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.size(_to_java_column(col)))
@since(2.4)
def array_min(col):
"""
Collection function: returns the minimum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_min(df.data).alias('min')).collect()
[Row(min=1), Row(min=-1)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_min(_to_java_column(col)))
@since(2.4)
def array_max(col):
"""
Collection function: returns the maximum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_max(df.data).alias('max')).collect()
[Row(max=3), Row(max=10)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_max(_to_java_column(col)))
@since(1.5)
def sort_array(col, asc=True):
"""
Collection function: sorts the input array in ascending or descending order according
to the natural ordering of the array elements. Null elements will be placed at the beginning
of the returned array in ascending order or at the end of the returned array in descending
order.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(sort_array(df.data).alias('r')).collect()
[Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
>>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
[Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
@since(2.4)
def array_sort(col):
"""
Collection function: sorts the input array in ascending order. The elements of the input array
must be orderable. Null elements will be placed at the end of the returned array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(array_sort(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_sort(_to_java_column(col)))
@since(2.4)
def shuffle(col):
"""
Collection function: Generates a random permutation of the given array.
.. note:: The function is non-deterministic.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
>>> df.select(shuffle(df.data).alias('s')).collect() # doctest: +SKIP
[Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shuffle(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def reverse(col):
"""
Collection function: returns a reversed string or an array with reverse order of elements.
:param col: name of column or expression
>>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
>>> df.select(reverse(df.data).alias('s')).collect()
[Row(s=u'LQS krapS')]
>>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
>>> df.select(reverse(df.data).alias('r')).collect()
[Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.reverse(_to_java_column(col)))
@since(2.4)
def flatten(col):
"""
Collection function: creates a single array from an array of arrays.
If a structure of nested arrays is deeper than two levels,
only one level of nesting is removed.
:param col: name of column or expression
>>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
>>> df.select(flatten(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.flatten(_to_java_column(col)))
@since(2.3)
def map_keys(col):
"""
Collection function: Returns an unordered array containing the keys of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_keys
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_keys("data").alias("keys")).show()
+------+
| keys|
+------+
|[1, 2]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_keys(_to_java_column(col)))
@since(2.3)
def map_values(col):
"""
Collection function: Returns an unordered array containing the values of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_values
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_values("data").alias("values")).show()
+------+
|values|
+------+
|[a, b]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_values(_to_java_column(col)))
@since(3.0)
def map_entries(col):
"""
Collection function: Returns an unordered array of all entries in the given map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_entries
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_entries("data").alias("entries")).show()
+----------------+
| entries|
+----------------+
|[[1, a], [2, b]]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_entries(_to_java_column(col)))
@since(2.4)
def map_from_entries(col):
"""
Collection function: Returns a map created from the given array of entries.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_from_entries
>>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
>>> df.select(map_from_entries("data").alias("map")).show()
+----------------+
| map|
+----------------+
|[1 -> a, 2 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_entries(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_repeat(col, count):
"""
Collection function: creates an array containing a column repeated count times.
>>> df = spark.createDataFrame([('ab',)], ['data'])
>>> df.select(array_repeat(df.data, 3).alias('r')).collect()
[Row(r=[u'ab', u'ab', u'ab'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_repeat(_to_java_column(col), count))
@since(2.4)
def arrays_zip(*cols):
"""
Collection function: Returns a merged array of structs in which the N-th struct contains all
N-th values of input arrays.
:param cols: columns of arrays to be merged.
>>> from pyspark.sql.functions import arrays_zip
>>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
>>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
[Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_zip(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def map_concat(*cols):
"""Returns the union of all the given maps.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> from pyspark.sql.functions import map_concat
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
>>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
+------------------------+
|map3 |
+------------------------+
|[1 -> d, 2 -> b, 3 -> c]|
+------------------------+
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, it defaults to 1 if `start` is less than or equal to `stop`,
and to -1 otherwise.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
@ignore_unicode_prefix
@since(3.0)
def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null` in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, basestring):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, options)
return Column(jc)
# ---------------------------- User Defined Function ----------------------------------
class PandasUDFType(object):
"""Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
"""
SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
@since(1.3)
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
.. note:: The user-defined functions do not support conditional expressions or short
circuiting in boolean expressions, and all branches end up being executed internally. If
the function can fail on special rows, the workaround is to incorporate the condition
into the function itself.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
:param f: python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
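The return type can equivalently be given as a DDL-formatted type string:
>>> slen_ddl = udf(lambda s: len(s), "int")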
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
"""
# The following table shows most of Python data and SQL type conversions in normal UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-25666's PR to see the codes in order to generate the table below.
#
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
# |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)|1(long)| a(str)| a(unicode)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)| ABC(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
# | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | tinyint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | smallint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | int| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | bigint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | string| None| u'true'| u'1'| u'1'| u'a'| u'a'|u'java.util.Grego...| u'java.util.Grego...| u'1.0'| u'[I@24a83055'| u'[1]'|u'[Ljava.lang.Obj...| u'[B@49093632'| u'1'| u'{a=1}'| X| X| # noqa
# | date| None| X| X| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa
# | float| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | double| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | array<int>| None| None| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa
# | binary| None| None| None| None|bytearray(b'a')|bytearray(b'a')| None| None| None| None| None| None|bytearray(b'ABC')| None| None| X| X| # noqa
# | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa
# | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| None| None| {u'a': 1}| X| X| # noqa
# | struct<_1:int>| None| X| X| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 2 is used to generate this table since it is used to check the backward
# compatibility often in practice.
# Note: 'X' means it throws an exception during the conversion.
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a vectorized user defined function (UDF).
:param f: user-defined function. A python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR.
.. note:: Experimental
The function type of the UDF can be one of the following:
1. SCALAR
A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`.
The length of the returned `pandas.Series` must be the same as that of the input `pandas.Series`.
If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`.
:class:`MapType`, nested :class:`StructType` are currently not supported as output types.
Scalar UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql.types import IntegerType, StringType
>>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP
>>> @pandas_udf(StringType()) # doctest: +SKIP
... def to_upper(s):
... return s.str.upper()
...
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)],
... ("id", "name", "age")) # doctest: +SKIP
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\
... .show() # doctest: +SKIP
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
>>> @pandas_udf("first string, last string") # doctest: +SKIP
... def split_expand(n):
... return n.str.split(expand=True)
>>> df.select(split_expand("name")).show() # doctest: +SKIP
+------------------+
|split_expand(name)|
+------------------+
| [John, Doe]|
+------------------+
.. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input
column, but is the length of an internal batch used for each call to the function.
It can therefore be used, for example, to size each returned `pandas.Series`,
but it can not be used as the column length.
2. GROUPED_MAP
A grouped map UDF defines a transformation: A `pandas.DataFrame` -> A `pandas.DataFrame`
The returnType should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined returnType schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can define a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def mean_udf(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> @pandas_udf(
... "id long, `ceil(v / 2)` long, v double",
... PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def sum_udf(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. seealso:: :meth:`pyspark.sql.GroupedData.apply`
3. GROUPED_AGG
A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar
The `returnType` should be a primitive data type, e.g., :class:`DoubleType`.
The returned scalar can be either a python primitive type, e.g., `int` or `float`
or a numpy data type, e.g., `numpy.int64` or `numpy.float64`.
:class:`MapType` and :class:`StructType` are currently not supported as output types.
Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and
:class:`pyspark.sql.Window`
This example shows using grouped aggregate UDFs with groupby:
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This example shows using grouped aggregate UDFs as window functions.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql import Window
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> w = (Window.partitionBy('id')
... .orderBy('v')
... .rowsBetween(-1, 0))
>>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.0|
| 1| 2.0| 1.5|
| 2| 3.0| 3.0|
| 2| 5.0| 4.0|
| 2|10.0| 7.5|
+---+----+------+
.. note:: For performance reasons, the input series to window functions are not copied.
Therefore, mutating the input series is not allowed and will cause incorrect results.
For the same reason, users should also not rely on the index of the input series.
.. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window`
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP
... def random(v):
... import numpy as np
... import pandas as pd
... return pd.Series(np.random.randn(len(v)))
>>> random = random.asNondeterministic() # doctest: +SKIP
.. note:: The user-defined functions do not support conditional expressions or short
circuiting in boolean expressions, and all branches end up being executed internally. If
the function can fail on special rows, the workaround is to incorporate the condition
into the function itself.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
.. note:: The data type of returned `pandas.Series` from the user-defined functions should be
matched with defined returnType (see :meth:`types.to_arrow_type` and
:meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
conversion on returned data. The conversion is not guaranteed to be correct and results
should be checked for accuracy by users.
"""
# The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-25798's PR to see the codes in order to generate the table below.
#
# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# |SQL Type \ Pandas Value(Type)|None(object(NoneType))|True(bool)|1(int8)|1(int16)| 1(int32)| 1(int64)|1(uint8)|1(uint16)|1(uint32)|1(uint64)|1.0(float16)|1.0(float32)|1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))|1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa
# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# | boolean| None| True| True| True| True| True| True| True| True| True| False| False| False| False| False| X| X| X| False| False| False| X| False| # noqa
# | tinyint| None| 1| 1| 1| 1| 1| X| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| 0| X| # noqa
# | smallint| None| 1| 1| 1| 1| 1| 1| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa
# | int| None| 1| 1| 1| 1| 1| 1| 1| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa
# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| X| 1| 1| 1| 0| 18000000000000| X| X| X| X| X| X| X| X| # noqa
# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X|1.401298464324817...| X| X| X| X| X| X| # noqa
# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X| X| X| X| X| X| X| X| # noqa
# | string| None| u''|u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u''| u''| u''| X| X| u'a'| X| X| u''| u''| u''| X| X| # noqa
# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa
# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa
# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | binary| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 2 is used to generate this table since it is used to check the backward
# compatibility often in practice.
# Note: Pandas 0.19.2 and PyArrow 0.9.0 are used.
# Note: Timezone is Singapore timezone.
# Note: 'X' means it throws an exception during the conversion.
# Note: 'binary' type is only supported with PyArrow 0.10.0+ (SPARK-23555).
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if return_type is None:
raise ValueError("Invalid returnType: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]:
raise ValueError("Invalid functionType: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_udf, returnType=return_type, evalType=eval_type)
else:
return _create_udf(f=f, returnType=return_type, evalType=eval_type)
blacklist = ['map', 'since', 'ignore_unicode_prefix']
__all__ = [k for k, v in globals().items()
if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist]
__all__ += ["PandasUDFType"]
__all__.sort()
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
martin-hunt/foobar | docs/conf.py | 1 | 8160 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/_rst")
module_dir = os.path.join(__location__, "../foobar")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
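# For reference, the generated command above is equivalent to running (a
# sketch, assuming this conf.py lives in docs/ at the repository root):
#
#     sphinx-apidoc -f -o docs/_rst ../foobar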
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'foobar'
copyright = u'2014, Martin Hunt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from foobar import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'foobar-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'foobar Documentation',
u'Martin Hunt', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| mit |
claudep/pootle | tests/models/virtualfolder.py | 7 | 7012 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from __future__ import absolute_import
from django.core.exceptions import ValidationError
import pytest
from pytest_pootle.factories import VirtualFolderDBFactory
from pootle_language.models import Language
from pootle_store.models import Store
from virtualfolder.models import VirtualFolder
@pytest.mark.django_db
def test_vfolder_priority_not_greater_than_zero(tp0):
"""Tests that the creation of a virtual folder fails if the provided
priority is not greater than zero.
"""
# Test priority less than zero.
vfolder_item = {
'name': "whatever",
'priority': -3,
'is_public': True,
'filter_rules': "browser/defines.po",
}
with pytest.raises(ValidationError) as excinfo:
VirtualFolder.objects.create(**vfolder_item)
assert u'Priority must be greater than zero.' in str(excinfo.value)
# Test zero priority.
vfolder_item['priority'] = 1
vfolder = VirtualFolder.objects.create(**vfolder_item)
vfolder.priority = 0
with pytest.raises(ValidationError) as excinfo:
vfolder.save()
assert u'Priority must be greater than zero.' in str(excinfo.value)
@pytest.mark.django_db
def test_vfolder_with_no_filter_rules():
"""Tests that the creation of a virtual folder fails if it doesn't have any
filter rules.
"""
vfolder_item = {
'name': "whatever",
'priority': 4,
'is_public': True,
'filter_rules': "",
}
with pytest.raises(ValidationError) as excinfo:
VirtualFolder.objects.create(**vfolder_item)
assert u'Some filtering rule must be specified.' in str(excinfo.value)
vfolder_item["filter_rules"] = "FOO"
vf = VirtualFolder.objects.create(**vfolder_item)
vf.filter_rules = ""
with pytest.raises(ValidationError) as excinfo:
vf.save()
assert u'Some filtering rule must be specified.' in str(excinfo.value)
@pytest.mark.django_db
def test_vfolder_membership(tp0, store0):
tp0_stores = ",".join(
p[len(tp0.pootle_path):]
for p in tp0.stores.values_list("pootle_path", flat=True))
vf0 = VirtualFolder.objects.create(
name="vf0",
title="the vf0",
filter_rules=store0.name)
vf0.projects.add(tp0.project)
vf0.languages.add(tp0.language)
vf0.save()
assert vf0.stores.count() == 1
assert vf0.stores.first() == store0
vf1 = VirtualFolder.objects.create(
name="vf1",
title="the vf1",
filter_rules=tp0_stores)
vf1.projects.add(tp0.project)
vf1.languages.add(tp0.language)
vf1.save()
assert (
list(vf1.stores.order_by("pk"))
== list(tp0.stores.order_by("pk")))
store_name = vf1.filter_rules.split(",")[0]
vf1.filter_rules = ",".join(vf1.filter_rules.split(",")[1:])
store = vf1.stores.get(name=store_name)
vf1.save()
assert store not in vf1.stores.all()
vf1.filter_rules = ",".join([store_name, vf1.filter_rules])
vf1.save()
assert store in vf1.stores.all()
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_vfolder_store_priorities(project0):
# remove the default vfolders and update units to reset priorities
VirtualFolder.objects.all().delete()
assert all(
priority == 1
for priority
in Store.objects.values_list("priority", flat=True))
vfolder0 = VirtualFolderDBFactory(filter_rules="store0.po", name="FOO")
vfolder0.priority = 3
vfolder0.save()
vfolder0_stores = vfolder0.stores.values_list("pk", flat=True)
assert all(
priority == 3
for priority
in Store.objects.filter(id__in=vfolder0_stores)
.values_list("priority", flat=True))
assert all(
priority == 1.0
for priority
in Store.objects.exclude(id__in=vfolder0_stores)
.values_list("priority", flat=True))
vfolder0.filter_rules = "store1.po"
vfolder0.save()
vfolder0_stores = vfolder0.stores.values_list("pk", flat=True)
assert all(
priority == 3
for priority
in Store.objects.filter(id__in=vfolder0_stores)
.values_list("priority", flat=True))
assert all(
priority == 1.0
for priority
in Store.objects.exclude(id__in=vfolder0_stores)
.values_list("priority", flat=True))
vfolder1 = VirtualFolderDBFactory(
filter_rules="store1.po")
vfolder1.languages.add(*Language.objects.all())
vfolder1.projects.add(project0)
vfolder1.priority = 4
vfolder1.save()
vfolder1_stores = vfolder1.stores.values_list("pk", flat=True)
assert all(
priority == 4.0
for priority
in Store.objects.filter(id__in=vfolder1_stores)
.values_list("priority", flat=True))
assert all(
priority == 3.0
for priority
in Store.objects.filter(id__in=vfolder0_stores)
.exclude(id__in=vfolder1_stores)
.values_list("priority", flat=True))
assert all(
priority == 1.0
for priority
in Store.objects.exclude(id__in=vfolder0_stores)
.exclude(id__in=vfolder1_stores)
.values_list("priority", flat=True))
@pytest.mark.django_db
def test_virtualfolder_repr():
vf = VirtualFolderDBFactory(filter_rules="store0.po")
assert (
"<VirtualFolder: %s>" % (vf.name)
== repr(vf))
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_vfolder_calc_priority(settings, store0):
vf = store0.vfolders.first()
vf.priority = 5
vf.save()
assert store0.calculate_priority() == 5.0
settings.INSTALLED_APPS.remove("virtualfolder")
assert store0.calculate_priority() == 1.0
settings.INSTALLED_APPS.append("virtualfolder")
@pytest.mark.pootle_vfolders
@pytest.mark.django_db
def test_vfolder_membership_new_store(tp0):
vf0 = VirtualFolder.objects.create(
name="vf0",
title="the vf0",
priority=7.0,
all_languages=True,
all_projects=True,
filter_rules="wierd.po")
wierd_store = Store.objects.create(
parent=tp0.directory,
translation_project=tp0,
name="wierd.po")
wierd_store.set_priority()
assert wierd_store in vf0.stores.all()
assert Store.objects.get(pk=wierd_store.pk).priority == 7
normal_store = Store.objects.create(
parent=tp0.directory,
translation_project=tp0,
name="normal.po")
assert normal_store not in vf0.stores.all()
assert Store.objects.get(pk=normal_store.pk).priority == 1.0
vf0.delete()
assert Store.objects.get(pk=wierd_store.pk).priority == 1.0
| gpl-3.0 |
zestrada/nova-cs498cc | nova/tests/servicegroup/test_mc_servicegroup.py | 10 | 8818 | # Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama at gmail dot com>
#
# This is derived from test_db_servicegroup.py.
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import fixtures
from nova import context
from nova import db
from nova.openstack.common import timeutils
from nova import service
from nova import servicegroup
from nova import test
class ServiceFixture(fixtures.Fixture):
def __init__(self, host, binary, topic):
super(ServiceFixture, self).__init__()
self.host = host
self.binary = binary
self.topic = topic
self.serv = None
def setUp(self):
super(ServiceFixture, self).setUp()
self.serv = service.Service(self.host,
self.binary,
self.topic,
'nova.tests.test_service.FakeManager',
1, 1)
self.addCleanup(self.serv.kill)
class MemcachedServiceGroupTestCase(test.TestCase):
def setUp(self):
super(MemcachedServiceGroupTestCase, self).setUp()
servicegroup.API._driver = None
self.flags(servicegroup_driver='mc')
self.down_time = 3
self.flags(enable_new_services=True)
self.flags(service_down_time=self.down_time)
self.servicegroup_api = servicegroup.API(test=True)
self._host = 'foo'
self._binary = 'nova-fake'
self._topic = 'unittest'
self._ctx = context.get_admin_context()
def test_memcached_driver(self):
serv = self.useFixture(
ServiceFixture(self._host, self._binary, self._topic)).serv
serv.start()
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
hostkey = str("%s:%s" % (self._topic, self._host))
self.servicegroup_api._driver.mc.set(hostkey,
timeutils.utcnow(),
time=self.down_time)
self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
eventlet.sleep(self.down_time + 1)
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
serv.stop()
eventlet.sleep(self.down_time + 1)
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
def test_get_all(self):
host1 = self._host + '_1'
host2 = self._host + '_2'
host3 = self._host + '_3'
serv1 = self.useFixture(
ServiceFixture(host1, self._binary, self._topic)).serv
serv1.start()
serv2 = self.useFixture(
ServiceFixture(host2, self._binary, self._topic)).serv
serv2.start()
serv3 = self.useFixture(
ServiceFixture(host3, self._binary, self._topic)).serv
serv3.start()
service_ref1 = db.service_get_by_args(self._ctx,
host1,
self._binary)
service_ref2 = db.service_get_by_args(self._ctx,
host2,
self._binary)
service_ref3 = db.service_get_by_args(self._ctx,
host3,
self._binary)
host1key = str("%s:%s" % (self._topic, host1))
host2key = str("%s:%s" % (self._topic, host2))
host3key = str("%s:%s" % (self._topic, host3))
self.servicegroup_api._driver.mc.set(host1key,
timeutils.utcnow(),
time=self.down_time)
self.servicegroup_api._driver.mc.set(host2key,
timeutils.utcnow(),
time=self.down_time)
self.servicegroup_api._driver.mc.set(host3key,
timeutils.utcnow(),
time=-1)
services = self.servicegroup_api.get_all(self._topic)
self.assertTrue(host1 in services)
self.assertTrue(host2 in services)
self.assertFalse(host3 in services)
service_id = self.servicegroup_api.get_one(self._topic)
self.assertTrue(service_id in services)
def test_service_is_up(self):
serv = self.useFixture(
ServiceFixture(self._host, self._binary, self._topic)).serv
serv.start()
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
fake_now = 1000
down_time = 5
self.flags(service_down_time=down_time)
self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
self.servicegroup_api = servicegroup.API()
hostkey = str("%s:%s" % (self._topic, self._host))
# Up (equal)
timeutils.utcnow_ts().AndReturn(fake_now)
timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
self.mox.ReplayAll()
self.servicegroup_api._driver.mc.set(hostkey,
timeutils.utcnow(),
time=down_time)
result = self.servicegroup_api.service_is_up(service_ref)
self.assertTrue(result)
self.mox.ResetAll()
# Up
timeutils.utcnow_ts().AndReturn(fake_now)
timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
self.mox.ReplayAll()
self.servicegroup_api._driver.mc.set(hostkey,
timeutils.utcnow(),
time=down_time)
result = self.servicegroup_api.service_is_up(service_ref)
self.assertTrue(result)
self.mox.ResetAll()
# Down
timeutils.utcnow_ts().AndReturn(fake_now)
timeutils.utcnow_ts().AndReturn(fake_now + down_time)
self.mox.ReplayAll()
self.servicegroup_api._driver.mc.set(hostkey,
timeutils.utcnow(),
time=down_time)
result = self.servicegroup_api.service_is_up(service_ref)
self.assertFalse(result)
self.mox.ResetAll()
# Down
timeutils.utcnow_ts().AndReturn(fake_now)
timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
self.mox.ReplayAll()
self.servicegroup_api._driver.mc.set(hostkey,
timeutils.utcnow(),
time=down_time)
result = self.servicegroup_api.service_is_up(service_ref)
self.assertFalse(result)
self.mox.ResetAll()
def test_report_state(self):
serv = self.useFixture(
ServiceFixture(self._host, self._binary, self._topic)).serv
serv.start()
service_ref = db.service_get_by_args(self._ctx,
self._host,
self._binary)
self.servicegroup_api = servicegroup.API()
# updating model_disconnected
serv.model_disconnected = True
self.servicegroup_api._driver._report_state(serv)
self.assertFalse(serv.model_disconnected)
# handling exception
serv.model_disconnected = True
self.servicegroup_api._driver.mc = None
self.servicegroup_api._driver._report_state(serv)
self.assertTrue(serv.model_disconnected)
delattr(serv, 'model_disconnected')
self.servicegroup_api._driver.mc = None
self.servicegroup_api._driver._report_state(serv)
self.assertTrue(serv.model_disconnected)
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/263_test_doctest2.py | 77 | 2269 | # -*- coding: utf-8 -*-
u"""A module to test whether doctest recognizes some 2.2 features,
like static and class methods.
>>> print 'yup' # 1
yup
We include some (random) encoded (utf-8) text in the text surrounding
the example. It should be ignored:
ЉЊЈЁЂ
"""
from test import test_support
class C(object):
u"""Class C.
>>> print C() # 2
42
We include some (random) encoded (utf-8) text in the text surrounding
the example. It should be ignored:
ЉЊЈЁЂ
"""
def __init__(self):
"""C.__init__.
>>> print C() # 3
42
"""
def __str__(self):
"""
>>> print C() # 4
42
"""
return "42"
class D(object):
"""A nested D class.
>>> print "In D!" # 5
In D!
"""
def nested(self):
"""
>>> print 3 # 6
3
"""
def getx(self):
"""
>>> c = C() # 7
>>> c.x = 12 # 8
>>> print c.x # 9
-12
"""
return -self._x
def setx(self, value):
"""
>>> c = C() # 10
>>> c.x = 12 # 11
>>> print c.x # 12
-12
"""
self._x = value
x = property(getx, setx, doc="""\
>>> c = C() # 13
>>> c.x = 12 # 14
>>> print c.x # 15
-12
""")
@staticmethod
def statm():
"""
A static method.
>>> print C.statm() # 16
666
>>> print C().statm() # 17
666
"""
return 666
@classmethod
def clsm(cls, val):
"""
A class method.
>>> print C.clsm(22) # 18
22
>>> print C().clsm(23) # 19
23
"""
return val
def test_main():
from test import test_doctest2
EXPECTED = 19
f, t = test_support.run_doctest(test_doctest2)
if t != EXPECTED:
raise test_support.TestFailed("expected %d tests to run, not %d" %
(EXPECTED, t))
# Pollute the namespace with a bunch of imported functions and classes,
# to make sure they don't get tested.
from doctest import *
if __name__ == '__main__':
test_main()
| gpl-3.0 |
romain-dartigues/ansible | lib/ansible/modules/network/asa/asa_config.py | 20 | 11223 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: asa_config
version_added: "2.2"
author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
short_description: Manage configuration sections on Cisco ASA devices
description:
- Cisco ASA configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ASA configuration sections in
a deterministic way.
extends_documentation_fragment: asa
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct
default: line
choices: ['line', 'block']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
type: bool
default: 'no'
config:
description:
- The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
the module will get the current config by issuing the command
C(show running-config all).
type: bool
default: 'no'
passwords:
description:
- This argument specifies to include passwords in the config
when retrieving the running-config from the remote device. This
includes passwords related to VPN endpoints. This argument is
mutually exclusive with I(defaults).
type: bool
default: 'no'
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
type: bool
default: 'no'
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: cisco
password: cisco
authorize: yes
auth_pass: cisco
---
- asa_config:
lines:
- network-object host 10.80.30.18
- network-object host 10.80.30.19
- network-object host 10.80.30.20
parents: ['object-group network OG-MONITORED-SERVERS']
provider: "{{ cli }}"
- asa_config:
host: "{{ inventory_hostname }}"
lines:
- message-length maximum client auto
- message-length maximum 512
match: line
parents: ['policy-map type inspect dns PM-DNS', 'parameters']
authorize: yes
auth_pass: cisco
username: admin
password: cisco
context: ansible
- asa_config:
lines:
- ikev1 pre-shared-key MyS3cretVPNK3y
parents: tunnel-group 1.1.1.1 ipsec-attributes
passwords: yes
provider: "{{ cli }}"
- name: attach ASA acl on interface vlan13/nameif cloud13
asa_config:
lines:
- access-group cloud-acl_access_in in interface cloud13
provider: "{{ cli }}"
- name: configure ASA (>=9.2) default BGP
asa_config:
lines:
- bgp log-neighbor-changes
- bgp bestpath compare-routerid
provider: "{{ cli }}"
parents:
- router bgp 65002
register: bgp
when: bgp_default_config is defined
- name: configure ASA (>=9.2) BGP neighbor in default/single context mode
asa_config:
lines:
- "bgp router-id {{ bgp_router_id }}"
- "neighbor {{ bgp_neighbor_ip }} remote-as {{ bgp_neighbor_as }}"
- "neighbor {{ bgp_neighbor_ip }} description {{ bgp_neighbor_name }}"
provider: "{{ cli }}"
parents:
- router bgp 65002
- address-family ipv4 unicast
register: bgp
when: bgp_neighbor_as is defined
- name: configure ASA interface with standby
asa_config:
lines:
- description my cloud interface
- nameif cloud13
- security-level 50
- ip address 192.168.13.1 255.255.255.0 standby 192.168.13.2
provider: "{{ cli }}"
parents: ["interface Vlan13"]
register: interface
- name: Show changes to interface from task above
debug:
var: interface
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/asa_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.asa.asa import asa_argument_spec, check_args
from ansible.module_utils.network.asa.asa import get_config, load_config, run_commands
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils._text import to_native
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
path = module.params['parents']
candidate = get_candidate(module)
if match != 'none':
contents = module.params['config']
if not contents:
contents = get_config(module)
config = NetworkConfig(indent=1, contents=contents)
configobjs = candidate.difference(config, path=path, match=match,
replace=replace)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['lines']:
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['updates'] = commands
# send the configuration commands to the device and merge
# them with the current running config
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
if module.params['save']:
if not module.check_mode:
run_commands(module, 'write mem')
result['changed'] = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
config=dict(),
defaults=dict(type='bool', default=False),
passwords=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
save=dict(type='bool', default=False),
)
argument_spec.update(asa_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src'),
('defaults', 'passwords')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
result = {'changed': False}
check_args(module)
config = None
if module.params['backup']:
result['__backup__'] = get_config(module)
run(module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
shoopio/shoop | shuup/core/excs.py | 1 | 1158 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.utils.excs import Problem
class ImmutabilityError(ValueError):
pass
class NoShippingAddressException(Exception):
pass
class NoProductsToShipException(Exception):
pass
class NoPaymentToCreateException(Exception):
pass
class NoRefundToCreateException(Exception):
pass
class RefundArbitraryRefundsNotAllowedException(Exception):
pass
class RefundExceedsAmountException(Exception):
pass
class RefundExceedsQuantityException(Exception):
pass
class InvalidRefundAmountException(Exception):
pass
class MissingSettingException(Exception):
pass
class ProductNotOrderableProblem(Problem):
pass
class ProductNotVisibleProblem(Problem):
pass
class ImpossibleProductModeException(ValueError):
def __init__(self, message, code=None):
super(ImpossibleProductModeException, self).__init__(message)
self.code = code
| agpl-3.0 |
asingla87/python-astm | astm/codec.py | 16 | 10528 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
try:  # Python 3.3+: the ABCs live in collections.abc
    from collections.abc import Iterable
except ImportError:  # Python 2
    from collections import Iterable
from .compat import unicode
from .constants import (
STX, ETX, ETB, CR, LF, CRLF,
FIELD_SEP, COMPONENT_SEP, RECORD_SEP, REPEAT_SEP, ENCODING
)
try:
from itertools import izip_longest
except ImportError: # Python 3
from itertools import zip_longest as izip_longest
def decode(data, encoding=ENCODING):
"""Common ASTM decoding function that tries to guess which kind of data it
handles.
    If `data` starts with the STX character (``0x02``), it is probably a
    full ASTM message with a checksum and other system characters.
    If `data` starts with a digit character (``0-9``), it is probably a
    frame of records led by its sequence number. No checksum is expected
    in this case.
    Otherwise `data` is treated as a regular record structure.
    Note that `data` should be bytes, not a unicode string, even if you know
    its `encoding`.
:param data: ASTM data object.
:type data: bytes
:param encoding: Data encoding.
:type encoding: str
:return: List of ASTM records with unicode data.
:rtype: list
"""
if not isinstance(data, bytes):
raise TypeError('bytes expected, got %r' % data)
if data.startswith(STX): # may be decode message \x02...\x03CS\r\n
seq, records, cs = decode_message(data, encoding)
return records
byte = data[:1].decode()
if byte.isdigit():
seq, records = decode_frame(data, encoding)
return records
return [decode_record(data, encoding)]
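# A doctest-style sketch (hypothetical record bytes; assumes the standard ASTM
# delimiters from .constants: '|' for fields, '\' for repeats, '^' for
# components; Python 3 repr shown):
#
#     >>> decode(b'H|\\^&')
#     [['H', [[None], [None, '&']]]]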
def decode_message(message, encoding):
"""Decodes complete ASTM message that is sent or received due
communication routines. It should contains checksum that would be
additionally verified.
:param message: ASTM message.
:type message: bytes
:param encoding: Data encoding.
:type encoding: str
:returns: Tuple of three elements:
* :class:`int` frame sequence number.
* :class:`list` of records with unicode data.
        * :class:`str` checksum.
:raises:
* :exc:`ValueError` if ASTM message is malformed.
* :exc:`AssertionError` if checksum verification fails.
"""
if not isinstance(message, bytes):
raise TypeError('bytes expected, got %r' % message)
if not (message.startswith(STX) and message.endswith(CRLF)):
        raise ValueError('Malformed ASTM message. Expected it to start'
                         ' with %x and end with %x%x characters. Got: %r'
                         % (ord(STX), ord(CR), ord(LF), message))
stx, frame_cs = message[0], message[1:-2]
frame, cs = frame_cs[:-2], frame_cs[-2:]
ccs = make_checksum(frame)
assert cs == ccs, 'Checksum failure: expected %r, calculated %r' % (cs, ccs)
seq, records = decode_frame(frame, encoding)
return seq, records, cs.decode()
def decode_frame(frame, encoding):
"""Decodes ASTM frame: list of records followed by sequence number."""
if not isinstance(frame, bytes):
raise TypeError('bytes expected, got %r' % frame)
if frame.endswith(CR + ETX):
frame = frame[:-2]
elif frame.endswith(ETB):
frame = frame[:-1]
else:
raise ValueError('Incomplete frame data %r.'
' Expected trailing <CR><ETX> or <ETB> chars' % frame)
seq = frame[:1].decode()
if not seq.isdigit():
raise ValueError('Malformed ASTM frame. Expected leading seq number %r'
'' % frame)
seq, records = int(seq), frame[1:]
return seq, [decode_record(record, encoding)
for record in records.split(RECORD_SEP)]
def decode_record(record, encoding):
"""Decodes ASTM record message."""
fields = []
for item in record.split(FIELD_SEP):
if REPEAT_SEP in item:
item = decode_repeated_component(item, encoding)
elif COMPONENT_SEP in item:
item = decode_component(item, encoding)
else:
item = item.decode(encoding)
fields.append([None, item][bool(item)])
return fields
def decode_component(field, encoding):
"""Decodes ASTM field component."""
return [[None, item.decode(encoding)][bool(item)]
for item in field.split(COMPONENT_SEP)]
def decode_repeated_component(component, encoding):
"""Decodes ASTM field repeated component."""
return [decode_component(item, encoding)
for item in component.split(REPEAT_SEP)]
def encode(records, encoding=ENCODING, size=None, seq=1):
"""Encodes list of records into single ASTM message, also called as "packed"
message.
If you need to get each record as standalone message use :func:`iter_encode`
instead.
If the result message is too large (greater than specified `size` if it's
not :const:`None`), than it will be split by chunks.
:param records: List of ASTM records.
:type records: list
:param encoding: Data encoding.
:type encoding: str
:param size: Chunk size in bytes.
:type size: int
:param seq: Frame start sequence number.
:type seq: int
:return: List of ASTM message chunks.
:rtype: list
"""
msg = encode_message(seq, records, encoding)
if size is not None and len(msg) > size:
return list(split(msg, size))
return [msg]
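# A round-trip sketch (single hypothetical record; the checksum b'05' is what
# make_checksum() yields for this frame):
#
#     >>> encode([['H', None]])
#     [b'\x021H|\r\x0305\r\n']
#     >>> decode(b'\x021H|\r\x0305\r\n')
#     [['H', None]]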
def iter_encode(records, encoding=ENCODING, size=None, seq=1):
"""Encodes and emits each record as separate message.
If the result message is too large (greater than specified `size` if it's
not :const:`None`), than it will be split by chunks.
:yields: ASTM message chunks.
:rtype: str
"""
for record in records:
msg = encode_message(seq, [record], encoding)
if size is not None and len(msg) > size:
for chunk in split(msg, size):
seq += 1
yield chunk
else:
seq += 1
yield msg
def encode_message(seq, records, encoding):
"""Encodes ASTM message.
:param seq: Frame sequence number.
:type seq: int
:param records: List of ASTM records.
:type records: list
:param encoding: Data encoding.
:type encoding: str
:return: ASTM complete message with checksum and other control characters.
:rtype: str
"""
data = RECORD_SEP.join(encode_record(record, encoding)
for record in records)
data = b''.join((str(seq % 8).encode(), data, CR, ETX))
return b''.join([STX, data, make_checksum(data), CR, LF])
def encode_record(record, encoding):
"""Encodes single ASTM record.
    :param record: ASTM record. Each :class:`str`-typed item is treated as a
                   field value, a one-level nested :class:`list` as components,
                   and a two-level nested one as repeated components.
    :type record: list
    :param encoding: Data encoding.
    :type encoding: str
    :returns: Encoded ASTM record.
    :rtype: bytes
"""
fields = []
_append = fields.append
for field in record:
if isinstance(field, bytes):
_append(field)
elif isinstance(field, unicode):
_append(field.encode(encoding))
elif isinstance(field, Iterable):
_append(encode_component(field, encoding))
elif field is None:
_append(b'')
else:
_append(unicode(field).encode(encoding))
return FIELD_SEP.join(fields)
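# E.g. (sketch): encode_record(['H', [None, 'x']], 'utf-8') returns b'H|^x'.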
def encode_component(component, encoding):
"""Encodes ASTM record field components."""
items = []
_append = items.append
for item in component:
if isinstance(item, bytes):
_append(item)
elif isinstance(item, unicode):
_append(item.encode(encoding))
elif isinstance(item, Iterable):
return encode_repeated_component(component, encoding)
elif item is None:
_append(b'')
else:
_append(unicode(item).encode(encoding))
return COMPONENT_SEP.join(items).rstrip(COMPONENT_SEP)
def encode_repeated_component(components, encoding):
"""Encodes repeated components."""
return REPEAT_SEP.join(encode_component(item, encoding)
for item in components)
def make_checksum(message):
"""Calculates checksum for specified message.
:param message: ASTM message.
:type message: bytes
:returns: Checksum value that is actually byte sized integer in hex base
:rtype: bytes
"""
if not isinstance(message[0], int):
message = map(ord, message)
return hex(sum(message) & 0xFF)[2:].upper().zfill(2).encode()
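# E.g. (sketch): make_checksum(b'2Q|1\r') sums the byte values (317), masks to
# one byte (0x3D) and returns b'3D'.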
def make_chunks(s, n):
iter_bytes = (s[i:i + 1] for i in range(len(s)))
return [b''.join(item)
for item in izip_longest(*[iter_bytes] * n, fillvalue=b'')]
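# E.g. (illustrative): make_chunks(b'abcdef', 4) -> [b'abcd', b'ef']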
def split(msg, size):
"""Split `msg` into chunks with specified `size`.
Chunk `size` value couldn't be less then 7 since each chunk goes with at
least 7 special characters: STX, frame number, ETX or ETB, checksum and
message terminator.
:param msg: ASTM message.
:type msg: bytes
:param size: Chunk size in bytes.
:type size: int
:yield: `bytes`
"""
stx, frame, msg, tail = msg[:1], msg[1:2], msg[2:-6], msg[-6:]
assert stx == STX
assert frame.isdigit()
assert tail.endswith(CRLF)
assert size is not None and size >= 7
frame = int(frame)
chunks = make_chunks(msg, size - 7)
chunks, last = chunks[:-1], chunks[-1]
idx = 0
for idx, chunk in enumerate(chunks):
item = b''.join([str((idx + frame) % 8).encode(), chunk, ETB])
yield b''.join([STX, item, make_checksum(item), CRLF])
item = b''.join([str((idx + frame + 1) % 8).encode(), last, CR, ETX])
yield b''.join([STX, item, make_checksum(item), CRLF])
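# E.g. (sketch): splitting the 10-byte message from the encode() example above
# with size=8 produces two chunks, and join() below reassembles the original:
#
#     >>> list(split(b'\x021H|\r\x0305\r\n', 8))
#     [b'\x021H\x1790\r\n', b'\x022|\r\x03BE\r\n']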
def join(chunks):
"""Merges ASTM message `chunks` into single message.
:param chunks: List of chunks as `bytes`.
:type chunks: iterable
"""
msg = b'1' + b''.join(c[2:-5] for c in chunks) + ETX
return b''.join([STX, msg, make_checksum(msg), CRLF])
def is_chunked_message(message):
"""Checks plain message for chunked byte."""
length = len(message)
if len(message) < 5:
return False
if ETB not in message:
return False
return message.index(ETB) == length - 5
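# E.g. (sketch, placeholder checksum b'XX'): a chunk ends with
# <ETB><checksum><CR><LF>, so:
#
#     >>> is_chunked_message(b'\x021ab\x17XX\r\n')
#     True
#     >>> is_chunked_message(b'\x021ab\r\x03XX\r\n')
#     False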
| bsd-3-clause |
PaddlePaddle/models | PaddleCV/tracking/ltr/data/transforms.py | 1 | 6374 | import random
import numpy as np
import math
import cv2 as cv
from paddle.fluid import layers
from pytracking.libs.paddle_utils import PTensor
class Transform:
""" Class for applying various image transformations."""
def __call__(self, *args):
rand_params = self.roll()
if rand_params is None:
rand_params = ()
elif not isinstance(rand_params, tuple):
rand_params = (rand_params, )
output = [self.transform(img, *rand_params) for img in args]
if len(output) == 1:
return output[0]
return output
def roll(self):
return None
def transform(self, img, *args):
"""Must be deterministic"""
raise NotImplementedError
class Compose:
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, *args):
for t in self.transforms:
if not isinstance(args, tuple):
args = (args, )
args = t(*args)
return args
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
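# A minimal usage sketch (ImageNet-style statistics assumed; any mean/std with
# a matching channel count works):
#
#     transform = Compose([ToArray(),
#                          Normalize(mean=[0.485, 0.456, 0.406],
#                                    std=[0.229, 0.224, 0.225])])
#     out = transform(img)  # HWC uint8 image -> normalized CHW float32 array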
class Normalize(object):
"""Normalize an tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = np.reshape(mean, [-1, 1, 1])
self.std = np.reshape(std, [-1, 1, 1])
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return (tensor - self.mean) / self.std
class Transpose(Transform):
""" Transpose image."""
def __call__(self, img):
if len(img.shape) == 3:
img = img.transpose((2, 0, 1))
elif len(img.shape) == 2:
img = np.expand_dims(img, axis=0)
else:
raise NotImplementedError
return img.astype('float32')
class ToArray(Transform):
""" Transpose image and jitter brightness"""
def __init__(self, brightness_jitter=0.0):
self.brightness_jitter = brightness_jitter
def __call__(self, img):
img = img.transpose((2, 0, 1))
return img.astype('float32') / 255.
class ToArrayAndJitter(Transform):
""" Transpose image and jitter brightness"""
def __init__(self, brightness_jitter=0.0):
self.brightness_jitter = brightness_jitter
def roll(self):
return np.random.uniform(
max(0, 1 - self.brightness_jitter), 1 + self.brightness_jitter)
def transform(self, img, brightness_factor):
# handle numpy array
img = img.transpose((2, 0, 1))
# backward compatibility
return np.clip(
img.astype('float32') * brightness_factor / 255.0, 0.0, 1.0)
class ToGrayscale(Transform):
"""Converts image to grayscale with probability"""
def __init__(self, probability=0.5):
self.probability = probability
self.color_weights = np.array(
[0.2989, 0.5870, 0.1140], dtype=np.float32)
def roll(self):
return random.random() < self.probability
def transform(self, img, do_grayscale):
if do_grayscale:
if isinstance(img, PTensor):
raise NotImplementedError('Implement paddle variant.')
img_gray = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
return np.stack([img_gray, img_gray, img_gray], axis=2)
# return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2)
return img
class RandomHorizontalFlip(Transform):
"""Horizontally flip the given NumPy Image randomly with a probability p."""
def __init__(self, probability=0.5):
self.probability = probability
def roll(self):
return random.random() < self.probability
def transform(self, img, do_flip):
if do_flip:
if isinstance(img, PTensor):
return layers.reverse(img, 2)
return np.fliplr(img).copy()
return img
class Blur(Transform):
""" Blur the image by applying a random kernel."""
def __init__(self, probability=0.5):
self.probability = probability
def roll(self):
return random.random() < self.probability
def transform(self, img, do_blur):
        def rand_kernel():
            # Build a random cross-shaped blur kernel of odd size in [5, 45],
            # splitting the weight between the vertical and horizontal arms.
            sizes = np.arange(5, 46, 2)
            size = np.random.choice(sizes)
            kernel = np.zeros((size, size))
            c = size // 2
            wx = np.random.random()
            kernel[:, c] += 1. / size * wx
            kernel[c, :] += 1. / size * (1 - wx)
            return kernel
if do_blur:
kernel = rand_kernel()
img = cv.filter2D(img, -1, kernel)
return img
class Color(Transform):
""" Blur the image by applying a random kernel."""
def __init__(self, probability=1):
self.probability = probability
        self.rgbVar = np.array(
            [
                [-0.55919361, 0.98062831, -0.41940627],
                [1.72091413, 0.19879334, -1.82968581],
                [4.64467907, 4.73710203, 4.88324118]
            ],
            dtype=np.float32)
def roll(self):
return random.random() < self.probability
def transform(self, img, do_color_aug):
if do_color_aug:
offset = np.dot(self.rgbVar, np.random.randn(3, 1))
offset = offset.reshape(3)
img = img - offset
return img
| apache-2.0 |
silentfuzzle/calibre | src/calibre/gui2/catalog/catalog_bibtex.py | 14 | 3807 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.gui2 import gprefs
from calibre.gui2.catalog.catalog_bibtex_ui import Ui_Form
from PyQt5.Qt import QWidget, QListWidgetItem
class PluginWidget(QWidget, Ui_Form):
TITLE = _('BibTeX Options')
HELP = _('Options specific to')+' BibTeX '+_('output')
OPTION_FIELDS = [('bib_cit','{authors}{id}'),
('bib_entry', 0), # mixed
('bibfile_enc', 0), # utf-8
('bibfile_enctag', 0), # strict
('impcit', True),
('addfiles', False),
]
sync_enabled = False
formats = set(['bib'])
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.setupUi(self)
def initialize(self, name, db): # not working properly to update
from calibre.library.catalogs import FIELDS
self.all_fields = [x for x in FIELDS if x != 'all']
# add custom columns
for x in sorted(db.custom_field_keys()):
self.all_fields.append(x)
if db.field_metadata[x]['datatype'] == 'series':
self.all_fields.append(x+'_index')
# populate
for x in self.all_fields:
QListWidgetItem(x, self.db_fields)
self.name = name
fields = gprefs.get(name+'_db_fields', self.all_fields)
# Restore the activated db_fields from last use
for x in xrange(self.db_fields.count()):
item = self.db_fields.item(x)
item.setSelected(unicode(item.text()) in fields)
self.bibfile_enc.clear()
self.bibfile_enc.addItems(['utf-8', 'cp1252', 'ascii/LaTeX'])
self.bibfile_enctag.clear()
self.bibfile_enctag.addItems(['strict', 'replace', 'ignore',
'backslashreplace'])
self.bib_entry.clear()
self.bib_entry.addItems(['mixed', 'misc', 'book'])
# Update dialog fields from stored options
for opt in self.OPTION_FIELDS:
opt_value = gprefs.get(self.name + '_' + opt[0], opt[1])
if opt[0] in ['bibfile_enc', 'bibfile_enctag', 'bib_entry']:
getattr(self, opt[0]).setCurrentIndex(opt_value)
elif opt[0] in ['impcit', 'addfiles'] :
getattr(self, opt[0]).setChecked(opt_value)
else:
getattr(self, opt[0]).setText(opt_value)
def options(self):
# Save the currently activated fields
fields = []
for x in xrange(self.db_fields.count()):
item = self.db_fields.item(x)
if item.isSelected():
fields.append(unicode(item.text()))
gprefs.set(self.name+'_db_fields', fields)
# Dictionary currently activated fields
if len(self.db_fields.selectedItems()):
opts_dict = {'fields':[unicode(i.text()) for i in self.db_fields.selectedItems()]}
else:
opts_dict = {'fields':['all']}
# Save/return the current options
# bib_cit stores as text
# 'bibfile_enc','bibfile_enctag' stores as int (Indexes)
for opt in self.OPTION_FIELDS:
if opt[0] in ['bibfile_enc', 'bibfile_enctag', 'bib_entry']:
opt_value = getattr(self,opt[0]).currentIndex()
elif opt[0] in ['impcit', 'addfiles'] :
opt_value = getattr(self, opt[0]).isChecked()
else :
opt_value = unicode(getattr(self, opt[0]).text())
gprefs.set(self.name + '_' + opt[0], opt_value)
opts_dict[opt[0]] = opt_value
return opts_dict
| gpl-3.0 |
0Chencc/CTFCrackTools | Lib/site-packages/pip/__init__.py | 328 | 11348 | #!/usr/bin/env python
from __future__ import absolute_import
import locale
import logging
import os
import optparse
import warnings
import sys
import re
# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks,
# but if invoked (i.e. imported), it will issue a warning to stderr if socks
# isn't available. requests unconditionally imports urllib3's socks contrib
# module, triggering this warning. The warning breaks DEP-8 tests (because of
# the stderr output) and is just plain annoying in normal usage. I don't want
# to add socks as yet another dependency for pip, nor do I want to allow-stderr
# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to
# be done before the import of pip.vcs.
from pip._vendor.requests.packages.urllib3.exceptions import DependencyWarning
warnings.filterwarnings("ignore", category=DependencyWarning) # noqa
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation, dist_is_editable
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "9.0.1"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash, zsh or fish).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for uninstall command
if subcommand_name == 'uninstall' and not current.startswith('-'):
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands_dict[subcommand_name]()
options += [(opt.get_opt_string(), opt.nargs)
for opt in subcommand.parser.option_list_all
if opt.help != optparse.SUPPRESS_HELP]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
subcommands += [i.get_opt_string() for i in opts
if i.help != optparse.SUPPRESS_HELP]
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
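# A sketch of how a sourced completion script drives this hook (values are
# illustrative):
#
#   PIP_AUTO_COMPLETE=1 COMP_WORDS='pip ins' COMP_CWORD=1 pip
#
# prints the subcommands starting with 'ins' (e.g. 'install') and exits.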
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3])
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
# general_options: ['--timeout==5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
def check_isolated(args):
    return "--isolated" in args
def main(args=None):
if args is None:
args = sys.argv[1:]
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parseopts(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
# Needed for locale.getpreferredencoding(False) to work
# in pip.utils.encoding.auto_decode
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
        # setlocale can apparently crash if locales are uninitialized
logger.debug("Ignoring error %s when setting locale", e)
command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
return command.main(cmd_args)
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip.vcs import vcs, get_src_requirement
if dist_is_editable(dist) and vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location)
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
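# Example (illustrative; the URL and names below are made up): an editable
# VCS requirement renders in `pip freeze` output as
#   -e git+https://example.com/repo.git@abc123#egg=pkg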
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
ulope/geopy | geopy/geocoders/virtual_earth.py | 1 | 2555 | import re
from urllib2 import urlopen
from urllib import urlencode
import simplejson
from geopy.geocoders.base import Geocoder
from geopy.point import Point
from geopy.location import Location
from geopy import util
import logging
log = logging.getLogger(__name__)
class VirtualEarth(Geocoder):
"""Geocoder using Microsoft's Windows Live Local web service, powered by
Virtual Earth.
WARNING: This does not use a published API and can easily break if
Microsoft changes their JavaScript.
"""
SINGLE_LOCATION = re.compile(r"AddLocation\((.*?')\)")
AMBIGUOUS_LOCATION = re.compile(r"UpdateAmbiguousList\(\[(.*?)\]\)")
AMBIGUOUS_SPLIT = re.compile(r"\s*,?\s*new Array\(")
STRING_QUOTE = re.compile(r"(?<!\\)'")
def __init__(self, domain='local.live.com', format_string='%s'):
self.domain = domain
self.format_string = format_string
@property
def url(self):
domain = self.domain
resource = "search.ashx"
return "http://%(domain)s/%(resource)s?%%s" % locals()
def geocode(self, string, exactly_one=True):
params = {'b': self.format_string % string}
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one)
def geocode_url(self, url, exactly_one=True):
log.debug("Fetching %s..." % url)
page = urlopen(url)
return self.parse_javascript(page, exactly_one)
def parse_javascript(self, page, exactly_one=True):
if not isinstance(page, basestring):
page = util.decode_page(page)
matches = self.SINGLE_LOCATION.findall(page)
if not matches:
for match in self.AMBIGUOUS_LOCATION.findall(page):
places = self.AMBIGUOUS_SPLIT.split(match)
matches.extend([place for place in places if place])
if exactly_one and len(matches) != 1:
raise ValueError("Didn't find exactly one location! " \
"(Found %d.)" % len(matches))
def parse_match(match):
json = "[%s]" % self.STRING_QUOTE.sub('"', match.strip('()'))
array = simplejson.loads(json)
if len(array) == 8:
location, (latitude, longitude) = array[0], array[5:7]
else:
location, latitude, longitude = array[:3]
return (location, (latitude, longitude))
if exactly_one:
return parse_match(matches[0])
else:
return (parse_match(match) for match in matches) | mit |
slayerjain/servo | tests/wpt/harness/wptrunner/testrunner.py | 43 | 25272 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import multiprocessing
import sys
import threading
import traceback
from Queue import Empty
from multiprocessing import Process, current_process, Queue
from mozlog import structuredlog
# Special value used as a sentinel in various commands
Stop = object()
class MessageLogger(object):
def __init__(self, message_func):
self.send_message = message_func
def _log_data(self, action, **kwargs):
self.send_message("log", action, kwargs)
def process_output(self, process, data, command):
self._log_data("process_output", process=process, data=data, command=command)
def _log_func(level_name):
def log(self, message):
self._log_data(level_name.lower(), message=message)
log.__doc__ = """Log a message with level %s
:param message: The string message to log
""" % level_name
log.__name__ = str(level_name).lower()
return log
# Create all the methods on StructuredLog for debug levels
for level_name in structuredlog.log_levels:
setattr(MessageLogger, level_name.lower(), _log_func(level_name))
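# Illustrative sketch: MessageLogger(send).info("hi") passes the tuple
# ("log", "info", {"message": "hi"}) to send.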
class TestRunner(object):
def __init__(self, test_queue, command_queue, result_queue, executor):
"""Class implementing the main loop for running tests.
This class delegates the job of actually running a test to the executor
that is passed in.
:param test_queue: subprocess.Queue containing the tests to run
:param command_queue: subprocess.Queue used to send commands to the
process
:param result_queue: subprocess.Queue used to send results to the
parent TestManager process
:param executor: TestExecutor object that will actually run a test.
"""
self.test_queue = test_queue
self.command_queue = command_queue
self.result_queue = result_queue
self.executor = executor
self.name = current_process().name
self.logger = MessageLogger(self.send_message)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.teardown()
def setup(self):
self.executor.setup(self)
def teardown(self):
self.executor.teardown()
self.send_message("runner_teardown")
self.result_queue = None
self.command_queue = None
self.browser = None
def run(self):
"""Main loop accepting commands over the pipe and triggering
the associated methods"""
self.setup()
commands = {"run_test": self.run_test,
"stop": self.stop,
"wait": self.wait}
while True:
command, args = self.command_queue.get()
try:
rv = commands[command](*args)
except Exception:
self.send_message("error",
"Error running command %s with arguments %r:\n%s" %
(command, args, traceback.format_exc()))
else:
if rv is Stop:
break
def stop(self):
return Stop
def run_test(self):
if not self.executor.is_alive():
self.send_message("restart_runner")
return
try:
# Need to block here just to allow for contention with other processes
test = self.test_queue.get(block=True, timeout=1)
except Empty:
# If we are running tests in groups (e.g. by-dir) then this queue might be
# empty but there could be other test queues. restart_runner won't actually
# start the runner if there aren't any more tests to run
self.send_message("restart_runner")
return
else:
self.send_message("test_start", test)
try:
return self.executor.run_test(test)
except Exception:
self.logger.critical(traceback.format_exc())
raise
def wait(self):
self.executor.protocol.wait()
self.send_message("after_test_ended", True)
def send_message(self, command, *args):
self.result_queue.put((command, args))
def start_runner(test_queue, runner_command_queue, runner_result_queue,
executor_cls, executor_kwargs,
executor_browser_cls, executor_browser_kwargs,
stop_flag):
"""Launch a TestRunner in a new process"""
try:
browser = executor_browser_cls(**executor_browser_kwargs)
executor = executor_cls(browser, **executor_kwargs)
with TestRunner(test_queue, runner_command_queue, runner_result_queue, executor) as runner:
try:
runner.run()
except KeyboardInterrupt:
stop_flag.set()
except Exception:
runner_result_queue.put(("log", ("critical", {"message": traceback.format_exc()})))
print >> sys.stderr, traceback.format_exc()
stop_flag.set()
finally:
runner_command_queue = None
runner_result_queue = None
manager_count = 0
def next_manager_number():
global manager_count
local = manager_count = manager_count + 1
return local
class TestRunnerManager(threading.Thread):
init_lock = threading.Lock()
def __init__(self, suite_name, test_queue, test_source_cls, browser_cls, browser_kwargs,
executor_cls, executor_kwargs, stop_flag, pause_after_test=False,
pause_on_unexpected=False, debug_info=None):
"""Thread that owns a single TestRunner process and any processes required
by the TestRunner (e.g. the Firefox binary).
TestRunnerManagers are responsible for launching the browser process and the
runner process, and for logging the test progress. The actual test running
is done by the TestRunner. In particular they:
* Start the binary of the program under test
* Start the TestRunner
* Tell the TestRunner to start a test, if any
* Log that the test started
* Log the test results
* Take any remedial action required e.g. restart crashed or hung
processes
"""
self.suite_name = suite_name
self.test_queue = test_queue
self.test_source_cls = test_source_cls
self.browser_cls = browser_cls
self.browser_kwargs = browser_kwargs
self.executor_cls = executor_cls
self.executor_kwargs = executor_kwargs
self.test_source = None
self.browser = None
self.browser_pid = None
self.browser_started = False
# Flags used to shut down this thread if we get a sigint
self.parent_stop_flag = stop_flag
self.child_stop_flag = multiprocessing.Event()
self.pause_after_test = pause_after_test
self.pause_on_unexpected = pause_on_unexpected
self.debug_info = debug_info
self.manager_number = next_manager_number()
self.command_queue = Queue()
self.remote_queue = Queue()
self.test_runner_proc = None
threading.Thread.__init__(self, name="Thread-TestrunnerManager-%i" % self.manager_number)
# This is started in the actual new thread
self.logger = None
# The test that is currently running
self.test = None
self.unexpected_count = 0
# This may not really be what we want
self.daemon = True
self.init_fail_count = 0
self.max_init_fails = 5
self.init_timer = None
self.restart_count = 0
self.max_restarts = 5
def run(self):
"""Main loop for the TestManager.
TestManagers generally receive commands from their
TestRunner updating them on the status of a test. They
may also have a stop flag set by the main thread indicating
that the manager should shut down the next time the event loop
spins."""
self.logger = structuredlog.StructuredLogger(self.suite_name)
with self.browser_cls(self.logger, **self.browser_kwargs) as browser, self.test_source_cls(self.test_queue) as test_source:
self.browser = browser
self.test_source = test_source
try:
if self.init() is Stop:
return
while True:
commands = {"init_succeeded": self.init_succeeded,
"init_failed": self.init_failed,
"test_start": self.test_start,
"test_ended": self.test_ended,
"after_test_ended": self.after_test_ended,
"restart_runner": self.restart_runner,
"runner_teardown": self.runner_teardown,
"log": self.log,
"error": self.error}
try:
command, data = self.command_queue.get(True, 1)
except IOError:
if not self.should_stop():
self.logger.error("Got IOError from poll")
self.restart_count += 1
if self.restart_runner() is Stop:
break
except Empty:
command = None
if self.should_stop():
self.logger.debug("A flag was set; stopping")
break
if command is not None:
self.restart_count = 0
if commands[command](*data) is Stop:
break
else:
if (self.debug_info and self.debug_info.interactive and
self.browser_started and not browser.is_alive()):
self.logger.debug("Debugger exited")
break
if not self.test_runner_proc.is_alive():
if not self.command_queue.empty():
# We got a new message so process that
continue
# If we got to here the runner presumably shut down
# unexpectedly
self.logger.info("Test runner process shut down")
if self.test is not None:
# This could happen if the test runner crashed for some other
# reason
# Need to consider the unlikely case where one test causes the
# runner process to repeatedly die
self.logger.critical("Last test did not complete")
break
self.logger.warning(
"More tests found, but runner process died, restarting")
self.restart_count += 1
if self.restart_runner() is Stop:
break
finally:
self.logger.debug("TestRunnerManager main loop terminating, starting cleanup")
self.stop_runner()
self.teardown()
self.logger.debug("TestRunnerManager main loop terminated")
def should_stop(self):
return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()
def init(self):
"""Launch the browser that is being tested,
and the TestRunner process that will run the tests."""
# It seems that this lock is helpful to prevent some race that otherwise
        # sometimes stops the spawned processes initialising correctly, and
# leaves this thread hung
if self.init_timer is not None:
self.init_timer.cancel()
self.logger.debug("Init called, starting browser and runner")
def init_failed():
            # This is called from a separate thread, so we send a message to the
# main loop so we get back onto the manager thread
self.logger.debug("init_failed called from timer")
if self.command_queue:
self.command_queue.put(("init_failed", ()))
else:
self.logger.debug("Setting child stop flag in init_failed")
self.child_stop_flag.set()
with self.init_lock:
# Guard against problems initialising the browser or the browser
# remote control method
if self.debug_info is None:
self.init_timer = threading.Timer(self.browser.init_timeout, init_failed)
test_queue = self.test_source.get_queue()
if test_queue is None:
self.logger.info("No more tests")
return Stop
try:
if self.init_timer is not None:
self.init_timer.start()
self.browser.start()
self.browser_pid = self.browser.pid()
self.start_test_runner(test_queue)
except:
self.logger.warning("Failure during init %s" % traceback.format_exc())
if self.init_timer is not None:
self.init_timer.cancel()
self.logger.error(traceback.format_exc())
succeeded = False
else:
succeeded = True
self.browser_started = True
# This has to happen after the lock is released
if not succeeded:
self.init_failed()
def init_succeeded(self):
"""Callback when we have started the browser, started the remote
control connection, and we are ready to start testing."""
self.logger.debug("Init succeeded")
if self.init_timer is not None:
self.init_timer.cancel()
self.init_fail_count = 0
self.start_next_test()
def init_failed(self):
"""Callback when starting the browser or the remote control connect
fails."""
self.init_fail_count += 1
self.logger.warning("Init failed %i" % self.init_fail_count)
if self.init_timer is not None:
self.init_timer.cancel()
if self.init_fail_count < self.max_init_fails:
self.restart_runner()
else:
self.logger.critical("Test runner failed to initialise correctly; shutting down")
return Stop
def start_test_runner(self, test_queue):
# Note that we need to be careful to start the browser before the
# test runner to ensure that any state set when the browser is started
# can be passed in to the test runner.
assert self.command_queue is not None
assert self.remote_queue is not None
self.logger.info("Starting runner")
executor_browser_cls, executor_browser_kwargs = self.browser.executor_browser()
args = (test_queue,
self.remote_queue,
self.command_queue,
self.executor_cls,
self.executor_kwargs,
executor_browser_cls,
executor_browser_kwargs,
self.child_stop_flag)
self.test_runner_proc = Process(target=start_runner,
args=args,
name="Thread-TestRunner-%i" % self.manager_number)
self.test_runner_proc.start()
self.logger.debug("Test runner started")
def send_message(self, command, *args):
self.remote_queue.put((command, args))
def cleanup(self):
if self.init_timer is not None:
self.init_timer.cancel()
self.logger.debug("TestManager cleanup")
while True:
try:
self.logger.warning(" ".join(map(repr, self.command_queue.get_nowait())))
except Empty:
break
while True:
try:
self.logger.warning(" ".join(map(repr, self.remote_queue.get_nowait())))
except Empty:
break
def teardown(self):
self.logger.debug("teardown in testrunnermanager")
self.test_runner_proc = None
self.command_queue.close()
self.remote_queue.close()
self.command_queue = None
self.remote_queue = None
def ensure_runner_stopped(self):
if self.test_runner_proc is None:
return
self.test_runner_proc.join(10)
if self.test_runner_proc.is_alive():
# This might leak a file handle from the queue
self.logger.warning("Forcibly terminating runner process")
self.test_runner_proc.terminate()
self.test_runner_proc.join(10)
else:
self.logger.debug("Testrunner exited with code %i" % self.test_runner_proc.exitcode)
def runner_teardown(self):
self.ensure_runner_stopped()
return Stop
def stop_runner(self):
"""Stop the TestRunner and the Firefox binary."""
self.logger.debug("Stopping runner")
if self.test_runner_proc is None:
return
try:
self.browser.stop()
self.browser_started = False
if self.test_runner_proc.is_alive():
self.send_message("stop")
self.ensure_runner_stopped()
finally:
self.cleanup()
def start_next_test(self):
self.send_message("run_test")
def test_start(self, test):
self.test = test
self.logger.test_start(test.id)
def test_ended(self, test, results):
"""Handle the end of a test.
Output the result of each subtest, and the result of the overall
harness to the logs.
"""
assert test == self.test
# Write the result of each subtest
file_result, test_results = results
subtest_unexpected = False
for result in test_results:
if test.disabled(result.name):
continue
expected = test.expected(result.name)
is_unexpected = expected != result.status
if is_unexpected:
self.unexpected_count += 1
self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
subtest_unexpected = True
self.logger.test_status(test.id,
result.name,
result.status,
message=result.message,
expected=expected,
stack=result.stack)
# TODO: consider changing result if there is a crash dump file
# Write the result of the test harness
expected = test.expected()
status = file_result.status if file_result.status != "EXTERNAL-TIMEOUT" else "TIMEOUT"
is_unexpected = expected != status
if is_unexpected:
self.unexpected_count += 1
self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
if status == "CRASH":
self.browser.log_crash(process=self.browser_pid, test=test.id)
self.logger.test_end(test.id,
status,
message=file_result.message,
expected=expected,
extra=file_result.extra)
self.test = None
restart_before_next = (file_result.status in ("CRASH", "EXTERNAL-TIMEOUT") or
subtest_unexpected or is_unexpected)
if (self.pause_after_test or
(self.pause_on_unexpected and (subtest_unexpected or is_unexpected))):
self.logger.info("Pausing until the browser exits")
self.send_message("wait")
else:
self.after_test_ended(restart_before_next)
def after_test_ended(self, restart_before_next):
# Handle starting the next test, with a runner restart if required
if restart_before_next:
return self.restart_runner()
else:
return self.start_next_test()
def restart_runner(self):
"""Stop and restart the TestRunner"""
if self.restart_count >= self.max_restarts:
return Stop
self.stop_runner()
return self.init()
def log(self, action, kwargs):
getattr(self.logger, action)(**kwargs)
def error(self, message):
self.logger.error(message)
self.restart_runner()
class TestQueue(object):
def __init__(self, test_source_cls, test_type, tests, **kwargs):
self.queue = None
self.test_source_cls = test_source_cls
self.test_type = test_type
self.tests = tests
self.kwargs = kwargs
def __enter__(self):
if not self.tests[self.test_type]:
return None
self.queue = Queue()
has_tests = self.test_source_cls.queue_tests(self.queue,
self.test_type,
self.tests,
**self.kwargs)
# There is a race condition that means sometimes we continue
# before the tests have been written to the underlying pipe.
# Polling the pipe for data here avoids that
self.queue._reader.poll(10)
assert not self.queue.empty()
return self.queue
def __exit__(self, *args, **kwargs):
if self.queue is not None:
self.queue.close()
self.queue = None
class ManagerGroup(object):
def __init__(self, suite_name, size, test_source_cls, test_source_kwargs,
browser_cls, browser_kwargs,
executor_cls, executor_kwargs,
pause_after_test=False,
pause_on_unexpected=False,
debug_info=None):
"""Main thread object that owns all the TestManager threads."""
self.suite_name = suite_name
self.size = size
self.test_source_cls = test_source_cls
self.test_source_kwargs = test_source_kwargs
self.browser_cls = browser_cls
self.browser_kwargs = browser_kwargs
self.executor_cls = executor_cls
self.executor_kwargs = executor_kwargs
self.pause_after_test = pause_after_test
self.pause_on_unexpected = pause_on_unexpected
self.debug_info = debug_info
self.pool = set()
# Event that is polled by threads so that they can gracefully exit in the face
# of sigint
self.stop_flag = threading.Event()
self.logger = structuredlog.StructuredLogger(suite_name)
self.test_queue = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def run(self, test_type, tests):
"""Start all managers in the group"""
self.logger.debug("Using %i processes" % self.size)
self.test_queue = TestQueue(self.test_source_cls,
test_type,
tests,
**self.test_source_kwargs)
with self.test_queue as test_queue:
if test_queue is None:
self.logger.info("No %s tests to run" % test_type)
return
for _ in range(self.size):
manager = TestRunnerManager(self.suite_name,
test_queue,
self.test_source_cls,
self.browser_cls,
self.browser_kwargs,
self.executor_cls,
self.executor_kwargs,
self.stop_flag,
self.pause_after_test,
self.pause_on_unexpected,
self.debug_info)
manager.start()
self.pool.add(manager)
self.wait()
def is_alive(self):
"""Boolean indicating whether any manager in the group is still alive"""
return any(manager.is_alive() for manager in self.pool)
def wait(self):
"""Wait for all the managers in the group to finish"""
for item in self.pool:
item.join()
def stop(self):
"""Set the stop flag so that all managers in the group stop as soon
as possible"""
self.stop_flag.set()
self.logger.debug("Stop flag set in ManagerGroup")
def unexpected_count(self):
return sum(item.unexpected_count for item in self.pool)
| mpl-2.0 |
Lekanich/intellij-community | python/lib/Lib/site-packages/django/core/serializers/__init__.py | 74 | 3535 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_query_set)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv" : "path.to.csv.serializer",
"txt" : "path.to.txt.serializer",
}
"""
from django.conf import settings
from django.utils import importlib
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml" : "django.core.serializers.xml_serializer",
"python" : "django.core.serializers.python",
"json" : "django.core.serializers.json",
}
# Check for PyYaml and register the serializer if it's available.
try:
import yaml
BUILTIN_SERIALIZERS["yaml"] = "django.core.serializers.pyyaml"
except ImportError:
pass
_serializers = {}
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
module = importlib.import_module(serializer_module)
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
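# Example (illustrative module path): after
#   register_serializer("csv", "path.to.csv.serializer")
# get_serializer("csv") returns that module's Serializer class.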
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return _serializers.keys()
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in _serializers.iteritems() if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
    m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
| apache-2.0 |
simoninireland/epydemic | epydemic/__init__.py | 1 | 3722 | # Initialisation for epydemic
#
# Copyright (C) 2017--2020 Simon Dobson
#
# This file is part of epydemic, epidemic network simulations in Python.
#
# epydemic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# epydemic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with epydemic. If not, see <http://www.gnu.org/licenses/gpl.html>.
'''`epydemic` is a library for performing simulations for a range of
epidemic spreading (and other) processes, simulated over networks
represented using `networkx`.
Epidemic processes are very important in both network science and its
applications. The most common application is to study the way in which
diseases progress in different network conditions, depending on their
infectiousness and other properties. Typically such processes are
modelled as a :term:`compartmented model of disease` with conditional
probabilities for moving between compartments (familiar to
computer scientists as stochastic finite state machines).
`epydemic` provides the basic simulation machinery for performing
epidemic simulations under two different simulation regimes:
synchronous :term:`discrete time` simulation in which time proceeds in
discrete time intervals, and stochastic or Gillespie :term:`continuous
time` simulations which are better for handling a wider range of
:term:`event` probabilities (but which are slightly harder to
specify).
`epydemic` also provides processes not directly connected with epidemic modelling
but which are often used alongside disease models. This includes
support for add/delete networks and percolation processes, as well as
support for studying the percolation transition in networks and using
generating functions to analyse their behaviour. Individual
network processes can be combined in different ways to encourage good
software engineering practices.
'''
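# A minimal usage sketch (illustrative; the parameter names are assumptions
# based on the package documentation and may differ between versions):
#   params = {SIR.P_INFECT: 0.1, SIR.P_REMOVE: 0.05, SIR.P_INFECTED: 0.01,
#             ERNetwork.N: 10000, ERNetwork.KMEAN: 5}
#   e = StochasticDynamics(SIR(), ERNetwork())
#   results = e.set(params).run()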
# helper types
from .types import Node, Edge, Element
from .bitstream import Bitstream
from .bbt import TreeNode
from .drawset import DrawSet
# network processes
from .loci import Locus
from .process import Process, EventFunction, EventDistribution
# network generators
from .generator import NetworkGenerator
from .standard_generators import FixedNetwork, ERNetwork, BANetwork
from .plc_generator import PLCNetwork
# networks with dynamical processes
from .networkexperiment import NetworkExperiment
from .networkdynamics import Dynamics
from .synchronousdynamics import SynchronousDynamics
from .stochasticdynamics import StochasticDynamics
# compartmented models
from .compartmentedmodel import CompartmentedModel, CompartmentedLocus, CompartmentedNodeLocus, CompartmentedEdgeLocus
# reference disease models
from .sir_model import SIR
from .sis_model import SIS
from .sirs_model import SIRS
from .seir_model import SEIR
# variant disease models
from .sir_model_fixed_recovery import SIR_FixedRecovery
from .sis_model_fixed_recovery import SIS_FixedRecovery
# other processes
from .adddelete import AddDelete
from .percolate import Percolate
from .monitor import Monitor
from .statistics import NetworkStatistics
from .shuffle import ShuffleK
# process combinators
from .processsequence import ProcessSequence
# other experiments
from .newmanziff import BondPercolation, SitePercolation
# late initialisation
Bitstream.init_default_rng()
| gpl-3.0 |
dfunckt/django | tests/template_tests/syntax_tests/test_named_endblock.py | 521 | 2312 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class NamedEndblockTests(SimpleTestCase):
@setup({'namedendblocks01': '1{% block first %}_{% block second %}'
'2{% endblock second %}_{% endblock first %}3'})
def test_namedendblocks01(self):
output = self.engine.render_to_string('namedendblocks01')
self.assertEqual(output, '1_2_3')
# Unbalanced blocks
@setup({'namedendblocks02': '1{% block first %}_{% block second %}'
'2{% endblock first %}_{% endblock second %}3'})
def test_namedendblocks02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('namedendblocks02')
@setup({'namedendblocks03': '1{% block first %}_{% block second %}'
'2{% endblock %}_{% endblock second %}3'})
def test_namedendblocks03(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('namedendblocks03')
@setup({'namedendblocks04': '1{% block first %}_{% block second %}'
'2{% endblock second %}_{% endblock third %}3'})
def test_namedendblocks04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('namedendblocks04')
@setup({'namedendblocks05': '1{% block first %}_{% block second %}2{% endblock first %}'})
def test_namedendblocks05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('namedendblocks05')
# Mixed named and unnamed endblocks
@setup({'namedendblocks06': '1{% block first %}_{% block second %}'
'2{% endblock %}_{% endblock first %}3'})
def test_namedendblocks06(self):
"""
Mixed named and unnamed endblocks
"""
output = self.engine.render_to_string('namedendblocks06')
self.assertEqual(output, '1_2_3')
@setup({'namedendblocks07': '1{% block first %}_{% block second %}'
'2{% endblock second %}_{% endblock %}3'})
def test_namedendblocks07(self):
output = self.engine.render_to_string('namedendblocks07')
self.assertEqual(output, '1_2_3')
| bsd-3-clause |
HeinleinSupport/check_mk | python-cryptography/lib/python/cryptography/hazmat/primitives/asymmetric/x25519.py | 8 | 1652 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import six
from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
@six.add_metaclass(abc.ABCMeta)
class X25519PublicKey(object):
@classmethod
def from_public_bytes(cls, data):
from cryptography.hazmat.backends.openssl.backend import backend
if not backend.x25519_supported():
raise UnsupportedAlgorithm(
"X25519 is not supported by this version of OpenSSL.",
_Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM
)
return backend.x25519_load_public_bytes(data)
@abc.abstractmethod
def public_bytes(self):
pass
@six.add_metaclass(abc.ABCMeta)
class X25519PrivateKey(object):
@classmethod
def generate(cls):
from cryptography.hazmat.backends.openssl.backend import backend
if not backend.x25519_supported():
raise UnsupportedAlgorithm(
"X25519 is not supported by this version of OpenSSL.",
_Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM
)
return backend.x25519_generate_key()
@classmethod
def _from_private_bytes(cls, data):
from cryptography.hazmat.backends.openssl.backend import backend
return backend.x25519_load_private_bytes(data)
@abc.abstractmethod
def public_key(self):
pass
@abc.abstractmethod
def exchange(self, peer_public_key):
pass
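# Illustrative sketch of the intended API (assumes OpenSSL with X25519):
#   private_key = X25519PrivateKey.generate()
#   peer_public_key = X25519PrivateKey.generate().public_key()
#   shared_secret = private_key.exchange(peer_public_key)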
| gpl-2.0 |
jsburg/xdsme | XOconv/ThreeAxisRotation.py | 8 | 10447 | # This module defines a class that represents coordinate rotation by
# a three-axis rotation system. Handy for goniostat systems, Euler angles or Kappa
# angles manipulation.
#
# Need the ScientificPython module by Konrad Hinsen
# http://starship.python.net/crew/hinsen/scientific.html
# NOTE: The getAngles method has been integrated into ScientificPython since the
# 2.4.9 version (in the Rotation class with method name ...).
# TODO:
# - Add the possibility to input/output angles in radians (default) or degrees.
# - Add doctest.
__author__ = "Pierre Legrand (pierre.legrand \at synchrotron-soleil.fr)"
__date__ = "16-05-2005"
__copyright__ = "Copyright (c) 2004-2005 Pierre Legrand"
__license__ = "GPL"
print "New"
import Numeric
from Scientific.Geometry.VectorModule import *
from Scientific.Geometry.TensorModule import *
from Scientific.Geometry.Transformation import *
import math
pi = math.pi
r2d = 180/pi
# Utility functions
def mod_angle(angle, mod):
return (angle + mod/2.) % mod - mod/2
def kappaVector(alpha):
return Vector([-1*math.sin(alpha), 0, math.cos(alpha)])
def angleFromSineAndCosine2(y, x):
try: return math.atan2(y, x)
except: raise TypeError, 'ThreeAxisRotation:FAILURE in atan2'
# Some Axes Definitions
StandardAxes = ex, ey, ez
NoniusKappaAxes = ez, kappaVector(50/r2d), ez
EulerAxes = ez, ex, ez
DenzoAxes = ez, ey, ex
class ThreeAxisRotation(Rotation):
"""Rotational transformation using three successive axes.
This is a subclass of Rotation (which is a subclass of Transformation).
Constructor:
    ThreeAxisRotation(init, rotationAxes=(ex, ey, ez), inversAxesOrder=0), where:
    - init can be either:
        - a tensor object containing the rotation matrix,
        - a Rotation object,
        - a set of three angles (a1, a2, a3) in radians
    - rotationAxes is a tuple of 3 vectors (e1, e2, e3) defining the three
      rotation axes. Default axes are (ex, ey, ez).
      The resulting rotation is obtained by applying successively:
        - a1 angle around e1 axis, and then
        - a2 angle around e2 axis, and finally
        - a3 angle around e3.
    - If inversAxesOrder is set to 1, the rotations are done in the
      reverse order (a3 angle around e3, a2 around e2 and a1 around e1).
    Note: ThreeAxisRotation((a1,a2,a3),rotationAxes=(e1,e2,e3),inversAxesOrder=0)
is equivalent to:
ThreeAxisRotation((a3,a2,a1),rotationAxes=(e3,e2,e1),inversAxesOrder=1)
"""
def __init__(self, init, rotationAxes=(ex, ey, ez), inversAxesOrder=0):
self.inversAxesOrder = inversAxesOrder
self.e1 = Vector(rotationAxes[0]).normal()
self.e2 = Vector(rotationAxes[1]).normal()
self.e3 = Vector(rotationAxes[2]).normal()
if inversAxesOrder:
self.e1 = Vector(rotationAxes[2]).normal()
self.e3 = Vector(rotationAxes[0]).normal()
self.rotationAxes = self.e1, self.e2, self.e3
if hasattr(init,'is_tensor') == 1 :
self.tensor = init
elif hasattr(init,'is_rotation') == 1:
self.tensor = init.tensor
elif len(init) == 3:
print "len3"
a1, a2, a3 = init
print a1, a2, a3
if inversAxesOrder: a3, a2, a1 = init
R1 = Rotation(self.e1, a1)
R2 = Rotation(self.e2, a2)
R3 = Rotation(self.e3, a3)
self.tensor = (R1*R2*R3).tensor
elif len(init) == 9 and type(init) == list:
"Used for compatibility with other tensor types, like cgtypes.mat3"
"In that case, use mat3.mlist."
self.tensor = Tensor([[init[0], init[1], init[2]],
[init[3], init[4], init[5]],
[init[6], init[7], init[8]]])
else:
raise TypeError, 'no valid arguments'
is_3axes_rotation = 1
def addAngles(self, angles):
"""Add angles to the current rotation.
Does it works???."""
a1, a2, a3 = angles
if self.inversAxesOrder: a3, a2, a1 = angles
R1 = Rotation(self.e1, a1)
R2 = Rotation(self.e2, a2)
R3 = Rotation(self.e3, a3)
#self.tensor = (R1*R2*R3*self).tensor
self.tensor = (self*R1*R2*R3).tensor
def setAngles(self, angles):
"Set angles defining a new rotation"
a1, a2, a3 = angles
if self.inversAxesOrder: a3, a2, a1 = angles
R1 = Rotation(self.e1, a1)
R2 = Rotation(self.e2, a2)
R3 = Rotation(self.e3, a3)
self.tensor = (R1*R2*R3).tensor
def getAngles(self, tolerance=1e-7):
"""
Get angles from the matrix given a set of 3 unit vectors describing
        a 3-axis goniometer. Basically this is a reimplementation of David
Thomas's algorithm [1] described by Gerard Bricogne in [2]:
[1] "Modern Equations of Diffractometry. Goniometry." D.J. Thomas
Acta Cryst. (1990) A46 Page 321-343.
[2] "The ECC Cooperative Programming Workshop on Position-Sensitive
Detector Software." G. Bricogne,
Computational aspect of Protein Crystal Data Analysis,
Proceedings of the Daresbury Study Weekend (23-24/01/1987)
Page 122-126
"""
# We are searching for the three angles a1, a2, a3
e1, e2, e3 = self.rotationAxes
# If 2 consecutive axes are parallel: decomposition is not meaningful
if (e1.cross(e2)).length() < tolerance or \
(e2.cross(e3)).length() < tolerance :
            raise ValueError, 'Consecutive parallel axes. Too many solutions'
ROT = Rotation(self.tensor)
w = ROT(e3)
# Solve the equation : _a.cosx + _b.sinx = _c
_a = e1*e3 - (e1*e2)*(e2*e3)
_b = e1*(e2.cross(e3))
_c = e1*w - (e1*e2)*(e2*e3)
_norm = (_a**2 + _b**2)**0.5
# Checking for possible errors in initial Rot matrix
if _norm == 0: raise ValueError, 'ThreeAxisRotation:FAILURE 1, norm = 0'
if abs(_c/_norm) > 1+tolerance:
raise ValueError, 'ThreeAxisRotation:FAILURE 2' + \
'malformed rotation Tensor (non orthogonal?) %.8f' % (_c/_norm)
_c_norm = _c/_norm
if _c_norm > 1:
if _c_norm < 1 + tolerance: _c_norm = 1
else: raise ValueError, 'Step1: No solution'
_th = angleFromSineAndCosine2(_b/_norm, _a/_norm)
_xmth = math.acos((_c_norm))
# a2a and a2b are the two possible solutions to the equation.
a2a = mod_angle((_th + _xmth), 2*pi)
a2b = mod_angle((_th - _xmth), 2*pi)
solutions = []
# for each solution, find the two other angles (a1, a3).
for a2 in (a2a, a2b):
R2 = Rotation(e2, a2)
v = R2(e3)
v1 = v - (v*e1)*e1
w1 = w - (w*e1)*e1
norm = ((v1*v1)*(w1*w1))**0.5
if norm == 0:
# in that case rotation 1 and 3 are about the same axis
# so any solution for rotation 1 is OK
a1 = 0.
else:
cosa1 = (v1*w1)/norm
sina1 = v1*(w1.cross(e1))/norm
a1 = mod_angle(angleFromSineAndCosine2(sina1, cosa1), 2*pi)
R3 = Rotation(e2, -1*a2)*Rotation(e1, -1*a1)*ROT
# u = normalized test vector perpendicular to e3
# if e2 and e3 are // we have an exception before.
# if we take u = e1^e3 then it will not work for Euler and Kappa axes.
u = (e2.cross(e3)).normal()
cosa3 = u*R3(u)
sina3 = u*(R3(u).cross(e3))
a3 = mod_angle(angleFromSineAndCosine2(sina3, cosa3), 2*pi)
if self.inversAxesOrder: solutions.append(Numeric.array([a3, a2, a1]))
else: solutions.append(Numeric.array([a1, a2, a3]))
# Gives the closest solution to 0,0,0 first
if Numeric.add.reduce(solutions[0]**2) > Numeric.add.reduce(solutions[1]**2):
solutions = [solutions[1], solutions[0]]
return solutions
def asQuaternion(self):
from Scientific.Geometry.Quaternion import Quaternion
axis, angle = self.axisAndAngle()
sin_angle_2 = math.sin(0.5*angle)
cos_angle_2 = math.cos(0.5*angle)
return Quaternion(cos_angle_2, sin_angle_2*axis[0],
sin_angle_2*axis[1], sin_angle_2*axis[2])
# Tests functions
def random_3axes():
r = random.uniform
return (Vector(r(-1.,1.),r(-1.,1.),r(-1.,1.)).normal(),
Vector(r(-1.,1.),r(-1.,1.),r(-1.,1.)).normal(),
Vector(r(-1.,1.),r(-1.,1.),r(-1.,1.)).normal())
def test_internal_coherance(axes_i):
sum = Numeric.add.reduce
r = random.uniform
a1, a2, a3 = r(-180,180),r(-180,180),r(-180,180)
angles_i = Numeric.array([a1, a2, a3])
angles_r = angles_i[::-1]
axes_r = axes_i[2], axes_i[1], axes_i[0]
rotdnz_new = ThreeAxisRotation(angles_i/r2d, rotationAxes=axes_i, inversAxesOrder=1)
rotdnz_inv = ThreeAxisRotation(angles_r/r2d, rotationAxes=axes_r, inversAxesOrder=0)
diffmat1 = sum(sum(Numeric.absolute(rotdnz_new.tensor - rotdnz_inv.tensor)))
sol1_new, sol2_new = rotdnz_new.getAngles()
sol1_inv, sol2_inv = rotdnz_inv.getAngles()
diffinit = min(sum(Numeric.absolute(sol1_new*r2d-angles_i)),
sum(Numeric.absolute(sol2_new*r2d-angles_i)))
diffinv = min(sum(Numeric.absolute(sol1_inv[::-1]*r2d-angles_i)),
sum(Numeric.absolute(sol2_inv[::-1]*r2d-angles_i)))
#print rotdnz_new.asQuaternion()
if diffmat1 > 1e-12 or diffinit > 2e-11 or diffinv > 2e-11:
print "I"+3*"%10.2f" % tuple(Numeric.array(angles_i))
print "N"+3*"%10.2f" % tuple(Numeric.array(sol1_new)*r2d)
print "N"+3*"%10.2f" % tuple(Numeric.array(sol2_new)*r2d)
print 'Matrix difference \t%.1e' % diffmat1
print 'Init Angle difference \t%.1e' % diffinit
print 'Inv Angle difference \t%.1e' % diffinv
if __name__ == '__main__':
import random
for a in range(300):
axes = random_3axes()
test_internal_coherance(axes)
for axes in (StandardAxes, DenzoAxes, NoniusKappaAxes, EulerAxes, random_3axes()):
print axes
for a in range(300):
test_internal_coherance(axes)
| bsd-3-clause |
adamwwt/chvac | venv/lib/python2.7/site-packages/coverage/templite.py | 160 | 6868 | """A simple Python template renderer, for a nano-subset of Django syntax."""
# Coincidentally named the same as http://code.activestate.com/recipes/496702/
import re
from coverage.backward import set # pylint: disable=W0622
class CodeBuilder(object):
"""Build source code conveniently."""
def __init__(self, indent=0):
self.code = []
self.indent_amount = indent
def add_line(self, line):
"""Add a line of source to the code.
Don't include indentations or newlines.
"""
self.code.append(" " * self.indent_amount)
self.code.append(line)
self.code.append("\n")
def add_section(self):
"""Add a section, a sub-CodeBuilder."""
sect = CodeBuilder(self.indent_amount)
self.code.append(sect)
return sect
def indent(self):
"""Increase the current indent for following lines."""
self.indent_amount += 4
def dedent(self):
"""Decrease the current indent for following lines."""
self.indent_amount -= 4
def __str__(self):
return "".join([str(c) for c in self.code])
def get_function(self, fn_name):
"""Compile the code, and return the function `fn_name`."""
assert self.indent_amount == 0
g = {}
code_text = str(self)
exec(code_text, g)
return g[fn_name]
class Templite(object):
"""A simple template renderer, for a nano-subset of Django syntax.
Supported constructs are extended variable access::
        {{var.modifier.modifier|filter|filter}}
loops::
{% for var in list %}...{% endfor %}
and ifs::
{% if var %}...{% endif %}
Comments are within curly-hash markers::
{# This will be ignored #}
Construct a Templite with the template text, then use `render` against a
dictionary context to create a finished string.
"""
def __init__(self, text, *contexts):
"""Construct a Templite with the given `text`.
`contexts` are dictionaries of values to use for future renderings.
These are good for filters and global values.
"""
self.text = text
self.context = {}
for context in contexts:
self.context.update(context)
# We construct a function in source form, then compile it and hold onto
# it, and execute it to render the template.
code = CodeBuilder()
code.add_line("def render(ctx, dot):")
code.indent()
vars_code = code.add_section()
self.all_vars = set()
self.loop_vars = set()
code.add_line("result = []")
code.add_line("a = result.append")
code.add_line("e = result.extend")
code.add_line("s = str")
buffered = []
def flush_output():
"""Force `buffered` to the code builder."""
if len(buffered) == 1:
code.add_line("a(%s)" % buffered[0])
elif len(buffered) > 1:
code.add_line("e([%s])" % ",".join(buffered))
del buffered[:]
# Split the text to form a list of tokens.
toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
ops_stack = []
for tok in toks:
if tok.startswith('{{'):
# An expression to evaluate.
buffered.append("s(%s)" % self.expr_code(tok[2:-2].strip()))
elif tok.startswith('{#'):
# Comment: ignore it and move on.
continue
elif tok.startswith('{%'):
# Action tag: split into words and parse further.
flush_output()
words = tok[2:-2].strip().split()
if words[0] == 'if':
# An if statement: evaluate the expression to determine if.
assert len(words) == 2
ops_stack.append('if')
code.add_line("if %s:" % self.expr_code(words[1]))
code.indent()
elif words[0] == 'for':
# A loop: iterate over expression result.
assert len(words) == 4 and words[2] == 'in'
ops_stack.append('for')
self.loop_vars.add(words[1])
code.add_line(
"for c_%s in %s:" % (
words[1],
self.expr_code(words[3])
)
)
code.indent()
elif words[0].startswith('end'):
# Endsomething. Pop the ops stack
end_what = words[0][3:]
if ops_stack[-1] != end_what:
raise SyntaxError("Mismatched end tag: %r" % end_what)
ops_stack.pop()
code.dedent()
else:
raise SyntaxError("Don't understand tag: %r" % words[0])
else:
# Literal content. If it isn't empty, output it.
if tok:
buffered.append("%r" % tok)
flush_output()
for var_name in self.all_vars - self.loop_vars:
vars_code.add_line("c_%s = ctx[%r]" % (var_name, var_name))
if ops_stack:
raise SyntaxError("Unmatched action tag: %r" % ops_stack[-1])
code.add_line("return ''.join(result)")
code.dedent()
self.render_function = code.get_function('render')
def expr_code(self, expr):
"""Generate a Python expression for `expr`."""
if "|" in expr:
pipes = expr.split("|")
code = self.expr_code(pipes[0])
for func in pipes[1:]:
self.all_vars.add(func)
code = "c_%s(%s)" % (func, code)
elif "." in expr:
dots = expr.split(".")
code = self.expr_code(dots[0])
args = [repr(d) for d in dots[1:]]
code = "dot(%s, %s)" % (code, ", ".join(args))
else:
self.all_vars.add(expr)
code = "c_%s" % expr
return code
def render(self, context=None):
"""Render this template by applying it to `context`.
`context` is a dictionary of values to use in this rendering.
"""
# Make the complete context we'll use.
ctx = dict(self.context)
if context:
ctx.update(context)
return self.render_function(ctx, self.do_dots)
def do_dots(self, value, *dots):
"""Evaluate dotted expressions at runtime."""
for dot in dots:
try:
value = getattr(value, dot)
except AttributeError:
value = value[dot]
if hasattr(value, '__call__'):
value = value()
return value
| mit |
awagnon/maraschino | lib/flask/testsuite/ext.py | 57 | 4720 | # -*- coding: utf-8 -*-
"""
flask.testsuite.ext
~~~~~~~~~~~~~~~~~~~
    Tests the extension import hook (flask.ext).
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
import unittest
from flask.testsuite import FlaskTestCase
class ExtImportHookTestCase(FlaskTestCase):
def setup(self):
# we clear this out for various reasons. The most important one is
# that a real flaskext could be in there which would disable our
# fake package. Secondly we want to make sure that the flaskext
# import hook does not break on reloading.
for entry, value in sys.modules.items():
if (entry.startswith('flask.ext.') or
entry.startswith('flask_') or
entry.startswith('flaskext.') or
entry == 'flaskext') and value is not None:
sys.modules.pop(entry, None)
from flask import ext
reload(ext)
# reloading must not add more hooks
import_hooks = 0
for item in sys.meta_path:
cls = type(item)
if cls.__module__ == 'flask.exthook' and \
cls.__name__ == 'ExtensionImporter':
import_hooks += 1
self.assert_equal(import_hooks, 1)
def teardown(self):
from flask import ext
for key in ext.__dict__:
self.assert_('.' not in key)
def test_flaskext_new_simple_import_normal(self):
from flask.ext.newext_simple import ext_id
self.assert_equal(ext_id, 'newext_simple')
def test_flaskext_new_simple_import_module(self):
from flask.ext import newext_simple
self.assert_equal(newext_simple.ext_id, 'newext_simple')
self.assert_equal(newext_simple.__name__, 'flask_newext_simple')
def test_flaskext_new_package_import_normal(self):
from flask.ext.newext_package import ext_id
self.assert_equal(ext_id, 'newext_package')
def test_flaskext_new_package_import_module(self):
from flask.ext import newext_package
self.assert_equal(newext_package.ext_id, 'newext_package')
self.assert_equal(newext_package.__name__, 'flask_newext_package')
def test_flaskext_new_package_import_submodule_function(self):
from flask.ext.newext_package.submodule import test_function
self.assert_equal(test_function(), 42)
def test_flaskext_new_package_import_submodule(self):
from flask.ext.newext_package import submodule
self.assert_equal(submodule.__name__, 'flask_newext_package.submodule')
self.assert_equal(submodule.test_function(), 42)
def test_flaskext_old_simple_import_normal(self):
from flask.ext.oldext_simple import ext_id
self.assert_equal(ext_id, 'oldext_simple')
def test_flaskext_old_simple_import_module(self):
from flask.ext import oldext_simple
self.assert_equal(oldext_simple.ext_id, 'oldext_simple')
self.assert_equal(oldext_simple.__name__, 'flaskext.oldext_simple')
def test_flaskext_old_package_import_normal(self):
from flask.ext.oldext_package import ext_id
self.assert_equal(ext_id, 'oldext_package')
def test_flaskext_old_package_import_module(self):
from flask.ext import oldext_package
self.assert_equal(oldext_package.ext_id, 'oldext_package')
self.assert_equal(oldext_package.__name__, 'flaskext.oldext_package')
def test_flaskext_old_package_import_submodule(self):
from flask.ext.oldext_package import submodule
self.assert_equal(submodule.__name__, 'flaskext.oldext_package.submodule')
self.assert_equal(submodule.test_function(), 42)
def test_flaskext_old_package_import_submodule_function(self):
from flask.ext.oldext_package.submodule import test_function
self.assert_equal(test_function(), 42)
def test_flaskext_broken_package_no_module_caching(self):
for x in xrange(2):
with self.assert_raises(ImportError):
import flask.ext.broken
def test_no_error_swallowing(self):
try:
import flask.ext.broken
except ImportError:
exc_type, exc_value, tb = sys.exc_info()
self.assert_(exc_type is ImportError)
self.assert_equal(str(exc_value), 'No module named missing_module')
self.assert_(tb.tb_frame.f_globals is globals())
next = tb.tb_next
self.assert_('flask_broken/__init__.py' in next.tb_frame.f_code.co_filename)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExtImportHookTestCase))
return suite
| mit |
kubum/elasticsearch | dev-tools/download-s3.py | 236 | 2295 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import argparse
try:
import boto.s3
except:
raise RuntimeError("""
S3 download requires boto to be installed
Use one of:
'pip install -U boto'
'apt-get install python-boto'
'easy_install boto'
""")
import boto.s3
def list_buckets(conn):
return conn.get_all_buckets()
def download_s3(conn, path, key, file, bucket):
print 'Downloading %s from Amazon S3 bucket %s/%s' % \
(file, bucket, os.path.join(path, key))
def percent_cb(complete, total):
sys.stdout.write('.')
sys.stdout.flush()
bucket = conn.get_bucket(bucket)
k = bucket.get_key(os.path.join(path, key))
k.get_contents_to_filename(file, cb=percent_cb, num_cb=100)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Downloads a bucket from Amazon S3')
parser.add_argument('--file', '-f', metavar='path to file',
help='path to store the bucket to', required=True)
parser.add_argument('--bucket', '-b', default='downloads.elasticsearch.org',
help='The S3 Bucket to download from')
parser.add_argument('--path', '-p', default='',
help='The key path to use')
parser.add_argument('--key', '-k', default=None,
help='The key - uses the file name as default key')
args = parser.parse_args()
if args.key:
key = args.key
else:
key = os.path.basename(args.file)
connection = boto.connect_s3()
download_s3(connection, args.path, key, args.file, args.bucket);
| apache-2.0 |
ad-m/python-anticaptcha | python_anticaptcha/fields.py | 1 | 5444 | import six
from python_anticaptcha.exceptions import InvalidWidthException, MissingNameException
class BaseField(object):
label = None
labelHint = None
def serialize(self, name=None):
data = {}
if self.label:
data["label"] = self.label or False
if self.labelHint:
data["labelHint"] = self.labelHint or False
return data
class NameBaseField(BaseField):
name = None
def serialize(self, name=None):
data = super(NameBaseField, self).serialize(name)
if name:
data["name"] = name
elif self.name:
data["name"] = self.name
else:
raise MissingNameException(cls=self.__class__)
return data
class SimpleText(BaseField):
contentType = "text"
def __init__(self, content, label=None, labelHint=None, width=None):
self.label = label
self.labelHint = labelHint
self.content = content
self.width = width
def serialize(self, name=None):
data = super(SimpleText, self).serialize(name)
data["contentType"] = self.contentType
data["content"] = self.content
if self.width:
if self.width not in [100, 50, 33, 25]:
raise InvalidWidthException(self.width)
data["inputOptions"] = {}
data["width"] = self.width
return data
class Image(BaseField):
contentType = "image"
def __init__(self, imageUrl, label=None, labelHint=None):
self.label = label
self.labelHint = labelHint
self.imageUrl = imageUrl
def serialize(self, name=None):
data = super(Image, self).serialize(name)
data["contentType"] = self.contentType
data["content"] = self.imageUrl
return data
class WebLink(BaseField):
contentType = "link"
def __init__(self, linkText, linkUrl, label=None, labelHint=None, width=None):
self.label = label
self.labelHint = labelHint
self.linkText = linkText
self.linkUrl = linkUrl
self.width = width
def serialize(self, name=None):
data = super(WebLink, self).serialize(name)
data["contentType"] = self.contentType
if self.width:
if self.width not in [100, 50, 33, 25]:
raise InvalidWidthException(self.width)
data["inputOptions"] = {}
data["width"] = self.width
data.update({"content": {"url": self.linkUrl, "text": self.linkText}})
return data
class TextInput(NameBaseField):
def __init__(self, placeHolder=None, label=None, labelHint=None, width=None):
self.label = label
self.labelHint = labelHint
self.placeHolder = placeHolder
self.width = width
def serialize(self, name=None):
data = super(TextInput, self).serialize(name)
data["inputType"] = "text"
data["inputOptions"] = {}
if self.width:
if self.width not in [100, 50, 33, 25]:
raise InvalidWidthException(self.width)
data["inputOptions"]["width"] = str(self.width)
if self.placeHolder:
data["inputOptions"]["placeHolder"] = self.placeHolder
return data
class Textarea(NameBaseField):
def __init__(
self, placeHolder=None, rows=None, label=None, width=None, labelHint=None
):
self.label = label
self.labelHint = labelHint
self.placeHolder = placeHolder
self.rows = rows
self.width = width
def serialize(self, name=None):
data = super(Textarea, self).serialize(name)
data["inputType"] = "textarea"
data["inputOptions"] = {}
if self.rows:
data["inputOptions"]["rows"] = str(self.rows)
if self.placeHolder:
data["inputOptions"]["placeHolder"] = self.placeHolder
if self.width:
data["inputOptions"]["width"] = str(self.width)
return data
class Checkbox(NameBaseField):
def __init__(self, text, label=None, labelHint=None):
self.label = label
self.labelHint = labelHint
self.text = text
def serialize(self, name=None):
data = super(Checkbox, self).serialize(name)
data["inputType"] = "checkbox"
data["inputOptions"] = {"label": self.text}
return data
class Select(NameBaseField):
type = "select"
def __init__(self, label=None, choices=None, labelHint=None):
self.label = label
self.labelHint = labelHint
self.choices = choices or ()
def get_choices(self):
for choice in self.choices:
if isinstance(choice, six.text_type):
yield choice, choice
else:
yield choice
def serialize(self, name=None):
data = super(Select, self).serialize(name)
data["inputType"] = self.type
data["inputOptions"] = []
for value, caption in self.get_choices():
data["inputOptions"].append({"value": value, "caption": caption})
return data
class Radio(Select):
type = "radio"
class ImageUpload(NameBaseField):
def __init__(self, label=None, labelHint=None):
self.label = label
self.labelHint = labelHint
def serialize(self, name=None):
data = super(ImageUpload, self).serialize(name)
data["inputType"] = "imageUpload"
return data
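# Usage sketch (not part of the library; the field name "answer" and the texts
# are illustrative). Serializing a field yields the dict submitted to the
# anti-captcha custom-form API:
#   field = TextInput(placeHolder="Your answer", label="Question", width=50)
#   field.serialize(name="answer")
#   # -> {'label': 'Question', 'name': 'answer', 'inputType': 'text',
#   #     'inputOptions': {'width': '50', 'placeHolder': 'Your answer'}}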
| mit |
CentreForCorpusResearch/clic | clic/normalizer.py | 2 | 1240 | # -*- coding: utf-8 -*-
'''
Defines normalizers that can be used in the cheshire3 indexing workflow.
'''
from cheshire3.normalizer import SimpleNormalizer
class NovelNormalizer(SimpleNormalizer):
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
self.novels = ['BH', 'BR', 'DC',
'DS', 'ED', 'GE', 'HT', 'LD', 'MC', 'NN',
'OCS', 'OMF', 'OT', 'PP', 'TTC']
def process_string(self, session, data):
if data in self.novels:
return 'novel'
else:
return 'other'
class CorpusNormalizer(SimpleNormalizer):
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
self.dickens = ['AN', 'BH', 'BL', 'BR', 'CC', 'CHI', 'CH', 'DC',
'DS', 'ED', 'GE', 'HM', 'HT', 'LD', 'MC', 'NN',
'OCS', 'OMF', 'OT', 'PP', 'SB', 'TTC', 'UT']
# self.austen = ['E']
def process_string(self, session, data):
if data in self.dickens:
return 'dickens'
# elif data in self.austen:
# return 'austen'
else:
return 'ntc'
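# Behaviour sketch: process_string only inspects its data argument, so for a
# CorpusNormalizer instance n (session/config/parent supplied by cheshire3):
#   n.process_string(session, 'BH') -> 'dickens'
#   n.process_string(session, 'E')  -> 'ntc' (while the austen list stays commented out)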
| mit |
TDAbboud/micropython | tests/micropython/viper_error.py | 27 | 1989 | # test syntax and type errors specific to viper code generation
def test(code):
try:
exec(code)
except (SyntaxError, ViperTypeError, NotImplementedError) as e:
print(repr(e))
# viper: annotations must be identifiers
test("@micropython.viper\ndef f(a:1): pass")
test("@micropython.viper\ndef f() -> 1: pass")
# unknown type
test("@micropython.viper\ndef f(x:unknown_type): pass")
# too many arguments
test("@micropython.viper\ndef f(a, b, c, d, e): pass")
# local used before type known
test("""
@micropython.viper
def f():
print(x)
x = 1
""")
# type mismatch storing to local
test("""
@micropython.viper
def f():
x = 1
y = []
x = y
""")
# can't implicitly convert type to bool
test("""
@micropython.viper
def f():
x = ptr(0)
if x:
pass
""")
# incorrect return type
test("@micropython.viper\ndef f() -> int: return []")
# can't do binary op between incompatible types
test("@micropython.viper\ndef f(): 1 + []")
# can't load
test("@micropython.viper\ndef f(): 1[0]")
test("@micropython.viper\ndef f(): 1[x]")
# can't store
test("@micropython.viper\ndef f(): 1[0] = 1")
test("@micropython.viper\ndef f(): 1[x] = 1")
test("@micropython.viper\ndef f(x:int): x[0] = x")
test("@micropython.viper\ndef f(x:ptr32): x[0] = None")
test("@micropython.viper\ndef f(x:ptr32): x[x] = None")
# must raise an object
test("@micropython.viper\ndef f(): raise 1")
# unary ops not implemented
test("@micropython.viper\ndef f(x:int): +x")
test("@micropython.viper\ndef f(x:int): -x")
test("@micropython.viper\ndef f(x:int): ~x")
# binary op not implemented
test("@micropython.viper\ndef f(x:int): res = x in x")
# yield (from) not implemented
test("@micropython.viper\ndef f(): yield")
test("@micropython.viper\ndef f(): yield from f")
# passing a ptr to a Python function not implemented
test("@micropython.viper\ndef f(): print(ptr(1))")
# cast of a casting identifier not implemented
test("@micropython.viper\ndef f(): int(int)")
| mit |
Matt-Deacalion/Mentalist | mentalist/settings/local.py | 1 | 2932 | """Development settings and globals."""
from .base import *
# ──┤ DEBUG CONFIGURATION ├───────────────────────────────────────────────────┐
# See: http://serk.io/ref/settings/#debug
DEBUG = True
# See: http://serk.io/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# ────────────────────────────────────────────────────────────────────────────┘
# ──┤ BASE URL ├──────────────────────────────────────────────────────────────┐
BASE_URL = 'http://127.0.0.1:8000'
# ────────────────────────────────────────────────────────────────────────────┘
# ──┤ EMAIL CONFIGURATION ├───────────────────────────────────────────────────┐
# See: http://serk.io/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# ────────────────────────────────────────────────────────────────────────────┘
# ──┤ DATABASE CONFIGURATION ├────────────────────────────────────────────────┐
# See: http://serk.io/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'mentalist',
'USER': 'mentalist',
'PASSWORD': 'mentalist',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# ────────────────────────────────────────────────────────────────────────────┘
# ──┤ CACHE CONFIGURATION ├───────────────────────────────────────────────────┐
# See: http://serk.io/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# ────────────────────────────────────────────────────────────────────────────┘
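# Quick smoke test for the locmem cache above (a sketch, run in `manage.py shell`):
#   >>> from django.core.cache import cache
#   >>> cache.set('ping', 'pong', 30)
#   >>> cache.get('ping')
#   'pong'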
| mit |
woltage/ansible | lib/ansible/plugins/action/copy.py | 9 | 15017 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import pipes
import stat
import tempfile
from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum
from ansible.utils.unicode import to_bytes
from ansible.parsing.vault import VaultLib
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
raw = boolean(self._task.args.get('raw', 'no'))
force = boolean(self._task.args.get('force', 'yes'))
faf = self._task.first_available_file
if (source is None and content is None and faf is None) or dest is None:
return dict(failed=True, msg="src (or content) and dest are required")
elif (source is not None or faf is not None) and content is not None:
return dict(failed=True, msg="src and content are mutually exclusive")
# Check if the source ends with a "/"
source_trailing_slash = False
if source:
source_trailing_slash = source.endswith(os.sep)
# Define content_tempfile in case we set it after finding content populated.
content_tempfile = None
# If content is defined make a temp file and write the content into it.
if content is not None:
try:
# If content comes to us as a dict it should be decoded json.
# We need to encode it back into a string to write it out.
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
return dict(failed=True, msg="could not write content temp file: %s" % err)
# if we have first_available_file in our vars
# look up the files and use the first one we find as src
elif faf:
#FIXME: issue deprecation warning for first_available_file, use with_first_found or lookup('first_found',...) instead
found = False
for fn in faf:
fn_orig = fn
fnt = self._templar.template(fn)
fnd = self._loader.path_dwim_relative(self._task._role._role_path, 'files', fnt)
of = task_vars.get('_original_file', None)
if not os.path.exists(fnd) and of is not None:
fnd = self._loader.path_dwim_relative(of, 'files', of)
if os.path.exists(fnd):
source = fnd
found = True
break
if not found:
return dict(failed=True, msg="could not find src in first_available_file list")
else:
if self._task._role is not None:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
else:
source = self._loader.path_dwim(source)
# A list of source file tuples (full_path, relative_path) which will try to copy to the destination
source_files = []
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(source):
# Get the amount of spaces to remove to get the relative path.
if source_trailing_slash:
sz = len(source)
else:
sz = len(source.rsplit('/', 1)[0]) + 1
# Walk the directory and append the file tuples to source_files.
for base_path, sub_folders, files in os.walk(source):
for file in files:
full_path = os.path.join(base_path, file)
rel_path = full_path[sz:]
source_files.append((full_path, rel_path))
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = self._connection._shell.join_path(dest, '')
else:
source_files.append((source, os.path.basename(source)))
changed = False
diffs = []
module_result = {"changed": False}
# A register for if we executed a module.
# Used to cut down on command calls when not recursive.
module_executed = False
# Tell _execute_module to delete the file if there is one file.
delete_remote_tmp = (len(source_files) == 1)
# If this is a recursive action create a tmp path that we can share as the _exec_module create is too late.
if not delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
# expand any user home dir specifier
dest = self._remote_expand_user(dest, tmp)
for source_full, source_rel in source_files:
# Generate a hash of the local file.
local_checksum = checksum(source_full)
# If local_checksum is not defined we can't find the file so we should fail out.
if local_checksum is None:
return dict(failed=True, msg="could not find src=%s" % source_full)
# This is a kind of optimization - if the user told us the destination is
# a dir, do the path manipulation right away; otherwise we still check
# for dest being a dir via the remote call below.
if self._connection._shell.path_has_trailing_slash(dest):
dest_file = self._connection._shell.join_path(dest, source_rel)
else:
dest_file = self._connection._shell.join_path(dest)
# Attempt to get the remote checksum
remote_checksum = self._remote_checksum(tmp, dest_file)
if remote_checksum == '3':
# The remote_checksum was executed on a directory.
if content is not None:
# If source was defined as content remove the temporary file and fail out.
self._remove_tempfile_if_content_defined(content, content_tempfile)
return dict(failed=True, msg="can not use content with a dir as dest")
else:
# Append the relative source location to the destination and retry remote_checksum
dest_file = self._connection._shell.join_path(dest, source_rel)
remote_checksum = self._remote_checksum(tmp, dest_file)
if remote_checksum != '1' and not force:
# remote_file does not exist so continue to next iteration.
continue
if local_checksum != remote_checksum:
# The checksums don't match and we will change or error out.
changed = True
# Create a tmp path if missing only if this is not recursive.
# If this is recursive we already have a tmp path.
if delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
# FIXME: runner shouldn't have the diff option there
#if self.runner.diff and not raw:
# diff = self._get_diff_data(tmp, dest_file, source_full, task_vars)
#else:
# diff = {}
diff = {}
if self._connection_info.check_mode:
self._remove_tempfile_if_content_defined(content, content_tempfile)
# FIXME: diff stuff
#diffs.append(diff)
changed = True
module_return = dict(changed=True)
continue
# Define a remote directory that we will copy the file to.
tmp_src = tmp + 'source'
if not raw:
self._connection.put_file(source_full, tmp_src)
else:
self._connection.put_file(source_full, dest_file)
# We have copied the file remotely and no longer require our content_tempfile
self._remove_tempfile_if_content_defined(content, content_tempfile)
# fix file permissions when the copy is done as a different user
if self._connection_info.become and self._connection_info.become_user != 'root':
self._remote_chmod('a+r', tmp_src, tmp)
if raw:
# Continue to next iteration if raw is defined.
continue
# Run the copy module
# src and dest here come after original and override them
# we pass dest only to make sure it includes trailing slash in case of recursive copy
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
dest=dest,
original_basename=source_rel,
)
)
module_return = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp)
module_executed = True
else:
# no need to transfer the file, already correct hash, but still need to call
# the file module in case we want to change attributes
self._remove_tempfile_if_content_defined(content, content_tempfile)
if raw:
# Continue to next iteration if raw is defined.
# self._remove_tmp_path(tmp)
continue
# Build temporary module_args.
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=source_rel,
dest=dest,
original_basename=source_rel
)
)
# Execute the file module.
module_return = self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp)
module_executed = True
if not module_return.get('checksum'):
module_return['checksum'] = local_checksum
if module_return.get('failed') == True:
return module_return
if module_return.get('changed') == True:
changed = True
# the file module returns the file path as 'path', but
# the copy module uses 'dest', so add it if it's not there
if 'path' in module_return and 'dest' not in module_return:
module_return['dest'] = module_return['path']
# Delete tmp path if we were recursive or if we did not execute a module.
if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
self._remove_tmp_path(tmp)
# TODO: Support detailed status/diff for multiple files
if module_executed and len(source_files) == 1:
result = module_return
else:
result = dict(dest=dest, src=source, changed=changed)
if len(diffs) == 1:
result['diff']=diffs[0]
return result
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def _get_diff_data(self, tmp, destination, source, task_vars):
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True)
if ('failed' in peek_result and peek_result['failed']) or peek_result.get('rc', 0) != 0:
return {}
diff = {}
if peek_result['state'] == 'absent':
diff['before'] = ''
elif peek_result['appears_binary']:
diff['dst_binary'] = 1
# FIXME: this should not be in utils..
#elif peek_result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
# diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
else:
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, tmp=tmp, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise Exception("unknown encoding, failed: %s" % dest_result)
diff['before_header'] = destination
diff['before'] = dest_contents
src = open(source)
src_contents = src.read(8192)
st = os.stat(source)
if "\x00" in src_contents:
diff['src_binary'] = 1
# FIXME: this should not be in utils
#elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
# diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
else:
src.seek(0)
diff['after_header'] = source
diff['after'] = src.read()
return diff
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
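# Illustrative task serviced by this action plugin (values are hypothetical):
#
#   - name: install config
#     copy: src=files/foo.conf dest=/etc/foo.conf
#
# A trailing slash on src (src=files/) takes the os.path.isdir() branch above,
# walking the tree and copying each file with dest treated as a directory.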
| gpl-3.0 |
xiaoyaozi5566/GEM5_DRAMSim2 | src/arch/x86/isa/insts/simd128/floating_point/logical/andp.py | 91 | 4275 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop ANDPS_XMM_XMM {
mand xmml, xmml, xmmlm
mand xmmh, xmmh, xmmhm
};
def macroop ANDPS_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mand xmml, xmml, ufp1
mand xmmh, xmmh, ufp2
};
def macroop ANDPS_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mand xmml, xmml, ufp1
mand xmmh, xmmh, ufp2
};
def macroop ANDPD_XMM_XMM {
mand xmml, xmml, xmmlm
mand xmmh, xmmh, xmmhm
};
def macroop ANDPD_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mand xmml, xmml, ufp1
mand xmmh, xmmh, ufp2
};
def macroop ANDPD_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mand xmml, xmml, ufp1
mand xmmh, xmmh, ufp2
};
def macroop ANDNPS_XMM_XMM {
mandn xmml, xmml, xmmlm
mandn xmmh, xmmh, xmmhm
};
def macroop ANDNPS_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mandn xmml, xmml, ufp1
mandn xmmh, xmmh, ufp2
};
def macroop ANDNPS_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mandn xmml, xmml, ufp1
mandn xmmh, xmmh, ufp2
};
def macroop ANDNPD_XMM_XMM {
mandn xmml, xmml, xmmlm
mandn xmmh, xmmh, xmmhm
};
def macroop ANDNPD_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mandn xmml, xmml, ufp1
mandn xmmh, xmmh, ufp2
};
def macroop ANDNPD_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mandn xmml, xmml, ufp1
mandn xmmh, xmmh, ufp2
};
'''
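# Note: each packed AND above operates on an XMM register as two 64-bit halves
# (xmml/xmmh), so every macroop expands to a pair of mand/mandn micro-ops,
# one per half.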
| bsd-3-clause |
porduna/weblabdeusto | server/src/weblab/admin/script/httpd_config_generate.py | 3 | 8900 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from __future__ import print_function, unicode_literals
import os
import time
from collections import OrderedDict
from weblab.util import data_filename
import flask_admin
def weblab_httpd_config_generate(directory):
print("Generating HTTPd configuration files... ", end='')
result = httpd_config_generate(directory)
print("[done]")
return result
def httpd_config_generate(directory):
debugging_variables = {}
execfile(os.path.join(directory, 'debugging.py'), debugging_variables)
ports = debugging_variables.get('PORTS', {}).get('json')
base_url = debugging_variables.get('BASE_URL', '')
if base_url in ('','/'):
base_url = ''
static_directories = OrderedDict() #{
# url path : disk path
# }
static_directories[base_url + '/weblab/client'] = data_filename('weblab/core/static/oldclient').replace('\\','/') # \ => / for Windows
flask_admin_static = os.path.join(os.path.dirname(flask_admin.__file__), 'static')
static_directories[base_url + '/weblab/admin/static'] = flask_admin_static.replace('\\','/')
# TODO: Avoid repeated paths
static_directories[base_url + '/weblab/instructor/static'] = data_filename('weblab/admin/web/static').replace('\\','/')
static_directories[base_url + '/weblab/profile/static'] = data_filename('weblab/admin/web/static').replace('\\','/')
static_directories[base_url + '/weblab/web/static'] = data_filename('weblab/core/static').replace('\\','/')
static_directories[base_url + '/weblab/static'] = data_filename('weblab/core/static').replace('\\','/')
static_directories[base_url + '/weblab/gwt/weblabclientlab'] = data_filename('war/weblabclientlab').replace('\\','/')
static_directories[base_url + '/weblab/web/pub'] = os.path.abspath(os.path.join(directory, 'pub')).replace('\\','/')
files = {}
apache_contents = _apache_generation(directory, base_url, ports, static_directories)
files['apache'] = _set_contents(directory, 'httpd/apache_weblab_generic.conf', apache_contents)
simple_httpd_contents = _simple_httpd_generation(directory, base_url, ports, static_directories)
files['simple'] = _set_contents(directory, 'httpd/simple_server_config.py', simple_httpd_contents)
# TODO: support nginx
return files
def _set_contents(directory, filename, new_contents):
original_path = os.path.join(directory, filename)
destination_path = os.path.join(directory, filename + "-backup-" + time.strftime("%Y-%m-%d_%H-%M-%S"))
if os.path.exists(original_path):
original_contents = open(original_path).read()
open(destination_path, 'w').write(original_contents)
open(original_path, 'w').write(new_contents)
return os.path.abspath(original_path)
def _apache_generation(directory, base_url, ports, static_directories):
apache_conf = (
"\n"
"""<LocationMatch (.*)nocache\.js$>\n"""
""" Header Set Cache-Control "max-age=0, no-store"\n"""
"""</LocationMatch>\n"""
"""\n"""
"""<Files *.cache.*>\n"""
""" Header Set Cache-Control "max-age=2592000"\n"""
"""</Files>\n"""
"""\n"""
"""# Apache redirects the regular paths to the particular directories \n"""
# """RedirectMatch ^%(root)s$ %(root)s/weblab/\n"""
# """RedirectMatch ^%(root)s/$ %(root)s/weblab/\n"""
"""RedirectMatch ^%(root)s/weblab$ %(root)s/weblab/\n"""
"""RedirectMatch ^%(root)s/weblab/client/$ %(root)s/weblab/client/index.html\n"""
"""\n""")
for static_url, static_directory in static_directories.items():
apache_conf += """Alias %(static_url)s %(static_directory)s\n""" % dict(static_url=static_url, static_directory=static_directory)
apache_conf += (
"""\n"""
"""<Location %(root)s/weblab/>\n"""
""" <IfModule authz_core_module>\n"""
""" Require all granted\n"""
""" </IfModule>\n"""
"""\n"""
""" <IfModule !authz_core_module>\n"""
""" Order allow,deny\n"""
""" Allow from All\n"""
""" </IfModule>\n"""
"""</Location>\n"""
"""\n"""
"""<Directory "%(directory)s">\n"""
""" Options Indexes FollowSymLinks\n"""
"""\n"""
""" <IfModule authz_core_module>\n"""
""" Require all granted\n"""
""" </IfModule>\n"""
"""\n"""
""" <IfModule !authz_core_module>\n"""
""" Order allow,deny\n"""
""" Allow from All\n"""
""" </IfModule>\n"""
"""</Directory>\n"""
"""\n""")
previous = []
for static_directory in static_directories.values():
if static_directory in previous:
continue
previous.append(static_directory)
apache_conf += ("""<Directory "%(static_directory)s">\n"""
""" Options Indexes FollowSymLinks\n"""
"""\n"""
""" <IfModule authz_core_module>\n"""
""" Require all granted\n"""
""" </IfModule>\n"""
"""\n"""
""" <IfModule !authz_core_module>\n"""
""" Order allow,deny\n"""
""" Allow from All\n"""
""" </IfModule>\n"""
"""</Directory>\n"""
"""\n""") % dict(static_directory=static_directory)
apache_conf += (
"""# Apache redirects the requests retrieved to the particular server, using a stickysession if the sessions are based on memory\n"""
"""ProxyPreserveHost On\n"""
"""ProxyVia On\n"""
"""\n""")
for static_url, static_directory in static_directories.items():
apache_conf += """ProxyPass %(static_url)s !\n""" % dict(static_url=static_url)
apache_conf += (
"""\n"""
"""ProxyPass %(root)s/weblab/ balancer://%(root-no-slash)s_weblab_cluster/ stickysession=weblabsessionid lbmethod=bybusyness\n"""
"""ProxyPassReverse %(root)s/weblab/ balancer://%(root-no-slash)s_weblab_cluster/ stickysession=weblabsessionid\n"""
"\n")
apache_conf += "\n"
apache_conf += """<Proxy balancer://%(root-no-slash)s_weblab_cluster>\n"""
for pos, port in enumerate(ports):
d = { 'port' : port, 'route' : 'route%s' % (pos+1), 'root' : '%(root)s' }
apache_conf += """ BalancerMember http://localhost:%(port)s/weblab route=%(route)s\n""" % d
apache_conf += """</Proxy>\n"""
apache_img_dir = '/client/images'
apache_root_without_slash = base_url[1:] if base_url.startswith('/') else base_url
server_conf_dict = { 'root' : base_url, 'root-no-slash' : apache_root_without_slash.replace('/','_'),
'directory' : os.path.abspath(directory).replace('\\','/'),
'war_path' : data_filename('war').replace('\\','/') }
apache_conf = apache_conf % server_conf_dict
apache_conf_path = os.path.join('', 'apache_weblab_generic.conf')
return apache_conf
def _simple_httpd_generation(directory, base_url, ports, static_directories):
proxy_paths = [
('%(root)s$', 'redirect:%(root)s/weblab/'),
('%(root)s/$', 'redirect:%(root)s/weblab/'),
('%(root)s/weblab/client$', 'redirect:%(root)s/weblab/client/index.html'),
]
for key, static_dir in static_directories.items():
proxy_paths.append((key, 'file:{0}'.format(static_dir)))
proxy_path = "proxy-sessions:weblabsessionid:"
for pos, port in enumerate(ports):
d = { 'port' : port, 'route' : 'route%s' % (pos+1), 'root' : '%(root)s' }
proxy_path += '%(route)s=http://localhost:%(port)s/weblab/,' % d
proxy_paths.append(('%(root)s/weblab/', proxy_path))
proxy_paths.append(('%(root)s/weblab', 'redirect:%(root)s/weblab/'))
proxy_paths.append(('', 'redirect:%(root)s/weblab/'))
if base_url in ('','/'):
root = ''
else:
root = base_url
apache_img_dir = '/client/images'
server_conf_dict = { 'root' : root,
'directory' : os.path.abspath(directory).replace('\\','/')
}
proxy_paths = eval(repr(proxy_paths) % server_conf_dict)
proxy_paths_str = "PATHS = [ \n"
for proxy_path in proxy_paths:
proxy_paths_str += " %s,\n" % repr(proxy_path)
proxy_paths_str += "]\n"
return proxy_paths_str
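# For illustration: with JSON ports [10000, 10001] and BASE_URL '/w'
# (hypothetical values), the Apache balancer block generated above comes out
# roughly as:
#   <Proxy balancer://w_weblab_cluster>
#       BalancerMember http://localhost:10000/weblab route=route1
#       BalancerMember http://localhost:10001/weblab route=route2
#   </Proxy>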
if __name__ == '__main__':
httpd_config_generate("/tmp/foo")
| bsd-2-clause |
evanma92/routeh | flask/lib/python2.7/site-packages/requests/auth.py | 413 | 6794 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
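# e.g. _basic_auth_str('user', 'pass') returns 'Basic dXNlcjpwYXNz'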
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
self.num_401_calls = 1
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
hash_utf8 = None
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
self.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self.num_401_calls = 1
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
return r
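# Typical client usage (a sketch; the endpoint URL is illustrative):
#   import requests
#   requests.get('https://httpbin.org/digest-auth/auth/user/pass',
#                auth=HTTPDigestAuth('user', 'pass'))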
| bsd-3-clause |
botswana-harvard/edc-registration | edc_registration/model_mixins/registered_subject_model_mixin.py | 1 | 8797 | import re
from django.apps import apps as django_apps
from django.core.validators import RegexValidator
from django.db import models, transaction
from django.utils.translation import ugettext as _
from django_crypto_fields.fields import FirstnameField, LastnameField
from django_crypto_fields.fields import IdentityField, EncryptedCharField
from edc_base.model_fields import IdentityTypeField, IsDateEstimatedField
from edc_base.utils import get_uuid
from edc_constants.choices import YES, NO, GENDER
from edc_constants.constants import UUID_PATTERN
from edc_identifier.model_mixins import UniqueSubjectIdentifierModelMixin
from ..exceptions import RegisteredSubjectError
edc_protocol_app_config = django_apps.get_app_config('edc_protocol')
YES_NO_UNKNOWN = (
(YES, 'Yes'),
(NO, 'No'),
('?', 'Unknown'),
)
class RegisteredSubjectModelMixin(UniqueSubjectIdentifierModelMixin,
models.Model):
"""A model mixin for the RegisteredSubject model (only).
"""
# may not be available when instance created (e.g. infants prior to birth
# report)
first_name = FirstnameField(null=True)
# may not be available when instance created (e.g. infants or household
# subject before consent)
last_name = LastnameField(
verbose_name="Last name",
null=True)
# may not be available when instance created (e.g. infants)
initials = EncryptedCharField(
validators=[RegexValidator(
regex=r'^[A-Z]{2,3}$',
message=('Ensure initials consist of letters '
'only in upper case, no spaces.'))],
null=True)
dob = models.DateField(
verbose_name=_("Date of birth"),
null=True,
blank=False,
help_text=_("Format is YYYY-MM-DD"))
is_dob_estimated = IsDateEstimatedField(
verbose_name=_("Is date of birth estimated?"),
null=True,
blank=False)
gender = models.CharField(
verbose_name="Gender",
max_length=1,
choices=GENDER,
null=True,
blank=False)
subject_consent_id = models.CharField(
max_length=100,
null=True,
blank=True)
registration_identifier = models.CharField(
max_length=36,
null=True,
blank=True)
sid = models.CharField(
verbose_name="SID",
max_length=15,
null=True,
blank=True)
subject_type = models.CharField(
max_length=25,
null=True,
blank=True)
relative_identifier = models.CharField(
verbose_name="Identifier of immediate relation",
max_length=36,
null=True,
blank=True,
help_text="For example, mother's identifier, if available / appropriate")
identity = IdentityField(
null=True,
blank=True)
identity_or_pk = models.CharField(
verbose_name="identity or pk",
max_length=75,
unique=True,
default=get_uuid,
editable=False)
identity_type = IdentityTypeField(
null=True,
blank=True)
screening_identifier = models.CharField(
max_length=36,
null=True,
blank=True)
screening_datetime = models.DateTimeField(
null=True,
blank=True)
screening_age_in_years = models.IntegerField(
null=True,
blank=True)
registration_datetime = models.DateTimeField(
null=True,
blank=True)
""" for simplicity, if going straight from screen to rando,
update both registration date and randomization date """
randomization_datetime = models.DateTimeField(
null=True,
blank=True)
registration_status = models.CharField(
verbose_name="Registration status",
max_length=25,
null=True,
blank=True)
consent_datetime = models.DateTimeField(
null=True,
blank=True)
comment = models.TextField(
verbose_name='Comment',
max_length=250,
null=True,
blank=True)
additional_key = models.CharField(
max_length=36,
verbose_name='-',
editable=False,
default=None,
null=True,
help_text=('A uuid (or some other text value) to be added to bypass the '
'unique constraint of just first_name, initials, and dob. '
'The default constraint proves limiting since the source '
'model usually has some other attribute in addition to '
'first_name, initials and dob which is not captured in '
'this model'))
dm_comment = models.CharField(
verbose_name="Data Management comment",
max_length=150,
null=True,
editable=False)
def save(self, *args, **kwargs):
if self.identity:
self.additional_key = None
self.identity_or_pk = self.identity
self.set_uuid_as_subject_identifier_if_none()
self.raise_on_duplicate('subject_identifier')
self.raise_on_duplicate('identity')
self.raise_on_changed_subject_identifier()
super().save(*args, **kwargs)
def natural_key(self):
return (self.subject_identifier_as_pk, )
def __str__(self):
return self.mask_subject_identifier()
def update_subject_identifier_on_save(self):
"""Overridden to not set the subject identifier on save.
"""
if not self.subject_identifier:
self.subject_identifier = self.subject_identifier_as_pk.hex
elif re.match(UUID_PATTERN, self.subject_identifier):
pass
return self.subject_identifier
def make_new_identifier(self):
return self.subject_identifier_as_pk.hex
def mask_subject_identifier(self):
if not self.subject_identifier_is_set():
return '<identifier not set>'
return self.subject_identifier
def subject_identifier_is_set(self):
obj = self.__class__.objects.get(pk=self.id)
if re.match(UUID_PATTERN, obj.subject_identifier):
return False
return True
def raise_on_changed_subject_identifier(self):
"""Raises an exception if there is an attempt to change
the subject identifier for an existing instance if the subject
identifier is already set.
"""
if self.id and self.subject_identifier_is_set():
with transaction.atomic():
obj = self.__class__.objects.get(pk=self.id)
if obj.subject_identifier != self.subject_identifier_as_pk.hex:
if self.subject_identifier != obj.subject_identifier:
raise RegisteredSubjectError(
'Subject identifier cannot be changed for '
'existing registered subject. Got {} <> {}.'.format(
self.subject_identifier, obj.subject_identifier))
def raise_on_duplicate(self, attrname):
"""Checks if the subject identifier (or other attr) is in use,
for new and existing instances.
"""
if getattr(self, attrname):
with transaction.atomic():
error_msg = (
'Cannot {{action}} registered subject with a duplicate '
'\'{}\'. Got {}.'.format(attrname, getattr(self, attrname)))
try:
obj = self.__class__.objects.exclude(
**{'pk': self.pk} if self.id else {}).get(
**{attrname: getattr(self, attrname)})
if not self.id:
raise RegisteredSubjectError(
error_msg.format(action='insert'))
elif self.subject_identifier_is_set() and obj.id != self.id:
raise RegisteredSubjectError(
error_msg.format(action='update'))
else:
raise RegisteredSubjectError(
error_msg.format(action='update'))
except self.__class__.DoesNotExist:
pass
def set_uuid_as_subject_identifier_if_none(self):
"""Inserts a random uuid as a dummy identifier for a new
instance.
Model uses subject_identifier_as_pk as a natural key for
serialization/deserialization. Value must not change
once set.
"""
if not self.subject_identifier:
self.subject_identifier = self.subject_identifier_as_pk.hex
class Meta:
abstract = True
verbose_name = 'Registered Subject'
ordering = ['subject_identifier']
unique_together = ('first_name', 'dob', 'initials', 'additional_key')
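# Lifecycle sketch: a new instance receives subject_identifier_as_pk.hex (a
# UUID) as a placeholder identifier; mask_subject_identifier() reports
# '<identifier not set>' until a real identifier replaces that UUID.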
| gpl-2.0 |
agx/git-buildpackage | gbp/pkg/compressor.py | 1 | 2348 | # vim: set fileencoding=utf-8 :
#
# (C) 2017 Guido Günther <agx@sigxcpu.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
class Compressor(object):
# Map frequently used names of compression types to the internal ones:
Aliases = {'bz2': 'bzip2',
'gz': 'gzip', }
Opts = {'gzip': '-n',
'bzip2': '',
'lzma': '',
'xz': ''}
Exts = {'gzip': 'gz',
'bzip2': 'bz2',
'lzma': 'lzma',
'xz': 'xz'}
def __init__(self, type_, level=None):
self._type = type_
self._level = int(level) if level not in [None, ''] else None
def is_known(self):
return self.type in self.Opts.keys()
@property
def type(self):
return self._type
@property
def level(self):
return self._level
@property
def _level_opt(self):
return '-%d' % self.level if self.level is not None else ''
@property
def _more_opts(self):
return self.Opts.get(self._type, '')
def cmdline(self, stdout=True):
"""
>>> Compressor('gzip', level=9).cmdline()
'gzip -9 -n -c'
>>> Compressor('gzip').cmdline(True)
'gzip  -n -c'
"""
return "%s %s %s %s" % (self.type, self._level_opt, self._more_opts,
"-c" if stdout else '')
def __repr__(self):
"""
>>> Compressor('gzip').__repr__()
"<compressor type='gzip' >"
>>> Compressor('gzip', 9).__repr__()
"<compressor type='gzip' level=9>"
"""
level_str = "level=%s" % self.level if self.level is not None else ''
return "<compressor type='%s' %s>" % (self.type, level_str)
| gpl-2.0 |
hugeinc/behave-parallel | test/test_runner.py | 1 | 31768 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from collections import defaultdict
import os.path
import StringIO
import sys
import warnings
import tempfile
from mock import Mock, patch
from nose.tools import *
import unittest
from behave import model, parser, runner, step_registry
from behave.configuration import ConfigError
from behave.log_capture import LoggingCapture
from behave.formatter.base import StreamOpener
class TestContext(object):
def setUp(self):
r = Mock()
self.config = r.config = Mock()
r.config.verbose = False
self.context = runner.Context(r)
def test_user_mode_shall_restore_behave_mode(self):
# -- CASE: No exception is raised.
with self.context.user_mode():
eq_(self.context._mode, runner.Context.USER)
self.context.thing = 'stuff'
eq_(self.context._mode, runner.Context.BEHAVE)
def test_user_mode_shall_restore_behave_mode_if_assert_fails(self):
try:
with self.context.user_mode():
eq_(self.context._mode, runner.Context.USER)
assert False, "XFAIL"
except AssertionError:
eq_(self.context._mode, runner.Context.BEHAVE)
def test_user_mode_shall_restore_behave_mode_if_exception_is_raised(self):
try:
with self.context.user_mode():
eq_(self.context._mode, runner.Context.USER)
raise RuntimeError("XFAIL")
except Exception:
eq_(self.context._mode, runner.Context.BEHAVE)
def test_context_contains(self):
eq_('thing' in self.context, False)
self.context.thing = 'stuff'
eq_('thing' in self.context, True)
self.context._push()
eq_('thing' in self.context, True)
def test_attribute_set_at_upper_level_visible_at_lower_level(self):
self.context.thing = 'stuff'
self.context._push()
eq_(self.context.thing, 'stuff')
def test_attribute_set_at_lower_level_not_visible_at_upper_level(self):
self.context._push()
self.context.thing = 'stuff'
self.context._pop()
assert getattr(self.context, 'thing', None) is None
def test_attributes_set_at_upper_level_visible_at_lower_level(self):
self.context.thing = 'stuff'
self.context._push()
eq_(self.context.thing, 'stuff')
self.context.other_thing = 'more stuff'
self.context._push()
eq_(self.context.thing, 'stuff')
eq_(self.context.other_thing, 'more stuff')
self.context.third_thing = 'wombats'
self.context._push()
eq_(self.context.thing, 'stuff')
eq_(self.context.other_thing, 'more stuff')
eq_(self.context.third_thing, 'wombats')
def test_attributes_set_at_lower_level_not_visible_at_upper_level(self):
self.context.thing = 'stuff'
self.context._push()
self.context.other_thing = 'more stuff'
self.context._push()
self.context.third_thing = 'wombats'
eq_(self.context.thing, 'stuff')
eq_(self.context.other_thing, 'more stuff')
eq_(self.context.third_thing, 'wombats')
self.context._pop()
eq_(self.context.thing, 'stuff')
eq_(self.context.other_thing, 'more stuff')
assert getattr(self.context, 'third_thing', None) is None, '%s is not None' % self.context.third_thing
self.context._pop()
eq_(self.context.thing, 'stuff')
assert getattr(self.context, 'other_thing', None) is None, '%s is not None' % self.context.other_thing
assert getattr(self.context, 'third_thing', None) is None, '%s is not None' % self.context.third_thing
def test_masking_existing_user_attribute_when_verbose_causes_warning(self):
warns = []
def catch_warning(*args, **kwargs):
warns.append(args[0])
old_showwarning = warnings.showwarning
warnings.showwarning = catch_warning
self.config.verbose = True
with self.context.user_mode():
self.context.thing = 'stuff'
self.context._push()
self.context.thing = 'other stuff'
warnings.showwarning = old_showwarning
print repr(warns)
assert warns, 'warns is empty!'
warning = warns[0]
assert isinstance(warning, runner.ContextMaskWarning), 'warning is not a ContextMaskWarning'
info = warning.args[0]
assert info.startswith('user code'), "%r doesn't start with 'user code'" % info
assert "'thing'" in info, '%r not in %r' % ("'thing'", info)
assert 'tutorial' in info, '"tutorial" not in %r' % (info, )
def test_masking_existing_user_attribute_when_not_verbose_causes_no_warning(self):
warns = []
def catch_warning(*args, **kwargs):
warns.append(args[0])
old_showwarning = warnings.showwarning
warnings.showwarning = catch_warning
# explicit
self.config.verbose = False
with self.context.user_mode():
self.context.thing = 'stuff'
self.context._push()
self.context.thing = 'other stuff'
warnings.showwarning = old_showwarning
assert not warns
def test_behave_masking_user_attribute_causes_warning(self):
warns = []
def catch_warning(*args, **kwargs):
warns.append(args[0])
old_showwarning = warnings.showwarning
warnings.showwarning = catch_warning
with self.context.user_mode():
self.context.thing = 'stuff'
self.context._push()
self.context.thing = 'other stuff'
warnings.showwarning = old_showwarning
print repr(warns)
assert warns, 'warns is empty!'
warning = warns[0]
assert isinstance(warning, runner.ContextMaskWarning), 'warning is not a ContextMaskWarning'
info = warning.args[0]
assert info.startswith('behave runner'), "%r doesn't start with 'behave runner'" % info
assert "'thing'" in info, '%r not in %r' % ("'thing'", info)
file = __file__.rsplit('.', 1)[0]
assert file in info, '%r not in %r' % (file, info)
def test_setting_root_attribute_that_masks_existing_causes_warning(self):
warns = []
def catch_warning(*args, **kwargs):
warns.append(args[0])
old_showwarning = warnings.showwarning
warnings.showwarning = catch_warning
with self.context.user_mode():
self.context._push()
self.context.thing = 'teak'
self.context._set_root_attribute('thing', 'oak')
warnings.showwarning = old_showwarning
print repr(warns)
assert warns
warning = warns[0]
assert isinstance(warning, runner.ContextMaskWarning)
info = warning.args[0]
assert info.startswith('behave runner'), "%r doesn't start with 'behave runner'" % info
assert "'thing'" in info, '%r not in %r' % ("'thing'", info)
file = __file__.rsplit('.', 1)[0]
assert file in info, '%r not in %r' % (file, info)
def test_context_deletable(self):
eq_('thing' in self.context, False)
self.context.thing = 'stuff'
eq_('thing' in self.context, True)
del self.context.thing
eq_('thing' in self.context, False)
@raises(AttributeError)
def test_context_deletable_raises(self):
eq_('thing' in self.context, False)
self.context.thing = 'stuff'
eq_('thing' in self.context, True)
self.context._push()
eq_('thing' in self.context, True)
del self.context.thing
class ExampleSteps(object):
text = None
table = None
@staticmethod
def step_passes(context):
pass
@staticmethod
def step_fails(context):
assert False, "XFAIL"
@classmethod
def step_with_text(cls, context):
assert context.text is not None, "REQUIRE: multi-line text"
cls.text = context.text
@classmethod
def step_with_table(cls, context):
assert context.table, "REQUIRE: table"
cls.table = context.table
@classmethod
def register_steps_with(cls, step_registry):
STEP_DEFINITIONS = [
("step", "a step passes", cls.step_passes),
("step", "a step fails", cls.step_fails),
("step", "a step with text", cls.step_with_text),
("step", "a step with a table", cls.step_with_table),
]
for keyword, string, func in STEP_DEFINITIONS:
step_registry.add_step_definition(keyword, string, func)
class TestContext_ExecuteSteps(unittest.TestCase):
"""
Test the behave.runner.Context.execute_steps() functionality.
"""
step_registry = None
def setUp(self):
runner_ = Mock()
self.config = runner_.config = Mock()
runner_.config.verbose = False
runner_.config.stdout_capture = False
runner_.config.stderr_capture = False
runner_.config.log_capture = False
self.context = runner.Context(runner_)
# self.context.text = None
# self.context.table = None
runner_.context = self.context
self.context.feature = Mock()
self.context.feature.parser = parser.Parser()
if not self.step_registry:
# -- SETUP ONCE:
self.step_registry = step_registry.StepRegistry()
ExampleSteps.register_steps_with(self.step_registry)
ExampleSteps.text = None
ExampleSteps.table = None
def test_execute_steps_with_simple_steps(self):
doc = u'''
Given a step passes
Then a step passes
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
result = self.context.execute_steps(doc)
eq_(result, True)
def test_execute_steps_with_failing_step(self):
doc = u'''
Given a step passes
When a step fails
Then a step passes
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
try:
result = self.context.execute_steps(doc)
except AssertionError, e: # -- PY26-CLEANUP-MARK
ok_("FAILED SUB-STEP: When a step fails" in str(e))
def test_execute_steps_with_undefined_step(self):
doc = u'''
Given a step passes
When a step is undefined
Then a step passes
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
try:
result = self.context.execute_steps(doc)
except AssertionError, e: # -- PY26-CLEANUP-MARK
ok_("UNDEFINED SUB-STEP: When a step is undefined" in str(e))
def test_execute_steps_with_text(self):
doc = u'''
Given a step passes
When a step with text:
"""
Lorem ipsum
Ipsum lorem
"""
Then a step passes
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
result = self.context.execute_steps(doc)
expected_text = "Lorem ipsum\nIpsum lorem"
eq_(result, True)
eq_(expected_text, ExampleSteps.text)
def test_execute_steps_with_table(self):
doc = u'''
Given a step with a table:
| Name | Age |
| Alice | 12 |
| Bob | 23 |
Then a step passes
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
result = self.context.execute_steps(doc)
expected_table = model.Table([u"Name", u"Age"], 0, [
[u"Alice", u"12"],
[u"Bob", u"23"],
])
eq_(result, True)
eq_(expected_table, ExampleSteps.table)
def test_context_table_is_restored_after_execute_steps_without_table(self):
doc = u'''
Given a step passes
Then a step passes
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
original_table = "<ORIGINAL_TABLE>"
self.context.table = original_table
self.context.execute_steps(doc)
eq_(self.context.table, original_table)
def test_context_table_is_restored_after_execute_steps_with_table(self):
doc = u'''
Given a step with a table:
| Name | Age |
| Alice | 12 |
| Bob | 23 |
Then a step passes
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
original_table = "<ORIGINAL_TABLE>"
self.context.table = original_table
self.context.execute_steps(doc)
eq_(self.context.table, original_table)
def test_context_text_is_restored_after_execute_steps_without_text(self):
doc = u'''
Given a step passes
Then a step passes
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
original_text = "<ORIGINAL_TEXT>"
self.context.text = original_text
self.context.execute_steps(doc)
eq_(self.context.text, original_text)
def test_context_text_is_restored_after_execute_steps_with_text(self):
doc = u'''
Given a step passes
When a step with text:
"""
Lorem ipsum
Ipsum lorem
"""
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
original_text = "<ORIGINAL_TEXT>"
self.context.text = original_text
self.context.execute_steps(doc)
eq_(self.context.text, original_text)
@raises(ValueError)
def test_execute_steps_should_fail_when_called_without_feature(self):
doc = u'''
Given a passes
Then a step passes
'''.lstrip()
with patch('behave.step_registry.registry', self.step_registry):
self.context.feature = None
self.context.execute_steps(doc)
class TestRunner(object):
def test_load_hooks_execfiles_hook_file(self):
with patch('behave.runner.exec_file') as ef:
with patch('os.path.exists') as exists:
exists.return_value = True
base_dir = 'fake/path'
hooks_path = os.path.join(base_dir, 'environment.py')
r = runner.Runner(None)
r.base_dir = base_dir
r.load_hooks()
exists.assert_called_with(hooks_path)
ef.assert_called_with(hooks_path, r.hooks)
def test_run_hook_runs_a_hook_that_exists(self):
r = runner.Runner(None)
r.config = Mock()
r.config.dry_run = False
r.hooks['before_lunch'] = hook = Mock()
args = (runner.Context(Mock()), Mock(), Mock())
r.run_hook('before_lunch', *args)
hook.assert_called_with(*args)
def test_run_hook_does_not_runs_a_hook_that_exists_if_dry_run(self):
r = runner.Runner(None)
r.config = Mock()
r.config.dry_run = True
r.hooks['before_lunch'] = hook = Mock()
args = (runner.Context(Mock()), Mock(), Mock())
r.run_hook('before_lunch', *args)
assert len(hook.call_args_list) == 0
def test_setup_capture_creates_stringio_for_stdout(self):
r = runner.Runner(Mock())
r.config.stdout_capture = True
r.config.log_capture = False
r.context = Mock()
r.setup_capture()
assert r.stdout_capture is not None
assert isinstance(r.stdout_capture, StringIO.StringIO)
def test_setup_capture_does_not_create_stringio_if_not_wanted(self):
r = runner.Runner(Mock())
r.config.stdout_capture = False
r.config.stderr_capture = False
r.config.log_capture = False
r.setup_capture()
assert r.stdout_capture is None
@patch('behave.runner.LoggingCapture')
def test_setup_capture_creates_memory_handler_for_logging(self, handler):
r = runner.Runner(Mock())
r.config.stdout_capture = False
r.config.log_capture = True
r.context = Mock()
r.setup_capture()
assert r.log_capture is not None
handler.assert_called_with(r.config)
r.log_capture.inveigle.assert_called_with()
def test_setup_capture_does_not_create_memory_handler_if_not_wanted(self):
r = runner.Runner(Mock())
r.config.stdout_capture = False
r.config.stderr_capture = False
r.config.log_capture = False
r.setup_capture()
assert r.log_capture is None
def test_start_stop_capture_switcheroos_sys_stdout(self):
old_stdout = sys.stdout
sys.stdout = new_stdout = Mock()
r = runner.Runner(Mock())
r.config.stdout_capture = True
r.config.log_capture = False
r.context = Mock()
r.setup_capture()
r.start_capture()
eq_(sys.stdout, r.stdout_capture)
r.stop_capture()
eq_(sys.stdout, new_stdout)
sys.stdout = old_stdout
def test_start_stop_capture_leaves_sys_stdout_alone_if_off(self):
r = runner.Runner(Mock())
r.config.stdout_capture = False
r.config.log_capture = False
old_stdout = sys.stdout
r.start_capture()
eq_(sys.stdout, old_stdout)
r.stop_capture()
eq_(sys.stdout, old_stdout)
def test_teardown_capture_removes_log_tap(self):
r = runner.Runner(Mock())
r.config.stdout_capture = False
r.config.log_capture = True
r.log_capture = Mock()
r.teardown_capture()
r.log_capture.abandon.assert_called_with()
def test_exec_file(self):
fn = tempfile.mktemp()
with open(fn, 'w') as f:
f.write('spam = __file__\n')
g = {}
l = {}
runner.exec_file(fn, g, l)
assert '__file__' in l
assert 'spam' in l, '"spam" variable not set in locals (%r, %r)' % (g, l)
eq_(l['spam'], fn)
def test_run_returns_true_if_everything_passed(self):
r = runner.Runner(Mock())
r.setup_capture = Mock()
r.setup_paths = Mock()
r.run_with_paths = Mock()
r.run_with_paths.return_value = True
assert r.run()
def test_run_returns_false_if_anything_failed(self):
r = runner.Runner(Mock())
r.setup_capture = Mock()
r.setup_paths = Mock()
r.run_with_paths = Mock()
r.run_with_paths.return_value = False
assert not r.run()
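# Illustrative sketch (not part of the original tests): runner.exec_file(),
# exercised by TestRunner.test_exec_file above, run in isolation. The file
# content and variable name here are hypothetical.
def _exec_file_demo():
    fn = tempfile.mktemp()
    with open(fn, 'w') as f:
        f.write('answer = 42\n')
    g = {}
    l = {}
    runner.exec_file(fn, g, l)
    return l['answer']  # -> 42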
class TestRunWithPaths(object):
def setUp(self):
self.config = Mock()
self.config.reporters = []
self.config.logging_level = None
self.config.logging_filter = None
self.config.outputs = [ Mock(), StreamOpener(stream=sys.stdout) ]
self.config.format = [ "plain", "progress" ]
self.runner = runner.Runner(self.config)
self.load_hooks = self.runner.load_hooks = Mock()
self.load_step_definitions = self.runner.load_step_definitions = Mock()
self.run_hook = self.runner.run_hook = Mock()
self.run_step = self.runner.run_step = Mock()
self.feature_locations = self.runner.feature_locations = Mock()
self.calculate_summaries = self.runner.calculate_summaries = Mock()
self.formatter_class = patch('behave.formatter.pretty.PrettyFormatter')
formatter_class = self.formatter_class.start()
formatter_class.return_value = self.formatter = Mock()
def tearDown(self):
self.formatter_class.stop()
def test_loads_hooks_and_step_definitions(self):
self.feature_locations.return_value = []
self.runner.run_with_paths()
assert self.load_hooks.called
assert self.load_step_definitions.called
def test_runs_before_all_and_after_all_hooks(self):
# Make runner.feature_locations() and runner.run_hook() the same mock so
# we can make sure things happen in the right order.
self.runner.feature_locations = self.run_hook
self.runner.feature_locations.return_value = []
self.runner.context = Mock()
self.runner.run_with_paths()
eq_(self.run_hook.call_args_list, [
(('before_all', self.runner.context), {}),
((), {}),
(('after_all', self.runner.context), {}),
])
@patch('behave.parser.parse_file')
@patch('os.path.abspath')
def test_parses_feature_files_and_appends_to_feature_list(self, abspath,
parse_file):
feature_locations = ['one', 'two', 'three']
feature = Mock()
feature.tags = []
feature.__iter__ = Mock(return_value=iter([]))
feature.run.return_value = False
self.runner.feature_locations.return_value = feature_locations
abspath.side_effect = lambda x: x.upper()
self.config.lang = 'fritz'
self.config.format = ['plain']
self.config.outputs = [ StreamOpener(stream=sys.stdout) ]
self.config.output.encoding = None
self.config.exclude = lambda s: False
self.config.junit = False
self.config.summary = False
parse_file.return_value = feature
self.runner.run_with_paths()
expected_parse_file_args = \
[((x.upper(),), {'language': 'fritz'}) for x in feature_locations]
eq_(parse_file.call_args_list, expected_parse_file_args)
eq_(self.runner.features, [feature] * 3)
class FsMock(object):
def __init__(self, *paths):
self.base = os.path.abspath('.')
self.sep = os.path.sep
# This bit of gymnastics is to support Windows. We feed in a bunch of
# paths in places using FsMock that assume that POSIX-style paths
# work. This is faster than fixing all of those but at some point we
# should totally do it properly with os.path.join() and all that.
def full_split(path):
bits = []
while path:
path, bit = os.path.split(path)
bits.insert(0, bit)
return bits
paths = [os.path.join(self.base, *full_split(path)) for path in paths]
print repr(paths)
self.paths = paths
self.files = set()
self.dirs = defaultdict(list)
separators = [sep for sep in (os.path.sep, os.path.altsep) if sep]
for path in paths:
if path[-1] in separators:
self.dirs[path[:-1]] = []
d, p = os.path.split(path[:-1])
while d and p:
self.dirs[d].append(p)
d, p = os.path.split(d)
else:
self.files.add(path)
d, f = os.path.split(path)
self.dirs[d].append(f)
self.calls = []
def listdir(self, dir):
# pylint: disable=W0622
# W0622 Redefining built-in dir
self.calls.append(('listdir', dir))
return self.dirs.get(dir, [])
def isfile(self, path):
self.calls.append(('isfile', path))
return path in self.files
def isdir(self, path):
self.calls.append(('isdir', path))
return path in self.dirs
def exists(self, path):
self.calls.append(('exists', path))
return path in self.dirs or path in self.files
def walk(self, path, l=None):
if l is None:
assert path in self.dirs, '%s not in %s' % (path, self.dirs)
l = []
dirnames = []
filenames = []
for e in self.dirs[path]:
if os.path.join(path, e) in self.dirs:
dirnames.append(e)
self.walk(os.path.join(path, e), l)
else:
filenames.append(e)
l.append((path, dirnames, filenames))
return l
# utilities that we need
def dirname(self, path, orig=os.path.dirname):
return orig(path)
def abspath(self, path, orig=os.path.abspath):
return orig(path)
def join(self, a, b, orig=os.path.join):
return orig(a, b)
def split(self, path, orig=os.path.split):
return orig(path)
def splitdrive(self, path, orig=os.path.splitdrive):
return orig(path)
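# Illustrative sketch (not part of the original tests): minimal FsMock usage
# in the style of the test classes below; the paths are hypothetical and the
# sketch assumes a POSIX-style os.path.
def _fsmock_demo():
    fs = FsMock('features/', 'features/foo.feature')
    features_dir = os.path.join(fs.base, 'features')
    assert fs.isdir(features_dir)
    assert fs.isfile(os.path.join(features_dir, 'foo.feature'))
    # every query is recorded, so tests can assert on fs.calls afterwards
    assert fs.calls[0] == ('isdir', features_dir)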
class TestFeatureDirectory(object):
def test_default_path_no_steps(self):
config = Mock()
config.paths = []
config.verbose = True
r = runner.Runner(config)
fs = FsMock()
# will look for a "features" directory and not find one
with patch('os.path', fs):
assert_raises(ConfigError, r.setup_paths)
ok_(('isdir', os.path.join(fs.base, 'features', 'steps')) in fs.calls)
def test_default_path_no_features(self):
config = Mock()
config.paths = []
config.verbose = True
r = runner.Runner(config)
fs = FsMock('features/steps/')
with patch('os.path', fs):
with patch('os.walk', fs.walk):
assert_raises(ConfigError, r.setup_paths)
def test_default_path(self):
config = Mock()
config.paths = []
config.verbose = True
r = runner.Runner(config)
fs = FsMock('features/steps/', 'features/foo.feature')
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
r.setup_paths()
eq_(r.base_dir, os.path.abspath('features'))
def test_supplied_feature_file(self):
config = Mock()
config.paths = ['foo.feature']
config.verbose = True
r = runner.Runner(config)
r.context = Mock()
fs = FsMock('steps/', 'foo.feature')
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
r.setup_paths()
ok_(('isdir', os.path.join(fs.base, 'steps')) in fs.calls)
ok_(('isfile', os.path.join(fs.base, 'foo.feature')) in fs.calls)
eq_(r.base_dir, fs.base)
def test_supplied_feature_file_no_steps(self):
config = Mock()
config.paths = ['foo.feature']
config.verbose = True
r = runner.Runner(config)
fs = FsMock('foo.feature')
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
assert_raises(ConfigError, r.setup_paths)
def test_supplied_feature_directory(self):
config = Mock()
config.paths = ['spam']
config.verbose = True
r = runner.Runner(config)
fs = FsMock('spam/', 'spam/steps/', 'spam/foo.feature')
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
r.setup_paths()
ok_(('isdir', os.path.join(fs.base, 'spam', 'steps')) in fs.calls)
eq_(r.base_dir, os.path.join(fs.base, 'spam'))
def test_supplied_feature_directory_no_steps(self):
config = Mock()
config.paths = ['spam']
config.verbose = True
r = runner.Runner(config)
fs = FsMock('spam/', 'spam/foo.feature')
with patch('os.path', fs):
with patch('os.walk', fs.walk):
assert_raises(ConfigError, r.setup_paths)
ok_(('isdir', os.path.join(fs.base, 'spam', 'steps')) in fs.calls)
def test_supplied_feature_directory_missing(self):
config = Mock()
config.paths = ['spam']
config.verbose = True
r = runner.Runner(config)
fs = FsMock()
with patch('os.path', fs):
with patch('os.walk', fs.walk):
assert_raises(ConfigError, r.setup_paths)
class TestFeatureDirectoryLayout2(object):
def test_default_path(self):
config = Mock()
config.paths = []
config.verbose = True
r = runner.Runner(config)
fs = FsMock(
'features/',
'features/steps/',
'features/group1/',
'features/group1/foo.feature',
)
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
r.setup_paths()
eq_(r.base_dir, os.path.abspath('features'))
def test_supplied_root_directory(self):
config = Mock()
config.paths = [ 'features' ]
config.verbose = True
r = runner.Runner(config)
fs = FsMock(
'features/',
'features/group1/',
'features/group1/foo.feature',
'features/steps/',
)
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
r.setup_paths()
ok_(('isdir', os.path.join(fs.base, 'features', 'steps')) in fs.calls)
eq_(r.base_dir, os.path.join(fs.base, 'features'))
def test_supplied_root_directory_no_steps(self):
config = Mock()
config.paths = [ 'features' ]
config.verbose = True
r = runner.Runner(config)
fs = FsMock(
'features/',
'features/group1/',
'features/group1/foo.feature',
)
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
assert_raises(ConfigError, r.setup_paths)
ok_(('isdir', os.path.join(fs.base, 'features', 'steps')) in fs.calls)
eq_(r.base_dir, None)
def test_supplied_feature_file(self):
config = Mock()
config.paths = [ 'features/group1/foo.feature' ]
config.verbose = True
r = runner.Runner(config)
r.context = Mock()
fs = FsMock(
'features/',
'features/group1/',
'features/group1/foo.feature',
'features/steps/',
)
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
r.setup_paths()
ok_(('isdir', os.path.join(fs.base, 'features', 'steps')) in fs.calls)
ok_(('isfile', os.path.join(fs.base, 'features', 'group1', 'foo.feature')) in fs.calls)
eq_(r.base_dir, fs.join(fs.base, "features"))
def test_supplied_feature_file_no_steps(self):
config = Mock()
config.paths = [ 'features/group1/foo.feature' ]
config.verbose = True
r = runner.Runner(config)
fs = FsMock(
'features/',
'features/group1/',
'features/group1/foo.feature',
)
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
assert_raises(ConfigError, r.setup_paths)
def test_supplied_feature_directory(self):
config = Mock()
config.paths = [ 'features/group1' ]
config.verbose = True
r = runner.Runner(config)
fs = FsMock(
'features/',
'features/group1/',
'features/group1/foo.feature',
'features/steps/',
)
with patch('os.path', fs):
with patch('os.walk', fs.walk):
with r.path_manager:
r.setup_paths()
ok_(('isdir', os.path.join(fs.base, 'features', 'steps')) in fs.calls)
eq_(r.base_dir, os.path.join(fs.base, 'features'))
def test_supplied_feature_directory_no_steps(self):
config = Mock()
config.paths = [ 'features/group1' ]
config.verbose = True
r = runner.Runner(config)
fs = FsMock(
'features/',
'features/group1/',
'features/group1/foo.feature',
)
with patch('os.path', fs):
with patch('os.walk', fs.walk):
assert_raises(ConfigError, r.setup_paths)
ok_(('isdir', os.path.join(fs.base, 'features', 'steps')) in fs.calls)
| bsd-2-clause |
bobthekingofegypt/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/python.py | 167 | 89406 | """ Python test discovery, setup and run of test functions. """
import fnmatch
import functools
import inspect
import re
import types
import sys
import py
import pytest
from _pytest._code.code import TerminalRepr
from _pytest.mark import MarkDecorator, MarkerError
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import _pytest._pluggy as pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3
if hasattr(inspect, 'signature'):
def _format_args(func):
return str(inspect.signature(func))
else:
def _format_args(func):
return inspect.formatargspec(*inspect.getargspec(func))
if sys.version_info[:2] == (2, 6):
def isclass(object):
""" Return true if the object is a class. Overrides inspect.isclass for
python 2.6 because it will return True for objects which always return
something on __getattr__ calls (see #1035).
Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
"""
return isinstance(object, (type, types.ClassType))
def _has_positional_arg(func):
return func.__code__.co_argcount
def filter_traceback(entry):
# entry.path might sometimes return a str object when the entry
# points to dynamically generated code
# see https://bitbucket.org/pytest-dev/py/issues/71
raw_filename = entry.frame.code.raw.co_filename
is_generated = '<' in raw_filename and '>' in raw_filename
if is_generated:
return False
# entry.path might point to a nonexistent file, in which case it will
# also return a str object. see #1133
p = py.path.local(entry.path)
return p != cutdir1 and not p.relto(cutdir2)
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
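# Illustrative sketch (not part of the original module): get_real_func()
# peels functools.partial wrappers (and __wrapped__ chains, which
# functools.wraps sets on Python 3.2+). The names below are hypothetical.
def _get_real_func_demo():
    def target():
        return 1
    assert get_real_func(functools.partial(target)) is target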
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception.
Attribute access can potentially fail for 'evil' Python objects.
See issue214
"""
try:
return getattr(object, name, default)
except Exception:
return default
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
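# Illustrative sketch (not part of the original module): the two decoration
# styles accepted by the branch above; the fixture names are hypothetical.
@fixture
def _example_plain_fixture():
    # bare decoration: "function" scope, no params
    return 42

@fixture(scope="module", params=[1, 2], ids=["one", "two"])
def _example_parametrized_fixture(request):
    # each configured value arrives as request.param
    return request.param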
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
group.addoption("--import-mode", default="prepend",
choices=["prepend", "append"], dest="importmode",
help="prepend/append to sys.path when importing test modules, "
"default is to prepend.")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# those alternative spellings are common - raise a specific error to alert
# the user
alt_spellings = ['parameterize', 'parametrise', 'parameterise']
for attr in alt_spellings:
if hasattr(metafunc.function, attr):
msg = "{0} has '{1}', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__, attr))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.istestclass(obj, name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.istestfunction(obj, name):
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
# We need to try and unwrap the function if it's a functools.partial
# or a functools.wrapped.
# We mustn't if it's been wrapped with mock.patch (python 2 only)
if not (isfunction(obj) or isfunction(get_real_func(obj))):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
elif getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return _pytest._code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
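# Illustrative sketch (not part of the original module): co_flags & 32 is the
# CO_GENERATOR flag, so only the generator function below reports True.
def _is_generator_demo():
    def gen():
        yield 1
    def plain():
        return 1
    assert is_generator(gen)
    assert not is_generator(plain)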
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
if isinstance(compat_co_firstlineno, int):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def isnosetest(self, obj):
""" Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator
"""
# We explicitly check for "is True" here to not mistakenly treat
# classes with a custom __getattr__ returning something truthy (like a
# function) as test classes.
return safe_getattr(obj, '__test__', False) is True
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def istestfunction(self, obj, name):
return (
(self.funcnamefilter(name) or self.isnosetest(obj)) and
safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
)
def istestclass(self, obj, name):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in list(dic.items()):
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(methods,
dict(metafunc=metafunc))
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
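# Illustrative sketch (not part of the original module): the prefix-or-glob
# matching done by PyCollector._matches_prefix_or_glob_option above, restated
# standalone; the option values are hypothetical ini entries.
def _name_filter_demo(name, options=("test", "check_*")):
    for option in options:
        if name.startswith(option):
            return True
        # only consult fnmatch for glob-looking options, as above
        if ('*' in option or '?' in option or '[' in option) and \
                fnmatch.fnmatch(name, option):
            return True
    return False
# _name_filter_demo("test_foo") and _name_filter_demo("check_bar") are True;
# _name_filter_demo("helper") is False.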
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a function
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
if not _marked(funcobj, mark):
mark(funcobj)
else:
if not _marked(funcobj, pytestmark):
pytestmark(funcobj)
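# Illustrative sketch (not part of the original module): a module-level
# pytestmark list is copied onto the function, and _marked() keeps a second
# pass from re-applying it. The holder and mark names are hypothetical.
def _transfer_markers_demo():
    class _FakeModule(object):
        pytestmark = [pytest.mark.slow]
    def test_something():
        pass
    transfer_markers(test_something, None, _FakeModule)
    transfer_markers(test_something, None, _FakeModule)  # no-op, already marked
    assert hasattr(test_something, "slow")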
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
importmode = self.config.getoption("--import-mode")
try:
mod = self.fspath.pyimport(ensuresyspath=importmode)
except SyntaxError:
raise self.CollectError(
_pytest._code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if _has_positional_arg(setup_module):
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if _has_positional_arg(fin):
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = _pytest._code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
#ntraceback = ntraceback.cut(excludepath=cutdir2)
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return py._builtin._totext(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
# explicit naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
valtype_for_arg = valtypes[arg]
getattr(self, valtype_for_arg)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object where the test function is defined in or ``None``.
:ivar module: the module object where the test function is defined in.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to setup expensive resources
see about setting indirect to do it rather at test setup time.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
:arg indirect: A list of argument names (a subset of argnames), or a
boolean. If True the list contains all names from the argnames.
Each argvalue corresponding to an argname in this list will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
:arg ids: list of string ids, or a callable.
If strings, each is corresponding to the argvalues so that they are
part of the test id.
If callable, it should take one argument (a single argvalue) and return
a string or return None. If None, the automatically generated id for that
argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
valtypes = {}
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(self.function, arg))
if indirect is True:
valtypes = dict.fromkeys(argnames, "params")
elif indirect is False:
valtypes = dict.fromkeys(argnames, "funcargs")
elif isinstance(indirect, (tuple, list)):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
if arg not in argnames:
raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
self.function, arg))
valtypes[arg] = "params"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
request.addcall() is called during the test collection phase prior and
independently to actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
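# Illustrative sketch (not part of the original module): what a conftest-level
# pytest_generate_tests hook typically does with the Metafunc above. It is
# named differently here to avoid shadowing the hook defined earlier in this
# file; the argument name "n" is hypothetical.
def _example_generate_tests(metafunc):
    if "n" in metafunc.fixturenames:
        metafunc.parametrize("n", [1, 2, 3], ids=["one", "two", "three"])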
if _PY3:
import codecs
def _escape_bytes(val):
"""
If val is pure ascii, returns it as a str(), otherwise escapes
into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in the string, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if val:
# source: http://goo.gl/bGsnwC
encoded_bytes, _ = codecs.escape_encode(val)
return encoded_bytes.decode('ascii')
else:
# empty bytes crashes codecs.escape_encode (#1087)
return ''
else:
def _escape_bytes(val):
"""
In py2 bytes and str are the same type, so return it unchanged if it
is a full ascii string, otherwise escape it into its binary form.
"""
try:
return val.decode('ascii')
except UnicodeDecodeError:
return val.encode('string-escape')
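# Illustrative sketch (not part of the original module): both branches above
# keep pure-ASCII bytes readable and escape everything else.
def _escape_bytes_demo():
    assert _escape_bytes(b'ascii') == 'ascii'
    assert _escape_bytes(b'\xc3\xb4') == '\\xc3\\xb4'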
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, bytes):
return _escape_bytes(val)
elif isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
elif isinstance(val, REGEX_TYPE):
return _escape_bytes(val.pattern) if isinstance(val.pattern, bytes) else val.pattern
elif enum is not None and isinstance(val, enum.Enum):
return str(val)
elif isclass(val) and hasattr(val, '__name__'):
return val.__name__
elif _PY2 and isinstance(val, unicode):
# special case for python 2: if a unicode string is
# convertible to ascii, return it as an str() object instead
try:
return str(val)
except UnicodeError:
# fallthrough
pass
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
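# Illustrative sketch (not part of the original module): ids generated for a
# two-argument parametrization; the argument names are hypothetical.
def _idmaker_demo():
    assert idmaker(("a", "b"), [(1, "x"), (2, "x")]) == ["1-x", "2-x"]
    # non-unique ids (e.g. produced by a bad idfn) get an index prefix
    assert idmaker(("a",), [("x",), ("x",)]) == ["0x", "1x"]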
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
""" assert that a code block/function call raises ``expected_exception``
and raise a failure exception otherwise.
This helper produces an ``ExceptionInfo()`` object (see below).
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager, will
not be executed. For example::
>>> with raises(OSError) as exc_info:
assert 1 == 1 # this will execute as expected
raise OSError(errno.EEXISTS, 'directory exists')
assert exc_info.value.errno == errno.EEXISTS # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with raises(OSError) as exc_info:
assert 1 == 1 # this will execute as expected
raise OSError(errno.EEXISTS, 'directory exists')
assert exc_info.value.errno == errno.EEXISTS # this will now execute
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
.. autoclass:: _pytest._code.ExceptionInfo
:members:
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if expected_exception is AssertionError:
# we want to catch an AssertionError
# replace our subclass with the builtin one
# see https://github.com/pytest-dev/pytest/issues/176
from _pytest.assertion.util import BuiltinAssertionError \
as expected_exception
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(expected_exception, tuple):
for exc in expected_exception:
if not isclass(exc):
raise TypeError(msg % type(exc))
elif not isclass(expected_exception):
raise TypeError(msg % type(expected_exception))
if not args:
return RaisesContext(expected_exception)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = _pytest._code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
# XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except expected_exception:
return _pytest._code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return _pytest._code.ExceptionInfo()
pytest.fail("DID NOT RAISE {0}".format(expected_exception))
class RaisesContext(object):
def __init__(self, expected_exception):
self.expected_exception = expected_exception
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
# check if parametrization happened with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
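# Illustrative sketch (not part of the original module): a scopeproperty()
# raises AttributeError when read outside a scope that provides it, per the
# scope2props table above. The request-like class is hypothetical.
def _scopeproperty_demo():
    class _Req(object):
        scope = "module"
        @scopeproperty()
        def function(self):
            """ test function object """
            return "fn"
    try:
        _Req().function
    except AttributeError:
        return True  # "function" is unavailable in module scope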
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
# we arrive here because of a dynamic call to
# getfuncargvalue(argname) which was naturally
# not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
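# Illustrative use from a fixture body (hypothetical fixture; 'connect_db'
# is a made-up helper):
#
#     @pytest.fixture
#     def db(request):
#         request.applymarker(pytest.mark.slow)
#         return connect_db()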
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
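# Illustrative (deprecated-style) usage, assuming user-defined create_db /
# close_db helpers:
#
#     def pytest_funcarg__db(request):
#         return request.cached_setup(setup=create_db, teardown=close_db,
#                                     scope="session")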
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
As of pytest-2.3, it is easier and usually better to access other
fixture values by stating it as an input argument in the fixture
function. If you can only decide about using another fixture at test
setup time, you may use this function to retrieve it inside a fixture
function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
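# Illustrative dynamic lookup at setup time (hypothetical fixtures and a
# hypothetical '--ssl' option):
#
#     @pytest.fixture
#     def transport(request):
#         if request.config.getoption("--ssl"):
#             return request.getfuncargvalue("ssl_transport")
#         return request.getfuncargvalue("plain_transport")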
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
# if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
# the last fixture raised an error, let's present
# it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
pytest fixture definitions and information are stored and managed
by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to set up the initial fixtures,
i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
During the test-setup phase all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
# collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
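# Illustrative closure computation (hypothetical fixtures): if a test needs
# "db" and the db fixture itself takes "tmpdir" as an argument, the closure
# grows from ["db"] to ["db", "tmpdir"], after any applicable autouse names
# returned by _getautousenames().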
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
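# Illustrative outcome (hypothetical fixture): a fixture declared with
# @pytest.fixture(params=[1, 2]) and named "num" makes every test requesting
# "num" run twice, parametrized indirectly, unless the test already
# parametrizes "num" directly.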
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = _pytest._code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
fixturefunc = self.func
if self.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = self.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
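# Illustrative case (hypothetical test): a test decorated with
# @mock.patch("os.getcwd") receives one extra mock argument, so this
# returns 1 and getfuncargnames() below can skip it when collecting
# fixture argument names.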
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
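# Illustrative effect (hypothetical items): with session-scoped
# parametrization keys A and B, items [t1(A), t2(B), t3(A)] are reordered
# to [t1(A), t3(A), t2(B)], so the expensive A setup/teardown runs once
# instead of twice.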
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache,scopenum+1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
# items list into a list of items_before, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
# cs.indices.items() yields argnames in arbitrary order, but
# then again different functions (items) can change order of
# arguments so it doesn't matter much probably
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
| mpl-2.0 |
lucas794/TP3-Algo-I- | interfaz_jugador.py | 1 | 5482 | import dados
import tablero
from entrada_usuario import *
class InterfazJugador(object):
"""Abstae la interfaz hacia el usuario, para solicitarle ingresos respecto a un determinado
jugador, manejado por dicho usuario."""
def __init__(self):
self.armas = []
self.lugares = []
self.personajes = []
self.nombres = []
def set_personajes(self, personajes):
"""Se le asignan los personajes del juego"""
self.personajes = personajes[:]
def set_armas(self, armas):
"""Se le asignan las armas del juego"""
self.armas = armas[:]
def set_lugares(self, lugares):
"""Se le asignan los lugares del juego"""
self.lugares = lugares[:]
def pedir_cantidad_jugadores(self, minimo, maximo):
"""Le pide al usuario la cantidad de jugadores que van a jugar"""
return solicitar_numero(minimo, maximo, "Ingrese la cantidad de jugadores, entre " + str(minimo) + " y " + str(maximo) + ":\n")
def pedir_nombre_jugador(self, num_jugador):
"""Le pide al usuario el nombre de un jugador. No se permite ingresar un nombre que ya fuere ingresado"""
return solicitar_nombre_jugador(self.nombres, num_jugador)
def pedir_dados(self, jugador, max_dados, max_caras_dados):
"""Obtiene los dados del que debera tener el jugador indicado."""
cantidad = solicitar_numero(1, max_dados, "Ingrese la cantidad de dados que tendra el jugador " + jugador + ".\n")
dados_jugadores = []
for i in range(cantidad):
caras = solicitar_numero(1, max_caras_dados, "Ingrese la cantidad de caras que tendra el dado " + str(i+1) + ".\n")
print "Tipos de dados disponibles:"
mostrar_posibilidades(dados.TIPOS_DADOS)
tipo_dado = solicitar_entre_opciones(dados.TIPOS_DADOS, "que tipo de dado desea usar")
generado = dados.GENERADORES[tipo_dado](caras)
dados_jugadores.append(generado)
return dados_jugadores
def pedir_carta_a_mostrar(self, jugador, posibles):
"""Le pide al jugador que elija entre todas las cartas posibles, para mostrarselas a otro jugador."""
print jugador.get_nombre(), "debe elegir entre:"
mostrar_posibilidades(posibles)
indice_carta = solicitar_entre_opciones(posibles, "la carta a mostrar")
return posibles[indice_carta]
def pedir_sentido(self):
"""Le pide el sentido al usuario. Se devuelven los valores de las constantes indicadas en el tablero."""
print "Deseas moverte en sentido horario o sentido antihorario?"
mostrar_posibilidades(tablero.SENTIDOS)
indice_sentido = solicitar_entre_opciones(tablero.SENTIDOS, "el sentido del movimiento")
if tablero.SENTIDOS[indice_sentido] == tablero.SENTIDO_HORARIO:
return tablero.HORARIO
else:
return tablero.ANTIHORARIO
def quiere_consultar(self, lugar):
"""Le pregunta al usuario si desea realizar una sugerencia o no. Devuelve un valor Booleano con la respuesta"""
return pregunta_si_no("Quiere hacer una sugerencia en " + lugar + "?")
def pedir_personaje(self):
"""Le pide al usuario un personaje."""
indice_personaje = solicitar_entre_opciones(self.personajes, "Personaje")
return self.personajes[indice_personaje]
def pedir_arma(self):
"""Le pide al usuario un arma"""
indice_arma = solicitar_entre_opciones(self.armas, "Arma")
return self.armas[indice_arma]
def pedir_lugar(self):
"""Le pide al usuario un lugar"""
indice_lugar = solicitar_entre_opciones(self.lugares, "Lugar")
return self.lugares[indice_lugar]
def mostrar_carta(self, jugador, carta):
"""Le muestra al jugador (A) que realizo la sugerencia la carta que el jugador (B) (recibido por parametro) le
muestra para afirmar que su sugerencia es falsa."""
print "Tu sugerencia no es cierta!", jugador.get_nombre(), "tenia la carta", carta
def mostrar_no_hay_cartas(self):
"""Le avisa al jugador que realizo la sugerenca que ningun otro jugador puede presentar pruebas
para afirmarla falsa."""
print "Ninguna de las cartas pedidas la tiene otro jugador! que sospechoso!"
def preguntar_arriesgo(self):
"""Le consulta al usuario si desea arriesgar. En caso afirmativo le pide las cartas del arriesgo y devuelve
una tupla (personaje, arma, lugar), en caso negativo devuelve None"""
rta = pregunta_si_no("Desea arriesgarse?")
if not rta:
return None
personaje = self.pedir_personaje()
arma = self.pedir_arma()
lugar = self.pedir_lugar()
return personaje, arma, lugar
def mostrar_mano(self, mano):
"""Muestra la mano de un jugador"""
print "Tu mano es:"
print ", ".join(mano)
print
def mostrar_listado(self, listado):
"""Muestra el listado de cartas que aun no vio un determinado jugador"""
print "Las cartas que aun no viste son:"
print listado
def mostrar_dados(self, resultados):
"""Le muestra al usuario el resultado de la lanzada de dados.
Parametros:
- resultados: un iterable con los resultados del lanzamiento de cada dado"""
print "Tiraste un....",
print ", ".join([str(resultado) for resultado in resultados])
print "Total:", sum(resultados)
| agpl-3.0 |
titilambert/alignak | alignak/dependencynode.py | 1 | 30114 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, h.goebel@goebel-consult.de
# Frédéric Vachon, fredvac@gmail.com
# aviau, alexandre.viau@savoirfairelinux.com
# Nicolas Dupeux, nicolas@dupeux.net
# Grégory Starck, g.starck@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# Christophe Simon, geektophe@gmail.com
# Jean Gabes, naparuba@gmail.com
# Gerhard Lausser, gerhard.lausser@consol.de
# Christophe SIMON, christophe.simon@dailymotion.com
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This module provides DependencyNode and DependencyNodeFactory used for parsing
expression (business rules)
"""
import re
from alignak.util import filter_any, filter_none
from alignak.util import filter_host_by_name, filter_host_by_regex, filter_host_by_group,\
filter_host_by_tag
from alignak.util import filter_service_by_name
from alignak.util import filter_service_by_regex_name
from alignak.util import filter_service_by_regex_host_name
from alignak.util import filter_service_by_host_name
from alignak.util import filter_service_by_bp_rule_label
from alignak.util import filter_service_by_hostgroup_name
from alignak.util import filter_service_by_host_tag_name
from alignak.util import filter_service_by_servicegroup_name
from alignak.util import filter_host_by_bp_rule_label
from alignak.util import filter_service_by_host_bp_rule_label
class DependencyNode(object):
"""
DependencyNode is a node class for business_rule expression(s)
"""
def __init__(self):
self.operand = None
self.sons = []
# Of: values are a triple OK,WARN,CRIT
self.of_values = ('0', '0', '0')
self.is_of_mul = False
self.configuration_errors = []
self.not_value = False
def __str__(self):
return "Op:'%s' Val:'%s' Sons:'[%s]' IsNot:'%s'" % (self.operand, self.of_values,
','.join([str(s) for s in self.sons]),
self.not_value)
def get_reverse_state(self, state):
"""Do a symmetry around 1 of the state ::
* 0 -> 2
* 1 -> 1
* 2 -> 0
* else -> else
:param state: state to reverse
:type state: int
:return: Integer from 0 to 2 (usually)
:rtype: int
"""
# Warning is still warning
if state == 1:
return 1
if state == 0:
return 2
if state == 2:
return 0
# should not go here...
return state
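# Illustrative mapping (OK=0, WARNING=1, CRITICAL=2):
#   get_reverse_state(0) -> 2, get_reverse_state(1) -> 1,
#   get_reverse_state(2) -> 0; any other value is returned unchanged.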
def get_state(self):
"""Get node state by looking recursively over sons and applying operand
:return: Node state
:rtype: int
"""
# print "Ask state of me", self
# If we are a host or a service, we just use the host/service
# hard state
if self.operand in ['host', 'service']:
return self.get_simple_node_state()
else:
return self.get_complex_node_state()
def get_simple_node_state(self):
"""Get node state, simplest case ::
* Handle not value (revert) for host and service node
* Return 2 instead of 1 for host
:return: 0, 1 or 2
:rtype: int
TODO: Why does a notted host state of 0 return 1 instead of 2?
"""
state = self.sons[0].last_hard_state_id
# print "Get the hard state (%s) for the object %s" % (state, self.sons[0].get_name())
# Make DOWN look as CRITICAL (2 instead of 1)
if self.operand == 'host' and state == 1:
state = 2
# Maybe we are a NOT node, so manage this
if self.not_value:
# We inverse our states
if self.operand == 'host' and state == 1:
return 0
if self.operand == 'host' and state == 0:
return 1
# Critical -> OK
if self.operand == 'service' and state == 2:
return 0
# OK -> CRITICAL (warning is untouched)
if self.operand == 'service' and state == 0:
return 2
return state
def get_complex_node_state(self):
"""Get state, handle AND, OR, X of aggregation.
:return: 0, 1 or 2
:rtype: int
"""
if self.operand == '|':
return self.get_complex_or_node_state()
elif self.operand == '&':
return self.get_complex_and_node_state()
# It's an Xof rule
else:
return self.get_complex_xof_node_state()
def get_complex_or_node_state(self):
"""Get state , handle OR aggregation ::
* Get the best state (min of sons)
* Revert if it's a not node
:return: 0, 1 or 2
:rtype: int
"""
# First we get the state of all our sons
states = [s.get_state() for s in self.sons]
# Next we calculate the best state
best_state = min(states)
# Then we handle eventual not value
if self.not_value:
return self.get_reverse_state(best_state)
return best_state
def get_complex_and_node_state(self):
"""Get state , handle AND aggregation ::
* Get the worst state. 2 or max of sons (3 <=> UNKNOWN < CRITICAL <=> 2)
* Revert if it's a not node
:return: 0, 1 or 2
:rtype: int
"""
# First we get the state of all our sons
states = [s.get_state() for s in self.sons]
# Next we calculate the worst state
if 2 in states:
worst_state = 2
else:
worst_state = max(states)
# Then we handle eventual not value
if self.not_value:
return self.get_reverse_state(worst_state)
return worst_state
def get_complex_xof_node_state(self):
"""Get state , handle X of aggregation ::
* Count the number of OK, WARNING, CRITICAL
* Try too apply, in this order, Critical, Warning, OK rule
* Return the code for first match (2, 1, 0)
* If no rule apply, return OK for simple X of and worst state for multiple X of
:return: 0, 1 or 2
:rtype: int
TODO: Looks like the last if does the opposite of what the comment says
"""
# First we get the state of all our sons
states = [s.get_state() for s in self.sons]
# We search for OK, WARN or CRIT applications
# And we will choose between them
nb_search_ok = self.of_values[0]
nb_search_warn = self.of_values[1]
nb_search_crit = self.of_values[2]
# We look for each application
nb_sons = len(states)
nb_ok = nb_warn = nb_crit = 0
for state in states:
if state == 0:
nb_ok += 1
elif state == 1:
nb_warn += 1
elif state == 2:
nb_crit += 1
# print "NB:", nb_ok, nb_warn, nb_crit
def get_state_for(nb_tot, nb_real, nb_search):
"""Check if there is enough value to apply this rule
:param nb_tot: total number of value
:type nb_tot: int
:param nb_real: number of value that apply for this rule
:type nb_real: int
:param nb_search: max value to apply this rule (can be a percent)
:type nb_search: int
:return: True if the rule applies (roughly nb_real >= nb_search), False otherwise
:rtype: bool
"""
if nb_search.endswith('%'):
nb_search = int(nb_search[:-1])
if nb_search < 0:
# nb_search is negative, so +
nb_search = max(100 + nb_search, 0)
apply_for = float(nb_real) / nb_tot * 100 >= nb_search
else:
nb_search = int(nb_search)
if nb_search < 0:
# nb_search is negative, so +
nb_search = max(nb_tot + nb_search, 0)
apply_for = nb_real >= nb_search
return apply_for
ok_apply = get_state_for(nb_sons, nb_ok, nb_search_ok)
warn_apply = get_state_for(nb_sons, nb_warn + nb_crit, nb_search_warn)
crit_apply = get_state_for(nb_sons, nb_crit, nb_search_crit)
# print "What apply?", ok_apply, warn_apply, crit_apply
# return the worst state that apply
if crit_apply:
if self.not_value:
return self.get_reverse_state(2)
return 2
if warn_apply:
if self.not_value:
return self.get_reverse_state(1)
return 1
if ok_apply:
if self.not_value:
return self.get_reverse_state(0)
return 0
# Maybe even OK is not possible; if so, it depends whether the admin
# asked for a simple Xof: form or a multiple A,B,Cof: one:
# the simple form should give OK, the multiple form the worst state
if self.is_of_mul:
# print "Is mul, send 0"
if self.not_value:
return self.get_reverse_state(0)
return 0
else:
# print "not mul, return worst", worse_state
if 2 in states:
worst_state = 2
else:
worst_state = max(states)
if self.not_value:
return self.get_reverse_state(worst_state)
return worst_state
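# Illustrative evaluation (hypothetical rule): for a simple "2 of:" over 4
# sons, of_values becomes ('2', '4', '4') and son states [0, 0, 1, 2] give
# nb_ok=2, nb_warn=1, nb_crit=1; only the OK rule (2 of 4) applies, so the
# node state is 0.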
def list_all_elements(self):
"""Get all host/service in our node and below
:return: list of hosts/services
:rtype: list
"""
res = []
# We are a host/service
if self.operand in ['host', 'service']:
return [self.sons[0]]
for son in self.sons:
res.extend(son.list_all_elements())
# and de-duplicate the result
return list(set(res))
def switch_zeros_of_values(self):
"""If we are a of: rule, we can get some 0 in of_values,
if so, change them with NB sons instead
:return: None
"""
nb_sons = len(self.sons)
# Need a list for assignment
self.of_values = list(self.of_values)
for i in [0, 1, 2]:
if self.of_values[i] == '0':
self.of_values[i] = str(nb_sons)
self.of_values = tuple(self.of_values)
def is_valid(self):
"""Check if all leaves are correct (no error)
:return: True if correct, otherwise False
:rtype: bool
"""
valid = True
if not self.sons:
valid = False
else:
for son in self.sons:
if isinstance(son, DependencyNode) and not son.is_valid():
self.configuration_errors.extend(son.configuration_errors)
valid = False
return valid
class DependencyNodeFactory(object):
"""DependencyNodeFactory provides dependency node parsing functions
"""
host_flags = "grlt"
service_flags = "grl"
def __init__(self, bound_item):
self.bound_item = bound_item
def eval_cor_pattern(self, pattern, hosts, services, running=False):
"""Parse and build recursively a tree of DependencyNode from pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
pattern = pattern.strip()
# print "***** EVAL ", pattern
complex_node = False
# Look if it's a complex pattern (with rule) or
# if it's a leaf of it, like a host/service
for char in '()&|':
if char in pattern:
complex_node = True
# If it's a simple node, evaluate it directly
if complex_node is False:
return self.eval_simple_cor_pattern(pattern, hosts, services, running)
else:
return self.eval_complex_cor_pattern(pattern, hosts, services, running)
def eval_xof_pattern(self, node, pattern):
"""Parse a X of pattern
* Set is_of_mul attribute
* Set of_values attribute
:param node: node to edit
:type node: alignak.dependencynode.DependencyNode
:param pattern: line to match
:type pattern: str
:return: end of the line (without X of :)
:rtype: str
"""
xof_pattern = r"^(-?\d+%?),*(-?\d*%?),*(-?\d*%?) *of: *(.+)"
regex = re.compile(xof_pattern)
matches = regex.search(pattern)
if matches is not None:
# print "Match the of: thing N=", m.groups()
node.operand = 'of:'
groups = matches.groups()
# We can have an Aof: rule, or a multiple A,B,Cof: rule.
mul_of = (groups[1] != u'' and groups[2] != u'')
# If multi got (A,B,C)
if mul_of:
node.is_of_mul = True
node.of_values = (groups[0], groups[1], groups[2])
else: # if not, use A,0,0; the 0s are later replaced with the number of sons
node.of_values = (groups[0], '0', '0')
pattern = matches.groups()[3]
return pattern
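# Illustrative parse (hypothetical expression): "1,2,3 of: h1|h2|h3" sets
# operand to 'of:', of_values to ('1', '2', '3'), is_of_mul to True, and
# returns the remainder "h1|h2|h3" for further evaluation.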
def eval_complex_cor_pattern(self, pattern, hosts, services, running=False):
"""Parse and build recursively a tree of DependencyNode from a complex pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
node = DependencyNode()
pattern = self.eval_xof_pattern(node, pattern)
in_par = False
tmp = ''
son_is_not = False # We track whether the next son will be negated
stacked_par = 0
for char in pattern:
if char == '(':
stacked_par += 1
# print "INCREASING STACK TO", stacked_par
in_par = True
tmp = tmp.strip()
# Maybe we just start a par, but we got some things in tmp
# that should not be good in fact !
if stacked_par == 1 and tmp != '':
# TODO : real error
print "ERROR : bad expression near", tmp
continue
# If we are already in a par, add this (
# but not if it's the first one so
if stacked_par > 1:
tmp += char
elif char == ')':
# print "Need closeing a sub expression?", tmp
stacked_par -= 1
if stacked_par < 0:
# TODO : real error
print "Error : bad expression near", tmp, "too much ')'"
continue
if stacked_par == 0:
# print "THIS is closing a sub compress expression", tmp
tmp = tmp.strip()
son = self.eval_cor_pattern(tmp, hosts, services, running)
# Maybe our son was notted
if son_is_not:
son.not_value = True
son_is_not = False
node.sons.append(son)
in_par = False
# OK now clean the tmp so we start clean
tmp = ''
continue
# ok here we are still in a huge par, we just close one sub one
tmp += char
# Expressions in par will be parsed in a sub node after. So just
# stack pattern
elif in_par:
tmp += char
# Until here, we're not in par
# Manage the NOT for an expression. Only allow ! at the beginning
# of a host or a host,service expression.
elif char == '!':
tmp = tmp.strip()
if tmp and tmp[0] != '!':
print "Error : bad expression near", tmp, "wrong position for '!'"
continue
# Flags next node not state
son_is_not = True
# DO NOT keep the c in tmp, we consumed it
# print "MATCHING", c, pattern
elif char == '&' or char == '|':
# Oh we got a real cut in an expression, if so, cut it
# print "REAL & for cutting"
tmp = tmp.strip()
# Look at the rule viability
if node.operand is not None and node.operand != 'of:' and char != node.operand:
# Should be logged as a warning / info? :)
return None
if node.operand != 'of:':
node.operand = char
if tmp != '':
# print "Will analyse the current str", tmp
son = self.eval_cor_pattern(tmp, hosts, services, running)
# Maybe our son was notted
if son_is_not:
son.not_value = True
son_is_not = False
node.sons.append(son)
tmp = ''
# Maybe it's a classic character or we're in par, if so, continue
else:
tmp += char
# Be sure to manage the trailing part when the line is done
tmp = tmp.strip()
if tmp != '':
# print "Managing trainling part", tmp
son = self.eval_cor_pattern(tmp, hosts, services, running)
# Maybe our son was notted
if son_is_not:
son.not_value = True
son_is_not = False
# print "4end I've %s got new sons" % pattern , o
node.sons.append(son)
# We got our nodes, so we can update 0 values of of_values
# with the number of sons
node.switch_zeros_of_values()
return node
def eval_simple_cor_pattern(self, pattern, hosts, services, running=False):
"""Parse and build recursively a tree of DependencyNode from a simple pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
node = DependencyNode()
pattern = self.eval_xof_pattern(node, pattern)
# print "Try to find?", pattern
# If it's a not value, tag the node and find
# the name without this ! operator
if pattern.startswith('!'):
node.not_value = True
pattern = pattern[1:]
# Is the pattern an expression to be expanded?
if re.search(r"^([%s]+|\*):" % self.host_flags, pattern) or \
re.search(r",\s*([%s]+:.*|\*)$" % self.service_flags, pattern):
# The expanded node is only used for its attributes, then discarded.
son = self.expand_expression(pattern, hosts, services, running)
if node.operand != 'of:':
node.operand = '&'
node.sons.extend(son.sons)
node.configuration_errors.extend(son.configuration_errors)
node.switch_zeros_of_values()
else:
node.operand = 'object'
obj, error = self.find_object(pattern, hosts, services)
if obj is not None:
# Set host or service
# pylint: disable=E1101
node.operand = obj.__class__.my_type
node.sons.append(obj)
else:
if running is False:
node.configuration_errors.append(error)
else:
# As business rules are re-evaluated at run time on
# each scheduling loop, if the rule becomes invalid
# because of a badly written macro modulation, it
# should be notified upper for the error to be
# displayed in the check output.
raise Exception(error)
return node
def find_object(self, pattern, hosts, services):
"""Find object from pattern
:param pattern: text to search (host1,service1)
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:return: tuple with Host or Service object and error
:rtype: tuple
"""
# print "Finding object", pattern
obj = None
error = None
is_service = False
# host_name, service_description are comma-separated
elts = pattern.split(',')
host_name = elts[0].strip()
# If host_name is empty, use the host_name the business rule is bound to
if not host_name:
host_name = self.bound_item.host_name
# Look if we have a service
if len(elts) > 1:
is_service = True
service_description = elts[1].strip()
if is_service:
obj = services.find_srv_by_name_and_hostname(host_name, service_description)
if not obj:
error = "Business rule uses unknown service %s/%s"\
% (host_name, service_description)
else:
obj = hosts.find_by_name(host_name)
if not obj:
error = "Business rule uses unknown host %s" % (host_name,)
return obj, error
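# Illustrative lookups (hypothetical names): "web01,HTTP" resolves the HTTP
# service on host web01, "web01" alone resolves the host, and an empty host
# part (e.g. ",HTTP") falls back to the host_name of the bound item.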
def expand_expression(self, pattern, hosts, services, running=False):
"""Expand a host or service expression into a dependency node tree
using (host|service)group membership, regex, or labels as item selector.
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
error = None
node = DependencyNode()
node.operand = '&'
elts = [e.strip() for e in pattern.split(',')]
# If host_name is empty, use the host_name the business rule is bound to
if not elts[0]:
elts[0] = self.bound_item.host_name
filters = []
# Looks for hosts/services using appropriate filters
try:
if len(elts) > 1:
# We got a service expression
host_expr, service_expr = elts
filters.extend(self.get_srv_host_filters(host_expr))
filters.extend(self.get_srv_service_filters(service_expr))
items = services.find_by_filter(filters)
else:
# We got a host expression
host_expr = elts[0]
filters.extend(self.get_host_filters(host_expr))
items = hosts.find_by_filter(filters)
except re.error, regerr:
error = "Business rule uses invalid regex %s: %s" % (pattern, regerr)
else:
if not items:
error = "Business rule got an empty result for pattern %s" % pattern
# Check whether we got a result
if error:
if running is False:
node.configuration_errors.append(error)
else:
# As business rules are re-evaluated at run time on
# each scheduling loop, if the rule becomes invalid
# because of a badly written macro modulation, it
# should be notified upper for the error to be
# displayed in the check output.
raise Exception(error)
return node
# Creates dependency node subtree
for item in items:
# Creates a host/service node
son = DependencyNode()
son.operand = item.__class__.my_type
son.sons.append(item)
# Appends it to wrapping node
node.sons.append(son)
node.switch_zeros_of_values()
return node
def get_host_filters(self, expr):
"""Generates host filter list corresponding to the expression ::
* '*' => any
* 'g' => group filter
* 'r' => regex name filter
* 'l' => bp rule label filter
* 't' => tag filter
* '' => none filter
* No flag match => host name filter
:param expr: expression to parse
:type expr: str
:return: filter list
:rtype: list
"""
if expr == "*":
return [filter_any]
match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
if match is None:
return [filter_host_by_name(expr)]
flags, expr = match.groups()
if "g" in flags:
return [filter_host_by_group(expr)]
elif "r" in flags:
return [filter_host_by_regex(expr)]
elif "l" in flags:
return [filter_host_by_bp_rule_label(expr)]
elif "t" in flags:
return [filter_host_by_tag(expr)]
else:
return [filter_none]
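# Illustrative expressions (hypothetical names): "*" matches any host,
# "g:linux-servers" filters by hostgroup, "r:^web\d+$" by host name regex,
# "l:mylabel" by business rule label, "t:mytag" by host tag, and a bare
# "web01" matches that host by name.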
def get_srv_host_filters(self, expr):
"""Generates service filter list corresponding to the expression ::
* '*' => any
* 'g' => hostgroup filter
* 'r' => host regex name filter
* 'l' => host bp rule label filter
* 't' => tag filter
* '' => none filter
* No flag match => host name filter
:param expr: expression to parse
:type expr: str
:return: filter list
:rtype: list
"""
if expr == "*":
return [filter_any]
match = re.search(r"^([%s]+):(.*)" % self.host_flags, expr)
if match is None:
return [filter_service_by_host_name(expr)]
flags, expr = match.groups()
if "g" in flags:
return [filter_service_by_hostgroup_name(expr)]
elif "r" in flags:
return [filter_service_by_regex_host_name(expr)]
elif "l" in flags:
return [filter_service_by_host_bp_rule_label(expr)]
elif "t" in flags:
return [filter_service_by_host_tag_name(expr)]
else:
return [filter_none]
def get_srv_service_filters(self, expr):
"""Generates service filter list corresponding to the expression ::
* '*' => any
* 'g' => servicegroup filter
* 'r' => service regex name filter
* 'l' => service bp rule label filter
* 't' => tag filter
* '' => none filter
* No flag match => service name filter
:param expr: expression to parse
:type expr: str
:return: filter list
:rtype: list
"""
if expr == "*":
return [filter_any]
match = re.search(r"^([%s]+):(.*)" % self.service_flags, expr)
if match is None:
return [filter_service_by_name(expr)]
flags, expr = match.groups()
if "g" in flags:
return [filter_service_by_servicegroup_name(expr)]
elif "r" in flags:
return [filter_service_by_regex_name(expr)]
elif "l" in flags:
return [filter_service_by_bp_rule_label(expr)]
else:
return [filter_none]
| agpl-3.0 |
openweave/openweave-core | src/test-apps/happy/tests/service/wdmNext/test_weave_wdm_next_service_update_07_cond_OneLeaf_multi_traits.py | 1 | 2979 | #!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Calls Weave WDM mutual subscribe between mock device and real service.
# Test Wdm-NestService-O07: Client creates a mutual subscription, sends one
# UpdateRequest to the publisher with 2 traits, and receives a
# StatusReport
#
from __future__ import absolute_import
from __future__ import print_function
import unittest
from weave_wdm_next_test_service_base import weave_wdm_next_test_service_base
class test_weave_wdm_next_service_update_07_cond_OneLeaf_multi_traits(weave_wdm_next_test_service_base):
def test_weave_wdm_next_service_update_07_cond_OneLeaf_multi_traits(self):
wdm_next_args = {}
wdm_next_args['wdm_option'] = "mutual_subscribe"
wdm_next_args['final_client_status'] = 0
wdm_next_args['enable_client_flip'] = 1
wdm_next_args['test_client_iterations'] = 1
wdm_next_args['test_client_delay'] = 4000
wdm_next_args['timer_client_period'] = 4000
wdm_next_args['client_clear_state_between_iterations'] = False
wdm_next_args['test_client_case'] = 10 # kTestCase_TestUpdatableTraits
wdm_next_args['total_client_count'] = 1
wdm_next_args['enable_retry'] = True
wdm_next_args['client_update_mutation'] = "OneLeaf"
wdm_next_args['client_update_num_traits'] = 2
wdm_next_args['client_update_num_mutations'] = 1
wdm_next_args['client_log_check'] = [('Mutual: Good Iteration', 1),
('Update: path result: success', 2),
('Update: no more pending updates', 1),
('Update: path failed', 0),
('Need to resubscribe', 0)]
wdm_next_args['test_tag'] = self.__class__.__name__
wdm_next_args['test_case_name'] = ['Wdm-NestService-O07: Client creates a mutual subscription, sends one UpdateRequest to the publisher with 2 traits, and receives a StatusReport']
print('test file: ' + self.__class__.__name__)
print("weave-wdm-next test O07")
super(test_weave_wdm_next_service_update_07_cond_OneLeaf_multi_traits, self).weave_wdm_next_test_service_base(wdm_next_args)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
nealtodd/wagtail | wagtail/contrib/settings/tests/test_templates.py | 13 | 10270 | from django.template import Context, RequestContext, Template, engines
from django.test import TestCase
from wagtail.core.models import Page, Site
from wagtail.tests.testapp.models import TestSetting
from wagtail.tests.utils import WagtailTestUtils
class TemplateTestCase(TestCase, WagtailTestUtils):
def setUp(self):
root = Page.objects.first()
other_home = Page(title='Other Root')
root.add_child(instance=other_home)
self.default_site = Site.objects.get(is_default_site=True)
self.other_site = Site.objects.create(hostname='other', root_page=other_home)
self.test_setting = TestSetting.objects.create(
title='Site title',
email='initial@example.com',
site=self.default_site)
self.other_setting = TestSetting.objects.create(
title='Other title',
email='other@example.com',
site=self.other_site)
def get_request(self, site=None):
if site is None:
site = self.default_site
request = self.client.get('/test/', HTTP_HOST=site.hostname)
request.site = site
return request
def render(self, request, string, context=None, site=None):
template = Template(string)
context = RequestContext(request, context)
return template.render(context)
class TestContextProcessor(TemplateTestCase):
def test_accessing_setting(self):
""" Check that the context processor works """
request = self.get_request()
self.assertEqual(
self.render(request, '{{ settings.tests.TestSetting.title }}'),
self.test_setting.title)
def test_multisite(self):
""" Check that the correct setting for the current site is returned """
request = self.get_request(site=self.default_site)
self.assertEqual(
self.render(request, '{{ settings.tests.TestSetting.title }}'),
self.test_setting.title)
request = self.get_request(site=self.other_site)
self.assertEqual(
self.render(request, '{{ settings.tests.TestSetting.title }}'),
self.other_setting.title)
def test_model_case_insensitive(self):
""" Model names should be case insensitive """
request = self.get_request()
self.assertEqual(
self.render(request, '{{ settings.tests.testsetting.title }}'),
self.test_setting.title)
self.assertEqual(
self.render(request, '{{ settings.tests.TESTSETTING.title }}'),
self.test_setting.title)
self.assertEqual(
self.render(request, '{{ settings.tests.TestSetting.title }}'),
self.test_setting.title)
self.assertEqual(
self.render(request, '{{ settings.tests.tEstsEttIng.title }}'),
self.test_setting.title)
def test_models_cached(self):
""" Accessing a setting should only hit the DB once per render """
request = self.get_request()
get_title = '{{ settings.tests.testsetting.title }}'
for i in range(1, 4):
with self.assertNumQueries(1):
self.assertEqual(
self.render(request, get_title * i),
self.test_setting.title * i)
class TestTemplateTag(TemplateTestCase):
def test_no_context_processor(self):
"""
Assert that not running the context processor means settings are not in
the context, as expected.
"""
template = Template('{{ settings.tests.TestSetting.title }}')
context = Context()
self.assertEqual(template.render(context), '')
def test_get_settings_request_context(self):
""" Check that the {% get_settings %} tag works """
request = self.get_request(site=self.other_site)
context = Context({'request': request})
# This should use the site in the request
template = Template('{% load wagtailsettings_tags %}'
'{% get_settings %}'
'{{ settings.tests.testsetting.title}}')
self.assertEqual(template.render(context), self.other_setting.title)
def test_get_settings_request_context_use_default(self):
"""
Check that the {% get_settings use_default_site=True %} option
overrides a request in the context.
"""
request = self.get_request(site=self.other_site)
context = Context({'request': request})
# This should use the default site, ignoring the site in the request
template = Template('{% load wagtailsettings_tags %}'
'{% get_settings use_default_site=True %}'
'{{ settings.tests.testsetting.title}}')
self.assertEqual(template.render(context), self.test_setting.title)
def test_get_settings_use_default(self):
"""
Check that the {% get_settings use_default_site=True %} option works
"""
context = Context()
# This should use the default site
template = Template('{% load wagtailsettings_tags %}'
'{% get_settings use_default_site=True %}'
'{{ settings.tests.testsetting.title}}')
self.assertEqual(template.render(context), self.test_setting.title)
def test_get_settings_no_request_no_default(self):
"""
Check that {% get_settings %} raises an error if it cannot find a
site to work with
"""
context = Context()
# Without a request in the context, and without use_default_site, this
# should bail with an error
template = Template('{% load wagtailsettings_tags %}'
'{% get_settings %}'
'{{ settings.tests.testsetting.title}}')
with self.assertRaises(RuntimeError):
template.render(context)
class TestSettingsJinja(TemplateTestCase):
def setUp(self):
super().setUp()
self.engine = engines['jinja2']
def render(self, string, context=None, request_context=True):
if context is None:
context = {}
# Add a request to the template, to simulate a RequestContext
if request_context:
if 'site' in context:
site = context['site']
else:
site = Site.objects.get(is_default_site=True)
request = self.client.get('/test/', HTTP_HOST=site.hostname)
request.site = site
context['request'] = request
template = self.engine.from_string(string)
return template.render(context)
def test_accessing_setting(self):
""" Check that the context processor works """
self.assertEqual(
self.render('{{ settings("tests.TestSetting").title }}'),
self.test_setting.title)
def test_multisite(self):
""" Check that the correct setting for the current site is returned """
context = {'site': self.default_site}
self.assertEqual(
self.render('{{ settings("tests.TestSetting").title }}', context),
self.test_setting.title)
context = {'site': self.other_site}
self.assertEqual(
self.render('{{ settings("tests.TestSetting").title }}', context),
self.other_setting.title)
def test_model_case_insensitive(self):
""" Model names should be case insensitive """
self.assertEqual(
self.render('{{ settings("tests.testsetting").title }}'),
self.test_setting.title)
self.assertEqual(
self.render('{{ settings("tests.TESTSETTING").title }}'),
self.test_setting.title)
self.assertEqual(
self.render('{{ settings("tests.TestSetting").title }}'),
self.test_setting.title)
self.assertEqual(
self.render('{{ settings("tests.tEstsEttIng").title }}'),
self.test_setting.title)
def test_models_cached(self):
""" Accessing a setting should only hit the DB once per render """
get_title = '{{ settings("tests.testsetting").title }}'
# Can't use the default 'self.render()' here, as it does DB queries to get
# the site; build a dummy request instead
site = Site.objects.get(is_default_site=True)
request = self.client.get('/test/', HTTP_HOST=site.hostname)
request.site = site
for i in range(1, 4):
with self.assertNumQueries(1):
context = {'request': request}
template = self.engine.from_string(get_title * i)
self.assertEqual(
template.render(context),
self.test_setting.title * i)
def test_settings_use_default_site_override(self):
"""
Check that {{ settings(use_default_site=True) }} overrides a site in
the context.
"""
request = self.get_request(site=self.other_site)
context = {'request': request}
# This should use the default site, ignoring the site in the request
template = '{{ settings("tests.testsetting", use_default_site=True).title }}'
self.assertEqual(
self.render(template, context),
self.test_setting.title)
def test_settings_use_default_site(self):
"""
Check that the {{ settings(use_default_site=True) }} option works with
no site in the context
"""
context = {}
# This should use the default site
template = '{{ settings("tests.testsetting", use_default_site=True).title}}'
self.assertEqual(
self.render(template, context, request_context=False),
self.test_setting.title)
def test_settings_no_request_no_use_default(self):
"""
Check that {{ settings }} raises an error if it cannot find a
site to work with
"""
context = {}
# Without a request in the context, and without use_default_site, this
# should bail with an error
template = '{{ settings("tests.testsetting").title}}'
with self.assertRaises(RuntimeError):
self.render(template, context, request_context=False)
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/asn1crypto/_elliptic_curve.py | 16 | 9411 | # coding: utf-8
"""
Classes and objects to represent prime-field elliptic curves and points on them.
Exports the following items:
- PrimeCurve()
- PrimePoint()
- SECP192R1_CURVE
- SECP192R1_BASE_POINT
- SECP224R1_CURVE
- SECP224R1_BASE_POINT
- SECP256R1_CURVE
- SECP256R1_BASE_POINT
- SECP384R1_CURVE
- SECP384R1_BASE_POINT
- SECP521R1_CURVE
- SECP521R1_BASE_POINT
The curve constants are all PrimeCurve() objects and the base point constants
are all PrimePoint() objects.
Some of the following source code is derived from
http://webpages.charter.net/curryfans/peter/downloads.html, but has been heavily
modified to fit into this projects lint settings. The original project license
is listed below:
Copyright (c) 2014 Peter Pearson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from ._int import inverse_mod
class PrimeCurve():
"""
Elliptic curve over a prime field. Characteristic two field curves are not
supported.
"""
def __init__(self, p, a, b):
"""
The curve of points satisfying y^2 = x^3 + a*x + b (mod p)
:param p:
The prime number as an integer
:param a:
The component a as an integer
:param b:
The component b as an integer
"""
self.p = p
self.a = a
self.b = b
def contains(self, point):
"""
:param point:
A Point object
:return:
Boolean if the point is on this curve
"""
y2 = point.y * point.y
x3 = point.x * point.x * point.x
return (y2 - (x3 + self.a * point.x + self.b)) % self.p == 0
class PrimePoint():
"""
A point on a prime-field elliptic curve
"""
def __init__(self, curve, x, y, order=None):
"""
:param curve:
A PrimeCurve object
:param x:
The x coordinate of the point as an integer
:param y:
The y coordinate of the point as an integer
:param order:
The order of the point, as an integer - optional
"""
self.curve = curve
self.x = x
self.y = y
self.order = order
# self.curve is allowed to be None only for INFINITY:
if self.curve:
if not self.curve.contains(self):
raise ValueError('Invalid EC point')
if self.order:
if self * self.order != INFINITY:
raise ValueError('Invalid EC point')
def __cmp__(self, other):
"""
:param other:
A PrimePoint object
:return:
0 if identical, 1 otherwise
"""
if self.curve == other.curve and self.x == other.x and self.y == other.y:
return 0
else:
return 1
def __add__(self, other):
"""
:param other:
A PrimePoint object
:return:
A PrimePoint object
"""
# X9.62 B.3:
if other == INFINITY:
return self
if self == INFINITY:
return other
assert self.curve == other.curve
if self.x == other.x:
if (self.y + other.y) % self.curve.p == 0:
return INFINITY
else:
return self.double()
p = self.curve.p
l = ((other.y - self.y) * inverse_mod(other.x - self.x, p)) % p
x3 = (l * l - self.x - other.x) % p
y3 = (l * (self.x - x3) - self.y) % p
return PrimePoint(self.curve, x3, y3)
def __mul__(self, other):
"""
:param other:
An integer to multiply the point by
:return:
A PrimePoint object
"""
def leftmost_bit(x):
assert x > 0
result = 1
while result <= x:
result = 2 * result
return result // 2
e = other
if self.order:
e = e % self.order
if e == 0:
return INFINITY
if self == INFINITY:
return INFINITY
assert e > 0
# From X9.62 D.3.2:
e3 = 3 * e
negative_self = PrimePoint(self.curve, self.x, -self.y, self.order)
i = leftmost_bit(e3) // 2
result = self
# print "Multiplying %s by %d (e3 = %d):" % ( self, other, e3 )
while i > 1:
result = result.double()
if (e3 & i) != 0 and (e & i) == 0:
result = result + self
if (e3 & i) == 0 and (e & i) != 0:
result = result + negative_self
# print ". . . i = %d, result = %s" % ( i, result )
i = i // 2
return result
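# Worked trace (added for illustration): for e = 5, e3 = 15 (0b1111) and
# i = leftmost_bit(e3) // 2 = 4, the loop runs with i = 4 and then i = 2:
#   i = 4: result = 2P; e & 4 != 0, so neither correction branch applies
#   i = 2: result = 4P; e3 & 2 != 0 and e & 2 == 0, so result = 4P + P = 5P
# which is the expected 5 * P.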
def __rmul__(self, other):
"""
:param other:
An integer to multiply the point by
:return:
A PrimePoint object
"""
return self * other
def double(self):
"""
:return:
A PrimePoint object that is twice this point
"""
# X9.62 B.3:
p = self.curve.p
a = self.curve.a
l = ((3 * self.x * self.x + a) * inverse_mod(2 * self.y, p)) % p
x3 = (l * l - 2 * self.x) % p
y3 = (l * (self.x - x3) - self.y) % p
return PrimePoint(self.curve, x3, y3)
# This one point is the Point At Infinity for all purposes:
INFINITY = PrimePoint(None, None, None)
# NIST Curve P-192:
SECP192R1_CURVE = PrimeCurve(
6277101735386680763835789423207666416083908700390324961279,
-3,
0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
)
SECP192R1_BASE_POINT = PrimePoint(
SECP192R1_CURVE,
0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012,
0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811,
6277101735386680763835789423176059013767194773182842284081
)
# NIST Curve P-224:
SECP224R1_CURVE = PrimeCurve(
26959946667150639794667015087019630673557916260026308143510066298881,
-3,
0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4
)
SECP224R1_BASE_POINT = PrimePoint(
SECP224R1_CURVE,
0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21,
0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34,
26959946667150639794667015087019625940457807714424391721682722368061
)
# NIST Curve P-256:
SECP256R1_CURVE = PrimeCurve(
115792089210356248762697446949407573530086143415290314195533631308867097853951,
-3,
0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b
)
SECP256R1_BASE_POINT = PrimePoint(
SECP256R1_CURVE,
0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296,
0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5,
115792089210356248762697446949407573529996955224135760342422259061068512044369
)
# NIST Curve P-384:
SECP384R1_CURVE = PrimeCurve(
39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319, # noqa
-3,
0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef
)
SECP384R1_BASE_POINT = PrimePoint(
SECP384R1_CURVE,
0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7,
0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f,
39402006196394479212279040100143613805079739270465446667946905279627659399113263569398956308152294913554433653942643
)
# NIST Curve P-521:
SECP521R1_CURVE = PrimeCurve(
6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151, # noqa
-3,
0x051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00 # noqa
)
SECP521R1_BASE_POINT = PrimePoint(
SECP521R1_CURVE,
0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66, # noqa
0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650, # noqa
6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449 # noqa
)
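# Hedged usage sketch (added; not part of the original module). A raw
# Diffie-Hellman-style exchange could be built on these primitives:
#
#   base = SECP256R1_BASE_POINT
#   alice_secret = 12345                      # toy scalar; use a CSPRNG in practice
#   alice_public = alice_secret * base        # PrimePoint.__rmul__ handles int * point
#   shared_point = alice_secret * bob_public  # bob_public is assumed to be a PrimePoint
#
# PrimePoint.__init__ validates that every constructed point lies on its curve
# and, when an order is supplied, that order * point == INFINITY.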
| gpl-3.0 |
laborautonomo/poedit | deps/boost/tools/build/v2/test/project_test4.py | 64 | 1792 | #!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(translate_suffixes=0)
t.set_tree("project-test4")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/a.obj")
t.expect_content("bin/$toolset/debug/a.obj",
"""$toolset/debug/include-everything
a.cpp
""")
t.expect_addition("bin/$toolset/debug/a.exe")
t.expect_content("bin/$toolset/debug/a.exe",
"$toolset/debug/include-everything\n" +
"bin/$toolset/debug/a.obj lib/bin/$toolset/debug/optimization-speed/b.obj\n"
)
t.expect_addition("lib/bin/$toolset/debug/optimization-speed/b.obj")
t.expect_content("lib/bin/$toolset/debug/optimization-speed/b.obj",
"""$toolset/debug/include-everything/optimization-speed
lib/b.cpp
""")
t.expect_addition("bin/$toolset/debug/b.exe")
t.expect_content("bin/$toolset/debug/b.exe",
"$toolset/debug/define-MACROS/include-everything\n" +
"bin/$toolset/debug/a.obj\n"
)
t.copy("lib/jamfile3.jam", "lib/jamfile.jam")
# Link-compatibility check for rtti is disabled...
#t.run_build_system(status=None)
#import string
#t.fail_test(string.find(t.stdout(),
#"""warning: targets produced from b.obj are link incompatible
#warning: with main target a.exe""") !=-0)
# Test that if we specified composite property in target reference, everything
# works OK.
t.copy("lib/jamfile1.jam", "lib/jamfile.jam")
t.copy("jamfile5.jam", "jamfile.jam")
t.run_build_system()
t.expect_addition("lib/bin/$toolset/release/b.obj")
t.expect_content("bin/$toolset/debug/a.exe",
"$toolset/debug/include-everything\n" +
"bin/$toolset/debug/a.obj lib/bin/$toolset/release/b.obj\n"
)
t.cleanup()
| mit |
yongshengwang/hue | desktop/core/ext-py/pycrypto-2.6.1/build/lib.linux-x86_64-2.7/Crypto/SelfTest/Random/OSRNG/test_nt.py | 131 | 1764 | # -*- coding: utf-8 -*-
#
# SelfTest/Random/OSRNG/test_nt.py: Self-test for the OSRNG.nt.new() function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Random.OSRNG.nt"""
__revision__ = "$Id$"
import unittest
class SimpleTest(unittest.TestCase):
def runTest(self):
"""Crypto.Random.OSRNG.nt.new()"""
# Import the OSRNG.nt module and try to use it
import Crypto.Random.OSRNG.nt
randobj = Crypto.Random.OSRNG.nt.new()
x = randobj.read(16)
y = randobj.read(16)
self.assertNotEqual(x, y)
def get_tests(config={}):
return [SimpleTest()]
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
osma/annif | create_corpus_yso_finna.py | 1 | 4644 | #!/usr/bin/env python3
from SPARQLWrapper import SPARQLWrapper, JSON
import requests
import re
import os
import os.path
import time
import sys
FINTO_ENDPOINT='http://api.dev.finto.fi/sparql'
FINNA_API_SEARCH='https://api.finna.fi/v1/search'
lang = sys.argv[1]
# map ISO 639-1 language codes into the ISO 639-2 codes that Finna uses
LANGMAP = {
'fi': 'fin',
'sv': 'swe',
'en': 'eng'
}
def row_to_concept(row):
concept = {'uri': row['c']['value'],
'pref': row['pref']['value'],
'ysapref': row['ysapref']['value'],
'allarspref': row['allarspref']['value']}
if 'alts' in row:
concept['alts'] = row['alts']['value']
return concept
def get_concepts(lang):
sparql = SPARQLWrapper(FINTO_ENDPOINT)
sparql.setQuery("""
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX ysometa: <http://www.yso.fi/onto/yso-meta/>
SELECT ?c ?pref (GROUP_CONCAT(?alt) AS ?alts) ?ysapref ?allarspref
WHERE {
GRAPH <http://www.yso.fi/onto/yso/> {
?c a skos:Concept .
?c skos:prefLabel ?pref .
FILTER(LANG(?pref)='%s')
OPTIONAL {
?c skos:altLabel ?alt .
FILTER(LANG(?alt)='%s')
}
FILTER NOT EXISTS { ?c owl:deprecated true }
FILTER NOT EXISTS { ?c a ysometa:Hierarchy }
}
GRAPH <http://www.yso.fi/onto/ysa/> {
?ysac skos:closeMatch|skos:exactMatch ?c .
?ysac skos:prefLabel ?ysapref .
}
GRAPH <http://www.yso.fi/onto/allars/> {
?allarsc skos:closeMatch|skos:exactMatch ?c .
?allarsc skos:prefLabel ?allarspref .
}
}
GROUP BY ?c ?pref ?ysapref ?allarspref
#LIMIT 500
""" % (lang, lang))
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
return [row_to_concept(row) for row in results['results']['bindings']]
concepts = get_concepts(lang)
def search_finna(params):
r = requests.get(FINNA_API_SEARCH, params=params, headers={'User-agent': 'annif 0.1'})
return r.json()
def records_to_texts(records):
texts = []
for rec in records:
if 'title' in rec:
texts.append(rec['title'])
if 'summary' in rec:
for summary in rec['summary']:
texts.append(summary)
return texts
def generate_text(concept, lang):
# start with pref- and altlabels
labels = [concept['pref']]
if lang == 'fi':
# we can use the YSA label too
labels.append(concept['ysapref'])
if lang == 'sv':
# we can use the Allars label too
labels.append(concept['allarspref'])
if 'alts' in concept:
labels.append(concept['alts'])
labels = ' '.join(labels)
# look for more text in Finna API
texts = []
fields = ['title','summary']
finnaterms = (concept['ysapref'], concept['allarspref'])
finnalang = LANGMAP[lang]
# Search type 1: exact matches using topic facet
params = {'lookfor': 'topic_facet:"%s" OR topic_facet:"%s"' % finnaterms, 'filter[]': 'language:%s' % finnalang, 'lng':lang, 'limit':100, 'field[]':fields}
response = search_finna(params)
if 'records' in response:
texts += records_to_texts(response['records'])
# Search type 2: exact matches using Subject search
params['lookfor'] = '"%s" OR "%s"' % finnaterms
params['type'] = 'Subject'
response = search_finna(params)
if 'records' in response:
texts += records_to_texts(response['records'])
# Search type 3: fuzzy matches using Subject search
params['lookfor'] = '(%s) OR (%s)' % finnaterms
response = search_finna(params)
if 'records' in response:
texts += records_to_texts(response['records'])
return "\n".join([labels] + list(set(texts)))
for concept in concepts:
localname = concept['uri'].split('/')[-1]
outfile = 'corpus/%s-%s.raw' % (localname, lang)
if os.path.exists(outfile):
continue
text = None
tries = 0
while tries < 10:
try:
text = generate_text(concept, lang)
break
except Exception:
# failure, try again until tries exhausted
tries += 1
print("Error generating text for concept %s, trying again (attempt %d)" % (concept['uri'], tries))
time.sleep(tries) # wait progressively longer between attempts
if text is None:
print("Failed looking up concept %s, exiting" % concept['uri'])
sys.exit(1)
print(localname, lang, concept['pref'], concept['ysapref'], concept['allarspref'], len(text.split()))
f = open(outfile, 'w')
print (concept['uri'], concept['pref'], file=f)
print (text, file=f)
f.close()
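# Output format note (added; inferred from the writes above): each file
# corpus/<localname>-<lang>.raw starts with a line "<concept URI> <prefLabel>",
# followed by the pref/alt labels and the de-duplicated Finna titles and
# summaries as newline-separated text.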
| cc0-1.0 |
danielneis/osf.io | api_tests/nodes/views/test_node_links_detail.py | 2 | 9590 | # -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
from urlparse import urlparse
from framework.auth.core import Auth
from website.models import NodeLog
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory
)
from tests.utils import assert_logs
node_url_for = lambda n_id: '/{}nodes/{}/'.format(API_BASE, n_id)
class TestNodeLinkDetail(ApiTestCase):
def setUp(self):
super(TestNodeLinkDetail, self).setUp()
self.user = AuthUserFactory()
self.private_project = ProjectFactory(creator=self.user, is_public=False)
self.pointer_project = ProjectFactory(creator=self.user, is_public=False)
self.pointer = self.private_project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
self.private_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.private_project._id, self.pointer._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(is_public=True)
self.public_pointer_project = ProjectFactory(is_public=True)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.public_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
def test_returns_embedded_public_node_pointer_detail_logged_out(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_returns_public_node_pointer_detail_logged_in(self):
res = self.app.get(self.public_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_returns_private_node_pointer_detail_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_returns_private_node_pointer_detail_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.pointer_project._id)
def test_returns_private_node_pointer_detail_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_self_link_points_to_node_link_detail_url(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = res.json['data']['links']['self']
assert_in(self.public_url, url)
class TestDeleteNodeLink(ApiTestCase):
def setUp(self):
super(TestDeleteNodeLink, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=False)
self.pointer_project = ProjectFactory(creator=self.user, is_public=True)
self.pointer = self.project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
self.private_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.project._id, self.pointer._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.public_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
def test_delete_node_link_no_permissions_for_target_node(self):
pointer_project = ProjectFactory(creator=self.user_two, is_public=False)
pointer = self.public_project.add_pointer(pointer_project, auth=Auth(self.user), save=True)
assert_in(pointer, self.public_project.nodes)
url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, pointer._id)
res = self.app.delete_json_api(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 204)
self.public_project.reload()
assert_not_in(pointer, self.public_project.nodes)
def test_cannot_delete_if_registration(self):
registration = RegistrationFactory(project=self.public_project)
url = '/{}nodes/{}/node_links/'.format(
API_BASE,
registration._id,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
pointer_id = res.json['data'][0]['id']
url = '/{}nodes/{}/node_links/{}/'.format(
API_BASE,
registration._id,
pointer_id,
)
res = self.app.delete(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_deletes_public_node_pointer_logged_out(self):
res = self.app.delete(self.public_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0].keys())
def test_deletes_public_node_pointer_fails_if_bad_auth(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete(self.public_url, auth=self.user_two.auth, expect_errors=True)
# This could arguably be a 405, but we don't need to go crazy with status codes
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
self.public_project.reload()
assert_equal(node_count_before, len(self.public_project.nodes_pointer))
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
def test_deletes_public_node_pointer_succeeds_as_owner(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete(self.public_url, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 204)
assert_equal(node_count_before - 1, len(self.public_project.nodes_pointer))
def test_deletes_private_node_pointer_logged_out(self):
res = self.app.delete(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_deletes_private_node_pointer_logged_in_contributor(self):
res = self.app.delete(self.private_url, auth=self.user.auth)
self.project.reload() # Update the model to reflect changes made by the delete request
assert_equal(res.status_code, 204)
assert_equal(len(self.project.nodes_pointer), 0)
def test_deletes_private_node_pointer_logged_in_non_contributor(self):
res = self.app.delete(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
def test_return_deleted_public_node_pointer(self):
res = self.app.delete(self.public_url, auth=self.user.auth)
self.public_project.reload() # Update the model to reflect changes made by the delete request
assert_equal(res.status_code, 204)
# check that the deleted pointer cannot be returned
res = self.app.get(self.public_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_return_deleted_private_node_pointer(self):
res = self.app.delete(self.private_url, auth=self.user.auth)
self.project.reload() # Update the model to reflect changes made by the delete request
assert_equal(res.status_code, 204)
# check that the deleted pointer cannot be returned
res = self.app.get(self.private_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
# Regression test for https://openscience.atlassian.net/browse/OSF-4322
def test_delete_link_that_is_not_linked_to_correct_node(self):
project = ProjectFactory(creator=self.user)
# The node link belongs to a different project
res = self.app.delete(
'/{}nodes/{}/node_links/{}/'.format(API_BASE, project._id, self.public_pointer._id),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 404)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], 'Node link does not belong to the requested node.')
| apache-2.0 |
DazWorrall/ansible | lib/ansible/modules/cloud/google/gce_tag.py | 26 | 7635 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_tag
version_added: "2.0"
short_description: add or remove tag(s) to/from GCE instances
description:
- This module can add or remove tags U(https://cloud.google.com/compute/docs/label-or-tag-resources#tags)
to/from GCE instances. Use 'instance_pattern' to update multiple instances in a specified zone.
options:
instance_name:
description:
- The name of the GCE instance to add/remove tags. Required if instance_pattern is not specified.
required: false
default: null
aliases: []
instance_pattern:
description:
- The pattern of GCE instance names to match for adding/removing tags. The full Python regex syntax is supported.
See U(https://docs.python.org/2/library/re.html) for details.
If instance_name is not specified, this field is required.
required: false
default: null
aliases: []
version_added: "2.3"
tags:
description:
- comma-separated list of tags to add or remove
required: true
default: null
aliases: []
state:
description:
- desired state of the tags
required: false
default: "present"
choices: ["present", "absent"]
aliases: []
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
aliases: []
service_account_email:
description:
- service account email
required: false
default: null
aliases: []
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.17.0"
notes:
- Either I(instance_name) or I(instance_pattern) is required.
author:
- Do Hoang Khiem (dohoangkhiem@gmail.com)
- Tom Melendez (@supertom)
'''
EXAMPLES = '''
# Add tags 'http-server', 'https-server', 'staging' to instance name 'staging-server' in zone us-central1-a.
- gce_tag:
instance_name: staging-server
tags: http-server,https-server,staging
zone: us-central1-a
state: present
# Remove tags 'foo', 'bar' from instance 'test-server' in default zone (us-central1-a)
- gce_tag:
instance_name: test-server
tags: foo,bar
state: absent
# Add tags 'foo', 'bar' to instances in zone that match pattern
- gce_tag:
instance_pattern: test-server-*
tags: foo,bar
zone: us-central1-a
state: present
'''
import re
import traceback
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, InvalidRequestError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
def _union_items(baselist, comparelist):
"""Combine two lists, removing duplicates."""
return list(set(baselist) | set(comparelist))
def _intersect_items(baselist, comparelist):
"""Return matching items in both lists."""
return list(set(baselist) & set(comparelist))
def _get_changed_items(baselist, comparelist):
"""Return changed items as they relate to baselist."""
return list(set(baselist) & set(set(baselist) ^ set(comparelist)))
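# Illustration (added): the three set helpers drive the tag diffing in
# modify_tags() below. Result ordering is unspecified because the lists
# round-trip through sets:
#
#   _union_items(['a', 'b'], ['b', 'c'])       -> ['a', 'b', 'c'] (any order)
#   _intersect_items(['a', 'b'], ['b', 'c'])   -> ['b']
#   _get_changed_items(['a', 'b'], ['b', 'c']) -> ['a']  (in base, not in compare)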
def modify_tags(gce, module, node, tags, state='present'):
"""Modify tags on an instance."""
existing_tags = node.extra['tags']
tags = [x.lower() for x in tags]
tags_changed = []
if state == 'absent':
# tags changed are any that intersect
tags_changed = _intersect_items(existing_tags, tags)
if not tags_changed:
return False, None
# update instance with tags in existing tags that weren't specified
node_tags = _get_changed_items(existing_tags, tags)
else:
# tags changed are any that in the new list that weren't in existing
tags_changed = _get_changed_items(tags, existing_tags)
if not tags_changed:
return False, None
# update instance with the combined list
node_tags = _union_items(existing_tags, tags)
try:
gce.ex_set_node_tags(node, node_tags)
return True, tags_changed
except (GoogleBaseError, InvalidRequestError) as e:
module.fail_json(msg=str(e), changed=False)
def main():
module = AnsibleModule(
argument_spec=dict(
instance_name=dict(required=False),
instance_pattern=dict(required=False),
tags=dict(type='list', required=True),
state=dict(default='present', choices=['present', 'absent']),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
pem_file=dict(type='path'),
project_id=dict(),
),
mutually_exclusive=[
[ 'instance_name', 'instance_pattern' ]
],
required_one_of=[
[ 'instance_name', 'instance_pattern' ]
]
)
instance_name = module.params.get('instance_name')
instance_pattern = module.params.get('instance_pattern')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
changed = False
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
gce = gce_connect(module)
# Create list of nodes to operate on
matching_nodes = []
try:
if instance_pattern:
instances = gce.list_nodes(ex_zone=zone)
# no instances in zone
if not instances:
module.exit_json(changed=False, tags=tags, zone=zone, instances_updated=[])
try:
# Python regex fully supported: https://docs.python.org/2/library/re.html
p = re.compile(instance_pattern)
matching_nodes = [i for i in instances if p.search(i.name) is not None]
except re.error as e:
module.fail_json(msg='Regex error for pattern %s: %s' % (instance_pattern, e), changed=False)
else:
matching_nodes = [gce.ex_get_node(instance_name, zone=zone)]
except ResourceNotFoundError:
module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False, exception=traceback.format_exc())
# Tag nodes
instance_pattern_matches = []
tags_changed = []
for node in matching_nodes:
changed, tags_changed = modify_tags(gce, module, node, tags, state)
if changed:
instance_pattern_matches.append({'instance_name': node.name, 'tags_changed': tags_changed})
if instance_pattern:
module.exit_json(changed=changed, instance_pattern=instance_pattern, tags=tags_changed, zone=zone, instances_updated=instance_pattern_matches)
else:
module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
if __name__ == '__main__':
main()
| gpl-3.0 |
melqkiades/yelp | source/python/recommenders/context/neighbourhood/simple_neighbourhood_calculator.py | 1 | 2089 | from recommenders.context.neighbourhood.abstract_neighbourhood_calculator import \
AbstractNeighbourhoodCalculator
from utils import dictionary_utils
__author__ = 'fpena'
class SimpleNeighbourhoodCalculator(AbstractNeighbourhoodCalculator):
def __init__(self, user_similarity_calculator):
super(SimpleNeighbourhoodCalculator, self).__init__()
self.user_ids = None
self.user_dictionary = None
self.topic_indices = None
self.num_neighbours = None
self.similarity_matrix = None
self.user_similarity_calculator = user_similarity_calculator
def load(self, user_ids, user_dictionary, topic_indices, num_neighbours):
self.user_ids = user_ids
self.user_dictionary = user_dictionary
self.topic_indices = topic_indices
self.num_neighbours = num_neighbours
self.user_similarity_calculator.load(
self.user_ids, self.user_dictionary, self.topic_indices)
self.similarity_matrix =\
self.user_similarity_calculator.create_similarity_matrix()
def get_neighbourhood(self, user, item, context, threshold):
sim_users_matrix = self.similarity_matrix[user].copy()
sim_users_matrix.pop(user, None)
# We remove the users who have not rated the given item
sim_users_matrix = {
k: v for k, v in sim_users_matrix.items()
if item in self.user_dictionary[k].item_ratings}
# We remove neighbours that don't have a similarity with user
sim_users_matrix = {
k: v for k, v in sim_users_matrix.items()
if v}
# Sort the users by similarity
neighbourhood = dictionary_utils.sort_dictionary_keys(
sim_users_matrix)
if self.num_neighbours is None:
return neighbourhood
return neighbourhood[:self.num_neighbours]
def clear(self):
self.user_ids = None
self.user_dictionary = None
self.topic_indices = None
self.similarity_matrix = None
self.user_similarity_calculator.clear()
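# Hedged usage sketch (added; not from the original source): a caller is
# expected to load() once to build the similarity matrix, then query per
# (user, item) pair:
#
#   calc = SimpleNeighbourhoodCalculator(my_similarity_calculator)  # hypothetical calculator
#   calc.load(user_ids, user_dictionary, topic_indices, num_neighbours=30)
#   neighbours = calc.get_neighbourhood(user_id, item_id, context, threshold)
#
# Note that `context` and `threshold` are accepted but unused by this
# implementation; they exist to satisfy the abstract interface.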
| lgpl-2.1 |
open-synergy/account-invoicing | account_invoice_shipping_address/__openerp__.py | 15 | 1563 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2013 Andrea Cometa Perito Informatico (www.andreacometa.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Invoice Shipping Address",
'summary': """
Adds a shipping address field to the invoice.""",
"version": "8.0.0.1.1",
'category': 'Generic Modules/Accounting',
"depends": ["account", "sale", "sale_stock"],
"author": "Andrea Cometa, Agile Business Group,"
"Odoo Community Association (OCA)",
'website': 'http://www.andreacometa.it',
'license': 'AGPL-3',
'data': [
'invoice_view.xml',
],
'installable': True,
}
| agpl-3.0 |
trialreach/django-better-chunks | chunks/templatetags/chunks.py | 3 | 2103 | from django import template
from django.db import models
from django.core.cache import cache
from django.contrib.sites.models import Site
register = template.Library()
Chunk = models.get_model('chunks', 'Chunk')
CACHE_PREFIX = "chunk_"
def do_get_chunk(parser, token):
# split_contents() knows not to split quoted strings.
tokens = token.split_contents()
if len(tokens) < 2 or len(tokens) > 3:
raise template.TemplateSyntaxError, "%r tag should have either 2 or 3 arguments" % (tokens[0],)
if len(tokens) == 2:
tag_name, key = tokens
cache_time = 0
if len(tokens) == 3:
tag_name, key, cache_time = tokens
# Check to see if the key is properly double/single quoted
if not (key[0] == key[-1] and key[0] in ('"', "'")):
raise template.TemplateSyntaxError, "%r tag's argument should be in quotes" % tag_name
# Send key (without quotes) and caching time
return ChunkNode(key[1:-1], cache_time)
class ChunkNode(template.Node):
def __init__(self, key, cache_time=0):
self.key = key
self.cache_time = int(cache_time)
self.lang_code = template.Variable('LANGUAGE_CODE')
def render(self, context):
try:
lang = self.lang_code.resolve(context)
except template.VariableDoesNotExist:
# no LANGUAGE_CODE variable found in context, just return ''
return ''
site = Site.objects.get_current().id # Django caches get_current()
cache_key = CACHE_PREFIX + self.key + lang + str(site)
content = cache.get(cache_key)
if content is None:
try:
chunk = Chunk.objects.get(key=self.key, lang_code=lang, site=site)
content = chunk.content
except Chunk.DoesNotExist:
# cache missing models as empty chunk strings
content = ''
if self.cache_time > 0:
# don't even call cache if timeout is 0
cache.set(cache_key, content, self.cache_time)
return content
register.tag('chunk', do_get_chunk)
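# Template usage (added for illustration; derived from do_get_chunk above):
#
#   {% load chunks %}
#   {% chunk "home_banner" %}        {# rendered fresh on every request #}
#   {% chunk "home_banner" 300 %}    {# cached for 300 seconds per language/site #}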
| bsd-3-clause |
lucidbard/mendeley-python-sdk | test/resources/documents/test_list_group_documents.py | 2 | 1175 | from test import get_user_session, cassette
from test.resources.documents import delete_all_group_documents, create_group_document
def test_should_page_through_group_documents():
session = get_user_session()
delete_all_group_documents()
with cassette('fixtures/resources/documents/list_group_documents/page_through_documents.yaml'):
create_group_document(session, 'title 1')
create_group_document(session, 'title 2')
create_group_document(session, 'title 3')
first_page = session.groups.get('164d48fb-2343-332d-b566-1a4884a992e4').documents.list(page_size=2)
assert len(first_page.items) == 2
assert first_page.count == 3
assert first_page.items[0].title == 'title 1'
assert first_page.items[0].group.name == 'Basket weaving'
assert first_page.items[1].title == 'title 2'
assert first_page.items[1].group.name == 'Basket weaving'
second_page = first_page.next_page
assert len(second_page.items) == 1
assert second_page.count == 3
assert second_page.items[0].title == 'title 3'
assert second_page.items[0].group.name == 'Basket weaving'
| apache-2.0 |
ihipi/Sick-Beard | lib/hachoir_metadata/audio.py | 90 | 15495 | from lib.hachoir_metadata.metadata import (registerExtractor,
Metadata, RootMetadata, MultipleMetadata)
from lib.hachoir_parser.audio import AuFile, MpegAudioFile, RealAudioFile, AiffFile, FlacParser
from lib.hachoir_parser.container import OggFile, RealMediaFile
from lib.hachoir_core.i18n import _
from lib.hachoir_core.tools import makePrintable, timedelta2seconds, humanBitRate
from datetime import timedelta
from lib.hachoir_metadata.metadata_item import QUALITY_FAST, QUALITY_NORMAL, QUALITY_BEST
from lib.hachoir_metadata.safe import fault_tolerant, getValue
def computeComprRate(meta, size):
if not meta.has("duration") \
or not meta.has("sample_rate") \
or not meta.has("bits_per_sample") \
or not meta.has("nb_channel") \
or not size:
return
orig_size = timedelta2seconds(meta.get("duration")) * meta.get('sample_rate') * meta.get('bits_per_sample') * meta.get('nb_channel')
meta.compr_rate = float(orig_size) / size
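# Worked example (added): for a 60 s stereo stream at 44100 Hz / 16 bits,
# orig_size = 60 * 44100 * 16 * 2 = 84,672,000 bits, so a compressed stream
# occupying 8,467,200 bits (~1 MB) yields compr_rate = 10.0. Note that `size`
# is expressed in bits, as hachoir field sizes are.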
def computeBitRate(meta):
if not meta.has("bits_per_sample") \
or not meta.has("nb_channel") \
or not meta.has("sample_rate"):
return
meta.bit_rate = meta.get('bits_per_sample') * meta.get('nb_channel') * meta.get('sample_rate')
VORBIS_KEY_TO_ATTR = {
"ARTIST": "artist",
"ALBUM": "album",
"TRACKNUMBER": "track_number",
"TRACKTOTAL": "track_total",
"ENCODER": "producer",
"TITLE": "title",
"LOCATION": "location",
"DATE": "creation_date",
"ORGANIZATION": "organization",
"GENRE": "music_genre",
"": "comment",
"COMPOSER": "music_composer",
"DESCRIPTION": "comment",
"COMMENT": "comment",
"WWW": "url",
"WOAF": "url",
"LICENSE": "copyright",
}
@fault_tolerant
def readVorbisComment(metadata, comment):
metadata.producer = getValue(comment, "vendor")
for item in comment.array("metadata"):
if "=" in item.value:
key, value = item.value.split("=", 1)
key = key.upper()
if key in VORBIS_KEY_TO_ATTR:
key = VORBIS_KEY_TO_ATTR[key]
setattr(metadata, key, value)
elif value:
metadata.warning("Skip Vorbis comment %s: %s" % (key, value))
class OggMetadata(MultipleMetadata):
def extract(self, ogg):
granule_quotient = None
for index, page in enumerate(ogg.array("page")):
if "segments" not in page:
continue
page = page["segments"]
if "vorbis_hdr" in page:
meta = Metadata(self)
self.vorbisHeader(page["vorbis_hdr"], meta)
self.addGroup("audio[]", meta, "Audio")
if not granule_quotient and meta.has("sample_rate"):
granule_quotient = meta.get('sample_rate')
if "theora_hdr" in page:
meta = Metadata(self)
self.theoraHeader(page["theora_hdr"], meta)
self.addGroup("video[]", meta, "Video")
if "video_hdr" in page:
meta = Metadata(self)
self.videoHeader(page["video_hdr"], meta)
self.addGroup("video[]", meta, "Video")
if not granule_quotient and meta.has("frame_rate"):
granule_quotient = meta.get('frame_rate')
if "comment" in page:
readVorbisComment(self, page["comment"])
if 3 <= index:
# Only process pages 0..3
break
# Compute duration
if granule_quotient and QUALITY_NORMAL <= self.quality:
page = ogg.createLastPage()
if page and "abs_granule_pos" in page:
try:
self.duration = timedelta(seconds=float(page["abs_granule_pos"].value) / granule_quotient)
except OverflowError:
pass
def videoHeader(self, header, meta):
meta.compression = header["fourcc"].display
meta.width = header["width"].value
meta.height = header["height"].value
meta.bits_per_pixel = header["bits_per_sample"].value
if header["time_unit"].value:
meta.frame_rate = 10000000.0 / header["time_unit"].value
def theoraHeader(self, header, meta):
meta.compression = "Theora"
meta.format_version = "Theora version %u.%u (revision %u)" % (\
header["version_major"].value,
header["version_minor"].value,
header["version_revision"].value)
meta.width = header["frame_width"].value
meta.height = header["frame_height"].value
if header["fps_den"].value:
meta.frame_rate = float(header["fps_num"].value) / header["fps_den"].value
if header["aspect_ratio_den"].value:
meta.aspect_ratio = float(header["aspect_ratio_num"].value) / header["aspect_ratio_den"].value
meta.pixel_format = header["pixel_format"].display
meta.comment = "Quality: %s" % header["quality"].value
def vorbisHeader(self, header, meta):
meta.compression = u"Vorbis"
meta.sample_rate = header["audio_sample_rate"].value
meta.nb_channel = header["audio_channels"].value
meta.format_version = u"Vorbis version %s" % header["vorbis_version"].value
meta.bit_rate = header["bitrate_nominal"].value
class AuMetadata(RootMetadata):
def extract(self, audio):
self.sample_rate = audio["sample_rate"].value
self.nb_channel = audio["channels"].value
self.compression = audio["codec"].display
if "info" in audio:
self.comment = audio["info"].value
self.bits_per_sample = audio.getBitsPerSample()
computeBitRate(self)
if "audio_data" in audio:
if self.has("bit_rate"):
self.duration = timedelta(seconds=float(audio["audio_data"].size) / self.get('bit_rate'))
computeComprRate(self, audio["audio_data"].size)
class RealAudioMetadata(RootMetadata):
FOURCC_TO_BITRATE = {
u"28_8": 15200, # 28.8 kbit/sec (audio bit rate: 15.2 kbit/s)
u"14_4": 8000, # 14.4 kbit/sec
u"lpcJ": 8000, # 14.4 kbit/sec
}
def extract(self, real):
version = real["version"].value
if "metadata" in real:
self.useMetadata(real["metadata"])
self.useRoot(real)
self.format_version = "Real audio version %s" % version
if version == 3:
size = getValue(real, "data_size")
elif "filesize" in real and "headersize" in real:
size = (real["filesize"].value + 40) - (real["headersize"].value + 16)
else:
size = None
if size:
size *= 8
if self.has("bit_rate"):
sec = float(size) / self.get('bit_rate')
self.duration = timedelta(seconds=sec)
computeComprRate(self, size)
@fault_tolerant
def useMetadata(self, info):
self.title = info["title"].value
self.author = info["author"].value
self.copyright = info["copyright"].value
self.comment = info["comment"].value
@fault_tolerant
def useRoot(self, real):
self.bits_per_sample = 16 # FIXME: Is that correct?
if real["version"].value != 3:
self.sample_rate = real["sample_rate"].value
self.nb_channel = real["channels"].value
else:
self.sample_rate = 8000
self.nb_channel = 1
fourcc = getValue(real, "FourCC")
if fourcc:
self.compression = fourcc
try:
self.bit_rate = self.FOURCC_TO_BITRATE[fourcc]
except LookupError:
pass
class RealMediaMetadata(MultipleMetadata):
KEY_TO_ATTR = {
"generated by": "producer",
"creation date": "creation_date",
"modification date": "last_modification",
"description": "comment",
}
def extract(self, media):
if "file_prop" in media:
self.useFileProp(media["file_prop"])
if "content_desc" in media:
self.useContentDesc(media["content_desc"])
for index, stream in enumerate(media.array("stream_prop")):
self.useStreamProp(stream, index)
@fault_tolerant
def useFileInfoProp(self, prop):
key = prop["name"].value.lower()
value = prop["value"].value
if key in self.KEY_TO_ATTR:
setattr(self, self.KEY_TO_ATTR[key], value)
elif value:
self.warning("Skip %s: %s" % (prop["name"].value, value))
@fault_tolerant
def useFileProp(self, prop):
self.bit_rate = prop["avg_bit_rate"].value
self.duration = timedelta(milliseconds=prop["duration"].value)
@fault_tolerant
def useContentDesc(self, content):
self.title = content["title"].value
self.author = content["author"].value
self.copyright = content["copyright"].value
self.comment = content["comment"].value
@fault_tolerant
def useStreamProp(self, stream, index):
meta = Metadata(self)
meta.comment = "Start: %s" % stream["stream_start"].value
if getValue(stream, "mime_type") == "logical-fileinfo":
for prop in stream.array("file_info/prop"):
self.useFileInfoProp(prop)
else:
meta.bit_rate = stream["avg_bit_rate"].value
meta.duration = timedelta(milliseconds=stream["duration"].value)
meta.mime_type = getValue(stream, "mime_type")
meta.title = getValue(stream, "desc")
self.addGroup("stream[%u]" % index, meta, "Stream #%u" % (1+index))
class MpegAudioMetadata(RootMetadata):
TAG_TO_KEY = {
# ID3 version 2.2
"TP1": "author",
"COM": "comment",
"TEN": "producer",
"TRK": "track_number",
"TAL": "album",
"TT2": "title",
"TYE": "creation_date",
"TCO": "music_genre",
# ID3 version 2.3+
"TPE1": "author",
"COMM": "comment",
"TENC": "producer",
"TRCK": "track_number",
"TALB": "album",
"TIT2": "title",
"TYER": "creation_date",
"WXXX": "url",
"TCON": "music_genre",
"TLAN": "language",
"TCOP": "copyright",
"TDAT": "creation_date",
"TRDA": "creation_date",
"TORY": "creation_date",
"TIT1": "title",
}
def processID3v2(self, field):
# Read value
if "content" not in field:
return
content = field["content"]
if "text" not in content:
return
if "title" in content and content["title"].value:
value = "%s: %s" % (content["title"].value, content["text"].value)
else:
value = content["text"].value
# Known tag?
tag = field["tag"].value
if tag not in self.TAG_TO_KEY:
if tag:
if isinstance(tag, str):
tag = makePrintable(tag, "ISO-8859-1", to_unicode=True)
self.warning("Skip ID3v2 tag %s: %s" % (tag, value))
return
key = self.TAG_TO_KEY[tag]
setattr(self, key, value)
def readID3v2(self, id3):
for field in id3:
if field.is_field_set and "tag" in field:
self.processID3v2(field)
def extract(self, mp3):
if "/frames/frame[0]" in mp3:
frame = mp3["/frames/frame[0]"]
self.nb_channel = (frame.getNbChannel(), frame["channel_mode"].display)
self.format_version = u"MPEG version %s layer %s" % \
(frame["version"].display, frame["layer"].display)
self.sample_rate = frame.getSampleRate()
self.bits_per_sample = 16
if mp3["frames"].looksConstantBitRate():
self.computeBitrate(frame)
else:
self.computeVariableBitrate(mp3)
if "id3v1" in mp3:
id3 = mp3["id3v1"]
self.comment = id3["comment"].value
self.author = id3["author"].value
self.title = id3["song"].value
self.album = id3["album"].value
if id3["year"].value != "0":
self.creation_date = id3["year"].value
if "track_nb" in id3:
self.track_number = id3["track_nb"].value
if "id3v2" in mp3:
self.readID3v2(mp3["id3v2"])
if "frames" in mp3:
computeComprRate(self, mp3["frames"].size)
def computeBitrate(self, frame):
bit_rate = frame.getBitRate() # may returns None on error
if not bit_rate:
return
self.bit_rate = (bit_rate, _("%s (constant)") % humanBitRate(bit_rate))
self.duration = timedelta(seconds=float(frame["/frames"].size) / bit_rate)
def computeVariableBitrate(self, mp3):
if self.quality <= QUALITY_FAST:
return
count = 0
if QUALITY_BEST <= self.quality:
self.warning("Process all MPEG audio frames to compute exact duration")
max_count = None
else:
max_count = 500 * self.quality
total_bit_rate = 0.0
for index, frame in enumerate(mp3.array("frames/frame")):
if index < 3:
continue
bit_rate = frame.getBitRate()
if bit_rate:
total_bit_rate += float(bit_rate)
count += 1
if max_count and max_count <= count:
break
if not count:
return
bit_rate = total_bit_rate / count
self.bit_rate = (bit_rate,
_("%s (Variable bit rate)") % humanBitRate(bit_rate))
duration = timedelta(seconds=float(mp3["frames"].size) / bit_rate)
self.duration = duration
class AiffMetadata(RootMetadata):
def extract(self, aiff):
if "common" in aiff:
self.useCommon(aiff["common"])
computeBitRate(self)
@fault_tolerant
def useCommon(self, info):
self.nb_channel = info["nb_channel"].value
self.bits_per_sample = info["sample_size"].value
self.sample_rate = getValue(info, "sample_rate")
if self.has("sample_rate"):
rate = self.get("sample_rate")
if rate:
sec = float(info["nb_sample"].value) / rate
self.duration = timedelta(seconds=sec)
if "codec" in info:
self.compression = info["codec"].display
class FlacMetadata(RootMetadata):
def extract(self, flac):
if "metadata/stream_info/content" in flac:
self.useStreamInfo(flac["metadata/stream_info/content"])
if "metadata/comment/content" in flac:
readVorbisComment(self, flac["metadata/comment/content"])
@fault_tolerant
def useStreamInfo(self, info):
self.nb_channel = info["nb_channel"].value + 1
self.bits_per_sample = info["bits_per_sample"].value + 1
self.sample_rate = info["sample_hertz"].value
sec = info["total_samples"].value
if sec:
sec = float(sec) / info["sample_hertz"].value
self.duration = timedelta(seconds=sec)
registerExtractor(AuFile, AuMetadata)
registerExtractor(MpegAudioFile, MpegAudioMetadata)
registerExtractor(OggFile, OggMetadata)
registerExtractor(RealMediaFile, RealMediaMetadata)
registerExtractor(RealAudioFile, RealAudioMetadata)
registerExtractor(AiffFile, AiffMetadata)
registerExtractor(FlacParser, FlacMetadata)
| gpl-3.0 |
CognitiveScale/ansible-modules-extras | notification/mqtt.py | 101 | 4848 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: mqtt
short_description: Publish a message on an MQTT topic for the IoT
version_added: "1.2"
description:
- Publish a message on an MQTT topic.
options:
server:
description:
- MQTT broker address/name
required: false
default: localhost
port:
description:
- MQTT broker port number
required: false
default: 1883
username:
description:
- Username to authenticate against the broker.
required: false
password:
description:
- Password for C(username) to authenticate against the broker.
required: false
client_id:
description:
- MQTT client identifier
required: false
default: hostname + pid
topic:
description:
- MQTT topic name
required: true
default: null
payload:
description:
- Payload. The special string C("None") may be used to send a NULL
(i.e. empty) payload, which is useful to simply notify with the I(topic)
or to clear previously retained messages.
required: true
default: null
qos:
description:
- QoS (Quality of Service)
required: false
default: 0
choices: [ "0", "1", "2" ]
retain:
description:
- Setting this flag causes the broker to retain (i.e. keep) the message so that
applications that subsequently subscribe to the topic can receive the last
retained message immediately.
required: false
default: False
# informational: requirements for nodes
requirements: [ mosquitto ]
notes:
- This module requires a connection to an MQTT broker such as Mosquitto
U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)).
author: "Jan-Piet Mens (@jpmens)"
'''
EXAMPLES = '''
- local_action: mqtt
topic=service/ansible/{{ ansible_hostname }}
payload="Hello at {{ ansible_date_time.iso8601 }}"
qos=0
retain=false
client_id=ans001
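# Editor's addition (illustrative): per the payload option documented above,
# the literal string "None" publishes an empty payload; combined with retain,
# this clears a previously retained message on the topic.
- local_action: mqtt
    topic=service/ansible/{{ ansible_hostname }}
    payload="None"
    qos=1
    retain=true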
'''
# ===========================================
# MQTT module support methods.
#
HAS_PAHOMQTT = True
try:
import socket
import paho.mqtt.publish as mqtt
except ImportError:
HAS_PAHOMQTT = False
# ===========================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
server=dict(default='localhost'),
port=dict(default=1883),
topic=dict(required=True),
payload=dict(required=True),
client_id=dict(default=None),
qos=dict(default="0", choices=["0", "1", "2"]),
retain=dict(default=False, type='bool'),
username=dict(default=None),
password=dict(default=None),
),
supports_check_mode=True
)
if not HAS_PAHOMQTT:
module.fail_json(msg="Paho MQTT is not installed")
server = module.params.get("server", 'localhost')
port = module.params.get("port", 1883)
topic = module.params.get("topic")
payload = module.params.get("payload")
client_id = module.params.get("client_id", '')
qos = int(module.params.get("qos", 0))
retain = module.params.get("retain")
username = module.params.get("username", None)
password = module.params.get("password", None)
if client_id is None:
client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
if payload == 'None':
    payload = None
auth = None
if username is not None:
    auth = {'username': username, 'password': password}
try:
rc = mqtt.single(topic, payload,
qos=qos,
retain=retain,
client_id=client_id,
hostname=server,
port=port,
auth=auth)
except Exception as e:
module.fail_json(msg="unable to publish to MQTT broker %s" % e)
module.exit_json(changed=False, topic=topic)
# import module snippets
from ansible.module_utils.basic import *
main()
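# Editor's note (illustrative, not part of the module): with a broker such
# as Mosquitto listening on localhost:1883 and paho-mqtt installed, the
# module can be exercised ad hoc; the topic and payload below are
# placeholders only:
#
#   ansible localhost -m mqtt -a "topic=test/ansible payload=hello"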
| gpl-3.0 |
pbrod/numpy | numpy/core/fromnumeric.py | 2 | 122368 | """Module containing non-deprecated functions borrowed from Numeric.
"""
import functools
import types
import warnings
import numpy as np
from . import multiarray as mu
from . import overrides
from . import umath as um
from . import numerictypes as nt
from .multiarray import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
_gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# A TypeError occurs if the object does have such a method in its
# class, but its signature is not identical to that of NumPy's. This
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
passkwargs = {k: v for k, v in kwargs.items()
if v is not np._NoValue}
if type(obj) is not mu.ndarray:
try:
reduction = getattr(obj, method)
except AttributeError:
pass
else:
# This branch is needed for reductions like any which don't
# support a dtype.
if dtype is not None:
return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
else:
return reduction(axis=axis, out=out, **passkwargs)
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
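# Editor's sketch of the dispatch order implemented above (illustrative,
# not part of NumPy): for a non-ndarray input, the object's own reduction
# method wins if present; otherwise the ufunc reduction is used.
#
#   class WithSum:
#       def sum(self, axis=None, out=None, **kw):
#           return 42
#
#   np.sum(WithSum())   # -> 42, via the duck-typed method
#   np.sum([1, 2, 3])   # -> 6, via np.add.reduce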
def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
return (a, out)
@array_function_dispatch(_take_dispatcher)
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : array_like (Ni..., M, Nk...)
The source array.
indices : array_like (Nj...)
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional (Ni..., Nj..., Nk...)
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if `mode='raise'`; use other modes for better performance.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray (Ni..., Nj..., Nk...)
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
take_along_axis : Take elements by matching the array and the index arrays
Notes
-----
By eliminating the inner loop in the description above, and using `s_` to
build simple slice objects, `take` can be expressed in terms of applying
fancy indexing to each 1-d slice::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
for kk in ndindex(Nk):
out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
For this reason, it is equivalent to (but faster than) the following use
of `apply_along_axis`::
out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
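# Editor's illustration (not part of NumPy): the `mode` argument documented
# above controls out-of-bounds index handling. A minimal sketch:
#
#   a = np.array([4, 3, 5, 7])
#   np.take(a, [0, 5], mode='wrap')   # index 5 wraps to 1 -> array([4, 3])
#   np.take(a, [0, 5], mode='clip')   # index 5 clips to 3 -> array([4, 7])
#   np.take(a, [0, 5], mode='raise')  # raises IndexError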
def _reshape_dispatcher(a, newshape, order=None):
return (a,)
# not deprecated --- copy if necessary, view otherwise
@array_function_dispatch(_reshape_dispatcher)
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised when the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose makes the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
Traceback (most recent call last):
...
AttributeError: Incompatible shape for in-place modification. Use
`.reshape()` to make a copy with the desired shape.
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
def _choose_dispatcher(a, choices, out=None, mode=None):
yield a
yield from choices
yield out
@array_function_dispatch(_choose_dispatcher)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a list of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of ``n`` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each ``i``. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode='raise'`` (the default), then, first of all, each element of
``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose
that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)``
position in ``Ba`` - then the value at the same position in the new array
is the value in ``Bchoices[i]`` at that same position;
* if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed)
integer; negative integers are mapped to 0; values greater than ``n-1``
are mapped to ``n-1``; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in ``[0, n-1]``, where ``n`` is the
number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
cases any integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if ``mode='raise'``; use other modes for better performance.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside ``[0, n-1]`` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod ``n``
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
numpy.take_along_axis : Preferable if `choices` is an array
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
def _repeat_dispatcher(a, repeats, axis=None):
return (a,)
@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
unique : Find the unique elements of an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def _put_dispatcher(a, ind, v, mode=None):
return (a, ind, v)
@array_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers. In 'raise' mode,
if an exception occurs the target array may still be modified.
See Also
--------
putmask, place
put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError as e:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__)) from e
return put(ind, v, mode=mode)
def _swapaxes_dispatcher(a, axis1, axis2):
return (a,)
@array_function_dispatch(_swapaxes_dispatcher)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def _transpose_dispatcher(a, axes=None):
return (a,)
@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
"""
Reverse or permute the axes of an array; returns the modified array.
For an array a with two axes, transpose(a) gives the matrix transpose.
Refer to `numpy.ndarray.transpose` for full documentation.
Parameters
----------
a : array_like
Input array.
axes : tuple or list of ints, optional
If specified, it must be a tuple or list which contains a permutation of
[0,1,..,N-1] where N is the number of axes of a. The i'th axis of the
returned array will correspond to the axis numbered ``axes[i]`` of the
input. If not specified, defaults to ``range(a.ndim)[::-1]``, which
reverses the order of the axes.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
ndarray.transpose : Equivalent method
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
>>> x = np.ones((2, 3, 4, 5))
>>> np.transpose(x).shape
(5, 4, 3, 2)
"""
return _wrapfunc(a, 'transpose', axes)
def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_partition_dispatcher)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The k-th value of the element
will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
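# Editor's illustration (not part of NumPy): a common use of partition is
# selecting an order statistic without a full sort, e.g. the median:
#
#   x = np.array([7, 1, 9, 3, 5])
#   k = x.size // 2
#   median = np.partition(x, k)[k]   # 5; only element k is guaranteed placed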
def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argpartition_dispatcher)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
before it and all larger elements behind it. The order of all
elements in the partitions is undefined. If provided with a
sequence of k-th it will partition all of them into their sorted
position at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
yields the partitioned `a`, irrespective of dimensionality.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort.
take_along_axis : Apply ``index_array`` from argpartition
to an array as if by calling partition.
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
Multi-dimensional array:
>>> x = np.array([[3, 4, 2], [1, 3, 1]])
>>> index_array = np.argpartition(x, kth=1, axis=-1)
>>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)
array([[2, 3, 4],
[1, 1, 3]])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
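# Editor's illustration (not part of NumPy): argpartition is the usual tool
# for top-k selection, since only the split point is guaranteed ordered:
#
#   x = np.array([9, 1, 7, 3, 5])
#   k = 2
#   top_k_idx = np.argpartition(x, -k)[-k:]   # indices of the two largest
#   x[top_k_idx]                              # array([7, 9]), in some order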
def _sort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_sort_dispatcher)
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'heapsort' 3 O(n*log(n)) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
`heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted.
It, along with 'mergesort' is currently mapped to
`timsort <https://en.wikipedia.org/wiki/Timsort>`_
or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
.. versionchanged:: 1.18.0
NaT now sorts to the end of arrays for consistency with NaN.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
def _argsort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argsort_dispatcher)
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
take_along_axis : Apply ``index_array`` from argsort
to an array as if by calling sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
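# Editor's illustration (not part of NumPy): a frequent argsort pattern is
# reordering one array by the sort order of another:
#
#   names = np.array(['b', 'a', 'c'])
#   scores = np.array([20, 30, 10])
#   names[np.argsort(scores)]   # array(['c', 'b', 'a'], dtype='<U1')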
def _argmax_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmax_dispatcher)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmax to an array as if by calling max.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indexes of the maximal elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmax(x, axis=-1)
>>> # Same as np.max(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[4],
[3]])
>>> # Same as np.max(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([4, 3])
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def _argmin_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmin_dispatcher)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmin to an array as if by calling min.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmin(x, axis=-1)
>>> # Same as np.min(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[2],
[0]])
>>> # Same as np.min(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([2, 0])
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
return (a, v, sorter)
@array_function_dispatch(_searchsorted_dispatcher)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
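# Editor's illustration (not part of NumPy): searchsorted pairs naturally
# with np.insert to keep an array sorted after adding a value:
#
#   a = np.array([1, 3, 5, 7])
#   v = 4
#   np.insert(a, np.searchsorted(a, v), v)   # array([1, 3, 4, 5, 7])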
def _resize_dispatcher(a, new_shape):
return (a,)
@array_function_dispatch(_resize_dispatcher)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated iterating over the array in C-order.
See Also
--------
np.reshape : Reshape an array without changing the total size.
np.pad : Enlarge and pad an array.
np.repeat : Repeat elements of an array.
ndarray.resize : resize an array in-place.
Notes
-----
When the total size of the array does not change `~numpy.reshape` should
be used. In most other cases either indexing (to reduce the size)
or padding (to increase the size) may be a more appropriate solution.
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, iterating
over `a` in C-order, disregarding axes (and cycling back from the start if
the new shape is larger). This functionality is therefore not suitable to
resize images, or data where each axis represents a separate and distinct
entity.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
new_size = 1
for dim_length in new_shape:
new_size *= dim_length
if dim_length < 0:
raise ValueError('all elements of `new_shape` must be non-negative')
if a.size == 0 or new_size == 0:
# First case must zero fill. The second would have repeats == 0.
return np.zeros_like(a, shape=new_shape)
repeats = -(-new_size // a.size) # ceil division
a = concatenate((a,) * repeats)[:new_size]
return reshape(a, new_shape)
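# Editor's illustration (not part of NumPy): the docstring above notes that
# np.resize repeats data while ndarray.resize zero-fills. Contrast:
#
#   a = np.array([1, 2, 3])
#   np.resize(a, 5)    # array([1, 2, 3, 1, 2]) -- repeats `a`
#   b = a.copy()
#   b.resize(5)        # b is now array([1, 2, 3, 0, 0]) -- zero-fills in place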
def _squeeze_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_squeeze_dispatcher)
def squeeze(a, axis=None):
"""
Remove axes of length one from `a`.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the entries of length one in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`. Note that if all axes are squeezed,
the result is a 0d array and not a scalar.
Raises
------
ValueError
If `axis` is not None, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding entries of length one
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
>>> x = np.array([[1234]])
>>> x.shape
(1, 1)
>>> np.squeeze(x)
array(1234) # 0d array
>>> np.squeeze(x).shape
()
>>> np.squeeze(x)[()]
1234
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze', axis=axis)
if axis is None:
return squeeze()
else:
return squeeze(axis=axis)
def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
return (a,)
@array_function_dispatch(_diagonal_dispatcher)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, then a 1-D array containing the diagonal and of the
same type as `a` is returned unless `a` is a `matrix`, in which case
a 1-D array rather than a (2-D) `matrix` is returned in order to
maintain backward compatibility.
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
The anti-diagonal can be obtained by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.fliplr(a).diagonal() # Horizontal flip
array([2, 4, 6])
>>> np.flipud(a).diagonal() # Vertical flip
array([6, 4, 2])
Note that the order in which the diagonal is retrieved varies depending
on the flip function.
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
def _trace_dispatcher(
a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_trace_dispatcher)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
def _ravel_dispatcher(a, order=None):
return (a,)
@array_function_dispatch(_ravel_dispatcher)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
As of NumPy 1.10, the returned array will have the same type as the input
array. (for example, a masked array will be returned for a masked array
input)
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
y is an array of the same subtype as `a`, with shape ``(a.size,)``.
Note that matrices are special cased for backward compatibility, if `a`
is a matrix, then y is a 1-D ndarray.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ravel(x)
array([1, 2, 3, 4, 5, 6])
>>> x.reshape(-1)
array([1, 2, 3, 4, 5, 6])
>>> np.ravel(x, order='F')
array([1, 4, 2, 5, 3, 6])
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> np.ravel(x.T)
array([1, 4, 2, 5, 3, 6])
>>> np.ravel(x.T, order='A')
array([1, 2, 3, 4, 5, 6])
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
def _nonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_nonzero_dispatcher)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
.. note::
When called on a zero-d array or scalar, ``nonzero(a)`` is treated
as ``nonzero(atleast_1d(a))``.
.. deprecated:: 1.17.0
Use `atleast_1d` explicitly if this behavior is deliberate.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``a[a.astype(bool)]`` or ``a[a != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]])
A common use for ``nonzero`` is to find the indices of an array where
a condition is True. Given an array `a`, the condition ``a > 3`` is a
boolean array and, since False is interpreted as 0, ``np.nonzero(a > 3)``
yields the indices of `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9])
>>> a[a > 3] # prefer this spelling
array([4, 5, 6, 7, 8, 9])
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
def _shape_dispatcher(a):
return (a,)
@array_function_dispatch(_shape_dispatcher)
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
len
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def _compress_dispatcher(condition, a, axis=None, out=None):
return (condition, a, out)
@array_function_dispatch(_compress_dispatcher)
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
extract : Equivalent method when working on 1-D arrays
:ref:`ufuncs-output-type`
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
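As a quick illustrative check, ``compress`` on a 1-D array matches
``extract``:
>>> c = np.array([1, 2, 3, 4])
>>> np.array_equal(np.compress(c > 2, c), np.extract(c > 2, c))
True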
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
return (a, a_min, a_max)
@array_function_dispatch(_clip_dispatcher)
def clip(a, a_min, a_max, out=None, **kwargs):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to, but faster than, ``np.minimum(a_max, np.maximum(a, a_min))``.
No check is performed to ensure ``a_min < a_max``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min, a_max : array_like or None
Minimum and maximum value. If ``None``, clipping is not performed on
the corresponding edge. Only one of `a_min` and `a_max` may be
``None``. Both are broadcast against `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.17.0
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
When `a_min` is greater than `a_max`, `clip` returns an
array in which all values are equal to `a_max`,
as shown in the second example.
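An illustrative equivalence check against the min/max formulation:
>>> v = np.array([-2, 0, 3, 9])
>>> np.array_equal(np.clip(v, 0, 5), np.minimum(5, np.maximum(v, 0)))
True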
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> np.clip(a, 8, 1)
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_sum_dispatcher)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
initial : scalar, optional
Starting value for the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
add.reduce : Equivalent functionality of `add`.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
For floating point numbers the numerical precision of sum (and
``np.add.reduce``) is in general limited by directly adding each number
individually to the result causing rounding errors in every step.
However, often numpy will use a numerically better approach (partial
pairwise summation) leading to improved precision in many use-cases.
This improved precision is always provided when no ``axis`` is given.
When ``axis`` is given, it will depend on which axis is summed.
Technically, to provide the best speed possible, the improved precision
is only used when the summation is along the fast axis in memory.
Note that the exact precision may vary depending on other parameters.
In contrast to NumPy, Python's ``math.fsum`` function uses a slower but
more precise approach to summation.
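An illustrative comparison (plain Python floats, so the results shown
are exact):
>>> import math
>>> vals = [0.1] * 10
>>> sum(vals) == 1.0, math.fsum(vals) == 1.0
(False, True)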
Especially when summing a large number of lower precision floating point
numbers, such as ``float32``, numerical errors can become significant.
In such cases it can be advisable to use `dtype="float64"` to use a higher
precision for the output.
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
>>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
array([1., 5.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
You can also start the sum with a value other than zero:
>>> np.sum([10], initial=5)
15
"""
if isinstance(a, _gentype):
# 2018-02-25, 1.15.0
warnings.warn(
"Calling np.sum(generator) is deprecated, and in the future will give a different result. "
"Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
DeprecationWarning, stacklevel=3)
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
initial=initial, where=where)
def _any_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=np._NoValue):
return (a, where, out)
@array_function_dispatch(_any_dispatcher)
def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
Returns a single boolean if `axis` is ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (``axis=None``) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for any `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False])
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> np.any([[True, False], [False, False]], where=[[False], [True]])
False
>>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
return _wrapreduction(a, np.logical_or, 'any', axis, None, out,
keepdims=keepdims, where=where)
def _all_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_all_dispatcher)
def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (``axis=None``) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more
details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for all `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False])
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> np.all([[True, True], [False, True]], where=[[True], [False]])
True
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
"""
return _wrapreduction(a, np.logical_and, 'all', axis, None, out,
keepdims=keepdims, where=where)
def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumsum_dispatcher)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
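The last element of the flattened cumulative sum equals the total sum
(an illustrative check):
>>> b = np.array([1, 2, 3])
>>> np.cumsum(b)[-1] == np.sum(b)
True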
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
return (a, out)
@array_function_dispatch(_ptp_dispatcher)
def ptp(a, axis=None, out=None, keepdims=np._NoValue):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
.. warning::
`ptp` preserves the data type of the array. This means the
return value for an input of signed integers with n bits
(e.g., `np.int8`, `np.int16`, etc.) is also a signed integer
with n bits. In that case, peak-to-peak values greater than
``2**(n-1)-1`` will be returned as negative values. An example
with a work-around is shown below.
Parameters
----------
a : array_like
Input values.
axis : None or int or tuple of ints, optional
Axis along which to find the peaks. By default, flatten the
array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.15.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `ptp` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.array([[4, 9, 2, 10],
... [6, 9, 7, 12]])
>>> np.ptp(x, axis=1)
array([8, 6])
>>> np.ptp(x, axis=0)
array([2, 0, 5, 2])
>>> np.ptp(x)
10
This example shows that a negative value can be returned when
the input is an array of signed integers.
>>> y = np.array([[1, 127],
... [0, 127],
... [-1, 127],
... [-2, 127]], dtype=np.int8)
>>> np.ptp(y, axis=1)
array([ 126, 127, -128, -127], dtype=int8)
A work-around is to use the `view()` method to view the result as
unsigned integers with the same bit width:
>>> np.ptp(y, axis=1).view(np.uint8)
array([126, 127, 128, 129], dtype=uint8)
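As expected from the definition, ``ptp`` agrees with ``amax - amin``
(illustrative):
>>> np.ptp(x) == np.amax(x) - np.amin(x)
True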
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
ptp = a.ptp
except AttributeError:
pass
else:
return ptp(axis=axis, out=out, **kwargs)
return _methods._ptp(a, axis=axis, out=out, **kwargs)
def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amax_dispatcher)
def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
initial : scalar, optional
The minimum value of an output element. Must be present to allow
computation on empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the maximum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of two arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
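An illustrative check that the two spellings agree for two rows:
>>> rows = np.array([[1, 9], [7, 3]])
>>> np.array_equal(np.maximum(rows[0], rows[1]), np.amax(rows, axis=0))
True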
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> np.amax(a, where=[False, True], initial=-1, axis=0)
array([-1, 3])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.amax(b, where=~np.isnan(b), initial=-1)
4.0
>>> np.nanmax(b)
4.0
You can use an initial value to compute the maximum of an empty slice, or
to initialize it to a different value:
>>> np.max([[-50], [10]], axis=-1, initial=0)
array([ 0, 10])
Notice that the initial value is used as one of the elements for which the
maximum is determined, unlike the ``default`` argument of Python's ``max``
function, which is only used for empty iterables.
>>> np.max([5], initial=6)
6
>>> max([5], default=6)
5
"""
return _wrapreduction(a, np.maximum, 'max', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amin_dispatcher)
def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
initial : scalar, optional
The maximum value of an output element. Must be present to allow
computation on empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the minimum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of two arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
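An illustrative check that the two spellings agree for two rows:
>>> rows = np.array([[1, 9], [7, 3]])
>>> np.array_equal(np.minimum(rows[0], rows[1]), np.amin(rows, axis=0))
True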
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> np.amin(a, where=[False, True], initial=10, axis=0)
array([10, 1])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.amin(b, where=~np.isnan(b), initial=10)
0.0
>>> np.nanmin(b)
0.0
>>> np.min([[-50], [10]], axis=-1, initial=0)
array([-50, 0])
Notice that the initial value is used as one of the elements for which the
minimum is determined, unlike the ``default`` argument of Python's ``min``
function, which is only used for empty iterables; that is, ``initial`` is
not the same as ``default``.
>>> np.min([6], initial=5)
5
>>> min([6], default=5)
6
"""
return _wrapreduction(a, np.minimum, 'min', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _alen_dispathcer(a):
return (a,)
@array_function_dispatch(_alen_dispathcer)
def alen(a):
"""
Return the length of the first dimension of the input array.
.. deprecated:: 1.18
`numpy.alen` is deprecated, use `len` instead.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
# NumPy 1.18.0, 2019-08-02
warnings.warn(
"`np.alen` is deprecated, use `len` instead",
DeprecationWarning, stacklevel=2)
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_prod_dispatcher)
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x)
16 # may vary
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
3.0
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
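An illustrative cross-check against a plain Python reduction:
>>> import functools, operator
>>> np.prod([2, 3, 4]) == functools.reduce(operator.mul, [2, 3, 4])
True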
"""
return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
keepdims=keepdims, initial=initial, where=where)
def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumprod_dispatcher)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
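The last element of the flattened cumulative product equals the total
product (an illustrative check):
>>> np.cumprod([2, 3, 4])[-1] == np.prod([2, 3, 4])
True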
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def _ndim_dispatcher(a):
return (a,)
@array_function_dispatch(_ndim_dispatcher)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def _size_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_size_dispatcher)
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def _around_dispatcher(a, decimals=None, out=None):
return (a, out)
@array_function_dispatch(_around_dispatcher)
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See :ref:`ufuncs-output-type` for more
details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
``np.around`` uses a fast but sometimes inexact algorithm to round
floating-point datatypes. For positive `decimals` it is equivalent to
``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
error due to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling by powers
of ten. For instance, note the extra "1" in the following:
>>> np.round(56294995342131.5, 3)
56294995342131.51
If your goal is to print such values with a fixed number of decimals, it is
preferable to use numpy's float printing routines to limit the number of
printed decimals:
>>> np.format_float_positional(56294995342131.5, precision=3)
'56294995342131.5'
The float printing routines use an accurate but much more computationally
demanding algorithm to compute the number of digits after the decimal
point.
Alternatively, Python's built-in `round` function uses a more accurate
but slower algorithm for 64-bit floating point values:
>>> round(56294995342131.5, 3)
56294995342131.5
>>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
(16.06, 16.05)
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_mean_dispatcher)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
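An illustrative check of the definition:
>>> v = np.arange(6.)
>>> v.sum() / v.size == np.mean(v)
True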
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([2., 3.])
>>> np.mean(a, axis=1)
array([1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806 # may vary
Specifying a where argument:
>>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]])
>>> np.mean(a)
12.0
>>> np.mean(a, where=[[True], [False], [False]])
9.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_std_dispatcher)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the standard deviation.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
``x = abs(a - a.mean())**2``.
The average squared deviation is typically calculated as ``x.sum() / N``,
where ``N = len(x)``. If, however, `ddof` is specified, the divisor
``N - ddof`` is used instead. In standard statistical practice, ``ddof=1``
provides an unbiased estimator of the variance of the infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables. The standard deviation computed in this
function is the square root of the estimated variance, so even with
``ddof=1``, it will not be an unbiased estimate of the standard deviation
per se.
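An illustrative check of the definition (``np.isclose`` is used so the
check does not depend on the exact evaluation order):
>>> v = np.array([1., 2., 3., 4.])
>>> bool(np.isclose(np.std(v), np.sqrt(np.mean(np.abs(v - v.mean())**2))))
True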
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177 # may vary
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.std(a)
2.614064523559687 # may vary
>>> np.std(a, where=[[True], [True], [False]])
2.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_var_dispatcher)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims`, any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the variance. See `~numpy.ufunc.reduce` for
details.
.. versionadded:: 1.20.0
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``.
The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
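An illustrative check of the definition:
>>> v = np.array([1., 2., 3., 4.])
>>> bool(np.isclose(np.var(v), np.mean(np.abs(v - v.mean())**2)))
True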
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759 # may vary
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.var(a)
6.833333333333333 # may vary
>>> np.var(a, where=[[True], [True], [False]])
4.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
# Aliases of other functions. These have their own definitions only so that
# they can have unique docstrings.
@array_function_dispatch(_around_dispatcher)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return around(a, decimals=decimals, out=out)
@array_function_dispatch(_prod_dispatcher, verify=False)
def product(*args, **kwargs):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
return prod(*args, **kwargs)
@array_function_dispatch(_cumprod_dispatcher, verify=False)
def cumproduct(*args, **kwargs):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return cumprod(*args, **kwargs)
@array_function_dispatch(_any_dispatcher, verify=False)
def sometrue(*args, **kwargs):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function; see for details.
"""
return any(*args, **kwargs)
@array_function_dispatch(_all_dispatcher, verify=False)
def alltrue(*args, **kwargs):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
return all(*args, **kwargs)
| bsd-3-clause |
fhedberg/ardupilot | libraries/AP_HAL/examples/DSP_test/gyro_isb_reader.py | 21 | 4048 | #!/usr/bin/env python
'''
extract ISBH and ISBD messages from AP_Logging files and produce C++ arrays for consumption by the DSP subsystem
'''
from __future__ import print_function
import os
import sys
import time
import copy
import numpy
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
def isb_parser(logfile):
'''display fft for raw ACC data in logfile'''
'''object to store data about a single FFT plot'''
class ISBData(object):
def __init__(self, ffth):
self.seqno = -1
self.fftnum = ffth.N
self.sensor_type = ffth.type
self.instance = ffth.instance
self.sample_rate_hz = ffth.smp_rate
self.multiplier = ffth.mul
self.data = {}
self.data["X"] = []
self.data["Y"] = []
self.data["Z"] = []
self.holes = False
self.freq = None
def add_isb(self, fftd):
if fftd.N != self.fftnum:
print("Skipping ISBD with wrong fftnum (%u vs %u)\n" % (fftd.fftnum, self.fftnum), file=sys.stderr)
return
if self.holes:
print("Skipping ISBD(%u) for ISBH(%u) with holes in it" % (fftd.seqno, self.fftnum), file=sys.stderr)
return
if fftd.seqno != self.seqno+1:
print("ISBH(%u) has holes in it" % fftd.N, file=sys.stderr)
self.holes = True
return
self.seqno += 1
self.data["X"].extend(fftd.x)
self.data["Y"].extend(fftd.y)
self.data["Z"].extend(fftd.z)
def prefix(self):
if self.sensor_type == 0:
return "Accel"
elif self.sensor_type == 1:
return "Gyro"
else:
return "?Unknown Sensor Type?"
def tag(self):
return str(self)
def __str__(self):
return "%s[%u]" % (self.prefix(), self.instance)
mlog = mavutil.mavlink_connection(logfile)
isb_frames = []
isbdata = None
msgcount = 0
start_time = time.time()
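# Stream protocol (inferred from the messages handled below): each ISBH
# header opens a new FFT window, and each subsequent ISBD message carries a
# sequence-numbered batch of X/Y/Z samples belonging to that window.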
while True:
m = mlog.recv_match(condition=args.condition)
if m is None:
break
msgcount += 1
if msgcount % 1000 == 0:
sys.stderr.write(".")
msg_type = m.get_type()
if msg_type == "ISBH":
if isbdata is not None:
isb_frames.append(isbdata)
isbdata = ISBData(m)
continue
if msg_type == "ISBD":
if isbdata is None:
sys.stderr.write("?(fftnum=%u)" % m.N)
continue
isbdata.add_isb(m)
print("", file=sys.stderr)
time_delta = time.time() - start_time
print("Extracted %u fft data sets" % len(isb_frames), file=sys.stderr)
sample_rates = {}
counts = {}
print("#include \"GyroFrame.h\"")
print("const uint32_t NUM_FRAMES = %d;" % len(isb_frames))
print("const GyroFrame gyro_frames[] = {")
sample_rate = None
for isb_frame in isb_frames:
if isb_frame.sensor_type == 1 and isb_frame.instance == 0: # gyro data
print(" {")
for axis in [ "X","Y","Z" ]:
# normalize data
d = numpy.array(isb_frame.data[axis]) # / float(isb_frame.multiplier)
print(" { " + ", ".join(d.astype(numpy.dtype(str))) + " },")
if len(d) == 0:
print("No data?!?!?!", file=sys.stderr)
if sample_rate is None:
sample_rate = isb_frame.sample_rate_hz
print(" },")
print("};")
print("const uint16_t SAMPLE_RATE = %d;" % sample_rate)
for filename in args.logs:
isb_parser(filename)
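# Example invocation (hypothetical log file name), redirecting the generated
# C++ table into a source file consumed by the DSP test:
#
#     gyro_isb_reader.py flight_with_isb_logging.bin > GyroFrame_data.cpp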
| gpl-3.0 |
TedaLIEz/sentry | tests/sentry/api/bases/test_project.py | 23 | 2981 | from __future__ import absolute_import
from mock import Mock
from sentry.api.bases.project import ProjectPermission
from sentry.models import ApiKey, OrganizationMemberType, ProjectKey
from sentry.testutils import TestCase
class ProjectPermissionBase(TestCase):
def setUp(self):
self.org = self.create_organization()
self.team = self.create_team(organization=self.org)
self.project = self.create_project(organization=self.org)
super(ProjectPermissionBase, self).setUp()
def has_object_perm(self, auth, user, obj, method='GET'):
perm = ProjectPermission()
request = Mock()
request.auth = auth
request.user = user
request.method = method
return perm.has_object_permission(request, None, obj)
class ProjectPermissionTest(ProjectPermissionBase):
def test_regular_user(self):
user = self.create_user()
assert not self.has_object_perm(None, user, self.project)
def test_superuser(self):
user = self.create_user(is_superuser=True)
assert self.has_object_perm(None, user, self.project)
def test_org_member_without_team_access(self):
user = self.create_user()
om = self.create_member(
user=user,
organization=self.org,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
assert not self.has_object_perm(None, user, self.project)
def test_org_member_with_global_access(self):
user = self.create_user()
om = self.create_member(
user=user,
organization=self.org,
type=OrganizationMemberType.MEMBER,
has_global_access=True,
)
assert self.has_object_perm(None, user, self.project)
def test_org_member_with_team_access(self):
user = self.create_user()
om = self.create_member(
user=user,
organization=self.org,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
teams=[self.team]
)
assert self.has_object_perm(None, user, self.project)
def test_project_key_with_project_access(self):
key = ProjectKey.objects.create(
project=self.project,
)
assert self.has_object_perm(key, None, self.project)
def test_project_key_without_project_access(self):
key = ProjectKey.objects.create(
project=self.create_project(organization=self.org),
)
assert not self.has_object_perm(key, None, self.project)
def test_api_key_with_org_access(self):
key = ApiKey.objects.create(
organization=self.org,
)
assert self.has_object_perm(key, None, self.project)
def test_api_key_without_org_access(self):
key = ApiKey.objects.create(
organization=self.create_organization(),
)
assert not self.has_object_perm(key, None, self.project)
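# A sketch of running just this module (assuming the repo's standard py.test
# setup):
#
#     py.test tests/sentry/api/bases/test_project.py -v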
| bsd-3-clause |
ashvina/heron | heron/tools/tracker/src/python/main.py | 4 | 10515 | #!/usr/bin/env python2.7
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' main.py '''
from __future__ import print_function
import argparse
import os
import signal
import sys
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.options import define
from tornado.httpclient import AsyncHTTPClient
import heron.tools.common.src.python.utils.config as common_config
import heron.common.src.python.utils.log as log
from heron.tools.tracker.src.python import constants
from heron.tools.tracker.src.python import handlers
from heron.tools.tracker.src.python import utils
from heron.tools.tracker.src.python.config import Config, STATEMGRS_KEY
from heron.tools.tracker.src.python.tracker import Tracker
Log = log.Log
class Application(tornado.web.Application):
""" Tornado server application """
def __init__(self, config):
AsyncHTTPClient.configure(None, defaults=dict(request_timeout=120.0))
self.tracker = Tracker(config)
self.tracker.synch_topologies()
tornadoHandlers = [
(r"/", handlers.MainHandler),
(r"/clusters", handlers.ClustersHandler, {"tracker":self.tracker}),
(r"/topologies", handlers.TopologiesHandler, {"tracker":self.tracker}),
(r"/topologies/states", handlers.StatesHandler, {"tracker":self.tracker}),
(r"/topologies/info", handlers.TopologyHandler, {"tracker":self.tracker}),
(r"/topologies/logicalplan", handlers.LogicalPlanHandler, {"tracker":self.tracker}),
(r"/topologies/config", handlers.TopologyConfigHandler, {"tracker":self.tracker}),
(r"/topologies/containerfiledata", handlers.ContainerFileDataHandler,
{"tracker":self.tracker}),
(r"/topologies/containerfiledownload", handlers.ContainerFileDownloadHandler,
{"tracker":self.tracker}),
(r"/topologies/containerfilestats",
handlers.ContainerFileStatsHandler, {"tracker":self.tracker}),
(r"/topologies/physicalplan", handlers.PhysicalPlanHandler, {"tracker":self.tracker}),
(r"/topologies/packingplan", handlers.PackingPlanHandler, {"tracker":self.tracker}),
# Deprecated. See https://github.com/apache/incubator-heron/issues/1754
(r"/topologies/executionstate", handlers.ExecutionStateHandler, {"tracker":self.tracker}),
(r"/topologies/schedulerlocation", handlers.SchedulerLocationHandler,
{"tracker":self.tracker}),
(r"/topologies/metadata", handlers.MetaDataHandler, {"tracker":self.tracker}),
(r"/topologies/runtimestate", handlers.RuntimeStateHandler, {"tracker":self.tracker}),
(r"/topologies/metrics", handlers.MetricsHandler, {"tracker":self.tracker}),
(r"/topologies/metricstimeline", handlers.MetricsTimelineHandler, {"tracker":self.tracker}),
(r"/topologies/metricsquery", handlers.MetricsQueryHandler, {"tracker":self.tracker}),
(r"/topologies/exceptions", handlers.ExceptionHandler, {"tracker":self.tracker}),
(r"/topologies/exceptionsummary", handlers.ExceptionSummaryHandler,
{"tracker":self.tracker}),
(r"/machines", handlers.MachinesHandler, {"tracker":self.tracker}),
(r"/topologies/pid", handlers.PidHandler, {"tracker":self.tracker}),
(r"/topologies/jstack", handlers.JstackHandler, {"tracker":self.tracker}),
(r"/topologies/jmap", handlers.JmapHandler, {"tracker":self.tracker}),
(r"/topologies/histo", handlers.MemoryHistogramHandler, {"tracker":self.tracker}),
(r"(.*)", handlers.DefaultHandler),
]
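# A few representative requests against the routes above (hypothetical local
# deployment, assuming the default port from constants.DEFAULT_PORT is 8888):
#
#     curl http://localhost:8888/topologies
#     curl "http://localhost:8888/topologies/logicalplan?cluster=local&environ=default&topology=MyTopology"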
settings = dict(
debug=True,
serve_traceback=True,
static_path=os.path.dirname(__file__)
)
tornado.web.Application.__init__(self, tornadoHandlers, **settings)
Log.info("Tracker has started")
def stop(self):
self.tracker.stop_sync()
# pylint: disable=protected-access
class _HelpAction(argparse._HelpAction):
""" HelpAction """
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
# retrieve subparsers from parser
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
# there will probably only be one subparser_action,
# but better safe than sorry
for subparsers_action in subparsers_actions:
# get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
print("Subparser '{}'".format(choice))
print(subparser.format_help())
parser.exit()
# pylint: disable=bad-super-call
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
""" Subcommand help formatter """
def _format_action(self, action):
parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)
if action.nargs == argparse.PARSER:
parts = "\n".join(parts.split("\n")[1:])
return parts
def add_titles(parser):
""" add titles """
parser._positionals.title = "Required arguments"
parser._optionals.title = "Optional arguments"
return parser
def add_arguments(parser):
""" add arguments """
default_config_file = os.path.join(
utils.get_heron_tracker_conf_dir(), constants.DEFAULT_CONFIG_FILE)
parser.add_argument(
'--config-file',
metavar='(a string; path to config file; default: "' + default_config_file + '")',
default=default_config_file)
parser.add_argument(
'--type',
metavar='(a string; type of state manager (zookeeper or file, etc.); example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_TYPE) + ')',
choices=["file", "zookeeper"])
parser.add_argument(
'--name',
metavar='(a string; name to be used for the state manager; example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_NAME) + ')')
parser.add_argument(
'--rootpath',
metavar='(a string; root path where all the states are stored; example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_ROOTPATH) + ')')
parser.add_argument(
'--tunnelhost',
metavar='(a string; tunnel host, used if ssh tunneling is needed to connect; example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_TUNNELHOST) + ')')
parser.add_argument(
'--hostport',
metavar='(a string; only used to connect to zk, must be of the form \'host:port\';'\
' example: ' + str(constants.DEFAULT_STATE_MANAGER_HOSTPORT) + ')')
parser.add_argument(
'--port',
metavar='(an integer; port to listen; default: ' + str(constants.DEFAULT_PORT) + ')',
type=int,
default=constants.DEFAULT_PORT)
parser.add_argument(
'--verbose',
action='store_true')
return parser
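# Example command lines built from the flags above (hypothetical values):
#
#     heron-tracker --type=file --rootpath=~/.herondata/repository/state/local
#     heron-tracker --type=zookeeper --hostport=localhost:2181 --port=8888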
def create_parsers():
""" create argument parser """
parser = argparse.ArgumentParser(
epilog='For detailed documentation, go to http://github.com/apache/incubator-heron',
usage="%(prog)s [options] [help]",
add_help=False)
parser = add_titles(parser)
parser = add_arguments(parser)
ya_parser = argparse.ArgumentParser(
parents=[parser],
formatter_class=SubcommandHelpFormatter,
add_help=False)
subparsers = ya_parser.add_subparsers(
title="Available commands")
help_parser = subparsers.add_parser(
'help',
help='Prints help',
add_help=False)
help_parser.set_defaults(help=True)
subparsers.add_parser(
'version',
help='Prints version',
add_help=True)
return parser, ya_parser
def define_options(port, config_file):
""" define Tornado global variables """
define("port", default=port)
define("config_file", default=config_file)
def create_tracker_config(namespace):
# try to parse the config file if we find one
config_file = namespace["config_file"]
config = utils.parse_config_file(config_file)
if config is None:
Log.debug("Config file does not exists: %s" % config_file)
config = {STATEMGRS_KEY:[{}]}
# update the config if we have any flags
config_flags = ["type", "name", "rootpath", "tunnelhost", "hostport"]
config_to_update = config[STATEMGRS_KEY][0]
for flag in config_flags:
value = namespace.get(flag, None)
if value is not None:
config_to_update[flag] = value
return config
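# A sketch of the flag-to-config mapping above (hypothetical values): invoking
# with --type=zookeeper --hostport=localhost:2181 rewrites the first entry of
# config[STATEMGRS_KEY] so that it reads
# {"type": "zookeeper", "hostport": "localhost:2181", ...}.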
def main():
""" main """
# create the parser and parse the arguments
(parser, _) = create_parsers()
(args, remaining) = parser.parse_known_args()
if remaining == ['help']:
parser.print_help()
parser.exit()
elif remaining == ['version']:
common_config.print_build_info()
parser.exit()
elif remaining != []:
Log.error('Invalid subcommand')
sys.exit(1)
namespace = vars(args)
log.set_logging_level(namespace)
# set Tornado global option
define_options(namespace['port'], namespace['config_file'])
config = Config(create_tracker_config(namespace))
# create Tornado application
application = Application(config)
# pylint: disable=unused-argument
# SIGINT handler:
# 1. stop all the running zkstatemanager and filestatemanagers
# 2. stop the Tornado IO loop
def signal_handler(signum, frame):
# start a new line after ^C character because this looks nice
print('\n', end='')
application.stop()
tornado.ioloop.IOLoop.instance().stop()
# associate SIGINT and SIGTERM with a handler
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
Log.info("Running on port: %d", namespace['port'])
if namespace["config_file"]:
Log.info("Using config file: %s", namespace['config_file'])
Log.info("Using state manager:\n" + str(config))
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(namespace['port'])
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| apache-2.0 |