repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
benhoff/listenerplugins | listenerplugins/password.py | 24 | 2419 | import string
import random as std_random
from cloudbot import hook
# Pick the best available CSPRNG for password generation.
try:
    from Crypto.Random import random
    gen = random.StrongRandom()
except ImportError:
    # pycrypto unavailable: fall back to the OS CSPRNG via SystemRandom
    # (still cryptographically strong, unlike the default Mersenne Twister).
    gen = std_random.SystemRandom()

# Word list used by word_password; one candidate word per line.
with open("data/password_words.txt") as f:
    common_words = [line.strip() for line in f]
@hook.command(autohelp=False)
def password(text, notice):
    """[length [types]] - generates a password of <length> (default 12). [types] can include 'alpha', 'no caps',
    'numeric', 'symbols' or any combination: eg. 'numbers symbols'"""
    okay = []

    # find the length needed for the password (first token, default 12)
    numb = text.split(" ")
    try:
        length = int(numb[0])
    except ValueError:
        length = 12

    if length > 50:
        notice("Maximum length is 50 characters.")
        return

    # add alpha characters
    if "alpha" in text or "letter" in text:
        okay += list(string.ascii_lowercase)
        # adds capital characters if not told not to
        if "no caps" not in text:
            okay += list(string.ascii_uppercase)

    # add numbers
    if "numeric" in text or "number" in text:
        okay += list(string.digits)

    # add symbols
    if "symbol" in text or "special" in text:
        sym = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '=', '_', '+', '[', ']', '{', '}', '\\', '|', ';',
               ':', "'", '.', '>', ',', '<', '/', '?', '`', '~', '"']
        okay += sym

    # defaults to lowercase alpha + numbers password if the okay list is empty
    if not okay:
        okay = list(string.ascii_lowercase) + list(string.digits)

    # BUG FIX: use the module-level strong generator `gen`; the bare name
    # `random` is undefined here whenever the pycrypto import failed.
    gen.shuffle(okay)

    chars = [gen.choice(okay) for _ in range(length)]
    notice("".join(chars))
@hook.command("wpass", "wordpass", "wordpassword", autohelp=False)
def word_password(text, notice):
    """[length] - generates an easy to remember password with [length] (default 3) commonly used words"""
    try:
        length = int(text)
    except ValueError:
        length = 3

    if length > 10:
        # BUG FIX: the limit here is 10 *words*, not 50 characters; the old
        # message was copy-pasted from the character-password command.
        notice("Maximum length is 10 words.")
        return

    # generate password from the common-words list using the strong RNG
    words = [gen.choice(common_words) for _ in range(length)]

    notice("Your password is '{}'. Feel free to remove the spaces when using it.".format(" ".join(words)))
| gpl-3.0 |
a10networks/a10sdk-python | a10sdk/core/slb/slb_template_cipher.py | 2 | 7785 | from a10sdk.common.A10BaseClass import A10BaseClass
class CipherCfg(A10BaseClass):
    """One cipher-suite entry of a cipher template.

    This class does not support CRUD operations on its own; use the parent
    :class:`Cipher` object instead.

    Attributes mirror the AXAPI ``cipher-cfg`` object:
        priority     -- cipher priority, 1-100 (device default 1)
        cipher_suite -- one of the SSL3_*/TLS1_* cipher-suite enum names
        DeviceProxy  -- device proxy for REST operations and session handling
                        (refer to ``common/device_proxy.py``)
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "cipher-cfg"
        self.DeviceProxy = ""
        self.priority = ""
        self.cipher_suite = ""
        # Any AXAPI field may be supplied directly as a keyword argument.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class Cipher(A10BaseClass):
    """SSL Cipher Template -- the ``"PARENT"`` class for this module.

    Supports CRUD operations and inherits from ``common/A10BaseClass``.

    URL for this object::

        https://<Hostname|Ip address>//axapi/v3/slb/template/cipher/{name}

    Attributes:
        name        -- cipher template name (1-63 chars, required)
        cipher_cfg  -- list of cipher-suite entries (see :class:`CipherCfg`)
        uuid        -- uuid of the object (modify-not-allowed)
        DeviceProxy -- device proxy for REST operations and session handling
                       (refer to ``common/device_proxy.py``)
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = ["name"]
        self.b_key = "cipher"
        self.a10_url = "/axapi/v3/slb/template/cipher/{name}"
        self.DeviceProxy = ""
        self.name = ""
        self.cipher_cfg = []
        self.uuid = ""
        # Accept arbitrary AXAPI fields as keyword arguments.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
| apache-2.0 |
rxtender/rxt-backend-base | test/python3/base_type/test_base_type.py | 1 | 1258 | from unittest import TestCase
from .base_type_rxt import SingleField, SingleField_deserialize, SingleField_serialize,MultiFields
class BaseTypeSerializationTestCase(TestCase):
    """Exercises construction and JSON (de)serialization of generated base types."""

    def test_create_SingleField(self):
        """A single-field type stores its constructor argument."""
        instance = SingleField(42)
        self.assertEqual(42, instance.foo32)

    def test_create_MultiFields(self):
        """A multi-field type maps positional arguments to fields in order."""
        instance = MultiFields(1, 2, 3, 4, True, 1.2, "baz")
        checks = [
            (1, instance.foo32),
            (2, instance.bar32),
            (3, instance.foo64),
            (4, instance.bar64),
            (True, instance.biz),
            (1.2, instance.buz),
            ('baz', instance.name),
        ]
        for expected, actual in checks:
            self.assertEqual(expected, actual)

    def test_deserialize_SingleField(self):
        """Valid JSON round-trips into a SingleField instance."""
        payload = '{"foo32": 42}'
        decoded = SingleField_deserialize(payload)
        self.assertIs(type(decoded), SingleField)
        self.assertEqual(42, decoded.foo32)

    def test_deserialize_SingleField_failure(self):
        """Missing required field makes deserialization return None."""
        decoded = SingleField_deserialize('{}')
        self.assertIs(decoded, None)

    def test_serialize_SingleField(self):
        """Serialization produces the expected JSON document."""
        expectedJson = '{"foo32": 42}'
        encoded = SingleField_serialize(SingleField(42))
        self.assertEqual(expectedJson, encoded)
| mit |
ayushin78/coala | tests/settings/DocstringMetadataTest.py | 33 | 3492 | import unittest
from coalib.settings.DocstringMetadata import DocstringMetadata
from collections import OrderedDict
class DocstringMetadataTest(unittest.TestCase):
    """Tests DocstringMetadata.from_docstring parsing of descriptions,
    ``:param:``/``@param`` entries and ``:return:``/``@return`` entries.

    NOTE(review): the multi-line docstring literals below are kept flush-left
    exactly as found; original internal indentation may have been lost in
    transit — confirm against upstream if a test starts failing.
    """

    def test_from_docstring(self):
        """Covers empty, description-only, param-only and return-only docstrings."""
        self.check_from_docstring_dataset('')
        self.check_from_docstring_dataset(' description only ',
                                          desc='description only')
        self.check_from_docstring_dataset(' :param test: test description ',
                                          param_dict={
                                              'test': 'test description'})
        self.check_from_docstring_dataset(' @param test: test description ',
                                          param_dict={
                                              'test': 'test description'})
        self.check_from_docstring_dataset(' :return: something ',
                                          retval_desc='something')
        self.check_from_docstring_dataset(' @return: something ',
                                          retval_desc='something')
        # A later ':return:' entry overrides the earlier '@return:' text.
        self.check_from_docstring_dataset("""
Main description
@param p1: this is
a multiline desc for p1
:param p2: p2 description
@return: retval description
:return: retval description
override
""", desc='Main description', param_dict={
            'p1': 'this is\na multiline desc for p1\n',
            'p2': 'p2 description\n'
        }, retval_desc='retval description override')

    def test_str(self):
        """str() of the metadata yields only the description part."""
        uut = DocstringMetadata.from_docstring(
            '''
Description of something. No params.
''')
        self.assertEqual(str(uut), 'Description of something. No params.')

        uut = DocstringMetadata.from_docstring(
            '''
Description of something with params.
:param x: Imagine something.
:param y: x^2
''')
        self.assertEqual(str(uut), 'Description of something with params.')

    def test_unneeded_docstring_space(self):
        """Param descriptions given on their own lines are whitespace-stripped."""
        uut = DocstringMetadata.from_docstring(
            """
This is a description about some bear which does some amazing
things. This is a multiline description for this testcase.
:param language:
The programming language.
:param coalang_dir:
External directory for coalang file.
""")
        expected_output = OrderedDict([('language', ('The programming '
                                                     'language.')),
                                       ('coalang_dir', ('External directory '
                                                        'for coalang file.'))])
        self.assertEqual(uut.param_dict, expected_output)

    def check_from_docstring_dataset(self,
                                     docstring,
                                     desc='',
                                     param_dict=None,
                                     retval_desc=''):
        """Helper: parse ``docstring`` and compare desc/param_dict/retval_desc."""
        param_dict = param_dict or {}

        self.assertIsInstance(docstring,
                              str,
                              'docstring needs to be a string for this test.')

        doc_comment = DocstringMetadata.from_docstring(docstring)
        self.assertEqual(doc_comment.desc, desc)
        self.assertEqual(doc_comment.param_dict, param_dict)
        self.assertEqual(doc_comment.retval_desc, retval_desc)
| agpl-3.0 |
KohlsTechnology/ansible | test/units/modules/system/test_iptables.py | 36 | 17522 | from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.module_utils import basic
from ansible.modules.system import iptables
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
def get_bin_path(*args, **kwargs):
    """Test stand-in for AnsibleModule.get_bin_path.

    Ignores all arguments and always reports the iptables binary path.
    """
    binary = "/sbin/iptables"
    return binary
class TestIptables(ModuleTestCase):
    """Unit tests for the ``iptables`` module.

    Every test stubs out ``AnsibleModule.run_command`` (and ``get_bin_path``
    via :func:`setUp`) and asserts on the exact iptables command lines the
    module builds -- check (``-C``), flush (``-F``), policy (``-P``),
    insert (``-I``), append (``-A``) and delete (``-D``) -- plus the
    ``changed`` flag reported in the module's exit JSON.
    """

    def setUp(self):
        super(TestIptables, self).setUp()
        # Resolve /sbin/iptables without touching the real system PATH.
        self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path)
        self.mock_get_bin_path.start()
        self.addCleanup(self.mock_get_bin_path.stop)  # ensure that the patching is 'undone'

    def test_without_required_parameters(self):
        """Failure must occur when all parameters are missing"""
        with self.assertRaises(AnsibleFailJson):
            set_module_args({})
            iptables.main()

    def test_flush_table_without_chain(self):
        """Test flush without chain, flush the table"""
        set_module_args({
            'flush': True,
        })

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.return_value = 0, '', ''  # successful execution, no output
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        self.assertEqual(run_command.call_count, 1)
        # Default table is 'filter'; no chain given, so a plain table flush.
        self.assertEqual(run_command.call_args[0][0][0], '/sbin/iptables')
        self.assertEqual(run_command.call_args[0][0][1], '-t')
        self.assertEqual(run_command.call_args[0][0][2], 'filter')
        self.assertEqual(run_command.call_args[0][0][3], '-F')

    def test_flush_table_check_true(self):
        """Test flush without parameters and check == true"""
        set_module_args({
            'flush': True,
            '_ansible_check_mode': True,
        })

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.return_value = 0, '', ''  # successful execution, no output
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        # Check mode: nothing must actually be executed.
        self.assertEqual(run_command.call_count, 0)

# TODO ADD test flush table nat
# TODO ADD test flush with chain
# TODO ADD test flush with chain and table nat

    def test_policy_table(self):
        """Test change policy of a chain"""
        set_module_args({
            'policy': 'ACCEPT',
            'chain': 'INPUT',
        })
        commands_results = [
            (0, 'Chain INPUT (policy DROP)\n', ''),  # current policy differs
            (0, '', '')
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        self.assertEqual(run_command.call_count, 2)
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'filter', '-L', 'INPUT',
        ])
        self.assertEqual(run_command.call_args_list[1][0][0], [
            '/sbin/iptables', '-t', 'filter', '-P', 'INPUT', 'ACCEPT',
        ])

    def test_policy_table_no_change(self):
        """Test don't change policy of a chain if the policy is right"""
        set_module_args({
            'policy': 'ACCEPT',
            'chain': 'INPUT',
        })
        commands_results = [
            (0, 'Chain INPUT (policy ACCEPT)\n', ''),  # already ACCEPT
            (0, '', '')
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertFalse(result.exception.args[0]['changed'])

        # Only the read (-L) happens; no -P call.
        self.assertEqual(run_command.call_count, 1)
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'filter', '-L', 'INPUT',
        ])

    def test_policy_table_changed_false(self):
        """Test policy change reported in check mode without executing it"""
        set_module_args({
            'policy': 'ACCEPT',
            'chain': 'INPUT',
            '_ansible_check_mode': True,
        })
        commands_results = [
            (0, 'Chain INPUT (policy DROP)\n', ''),
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        # Check mode: only the read (-L) runs.
        self.assertEqual(run_command.call_count, 1)
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'filter', '-L', 'INPUT',
        ])

# TODO ADD test policy without chain fail
# TODO ADD test policy with chain don't exists
# TODO ADD test policy with wrong choice fail

    def test_insert_rule_change_false(self):
        """Test insert of a rule in check mode"""
        set_module_args({
            'chain': 'OUTPUT',
            'source': '1.2.3.4/32',
            'destination': '7.8.9.10/42',
            'jump': 'ACCEPT',
            'action': 'insert',
            '_ansible_check_mode': True,
        })

        commands_results = [
            (1, '', ''),  # -C fails: rule does not exist yet
            (0, '', '')
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        self.assertEqual(run_command.call_count, 1)
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'filter', '-C', 'OUTPUT',
            '-s', '1.2.3.4/32', '-d', '7.8.9.10/42', '-j', 'ACCEPT'
        ])

    def test_insert_rule(self):
        """Test insert of a rule"""
        set_module_args({
            'chain': 'OUTPUT',
            'source': '1.2.3.4/32',
            'destination': '7.8.9.10/42',
            'jump': 'ACCEPT',
            'action': 'insert'
        })

        commands_results = [
            (1, '', ''),  # -C fails: rule does not exist yet
            (0, '', '')
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        self.assertEqual(run_command.call_count, 2)
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'filter', '-C', 'OUTPUT',
            '-s', '1.2.3.4/32', '-d', '7.8.9.10/42', '-j', 'ACCEPT'
        ])
        self.assertEqual(run_command.call_args_list[1][0][0], [
            '/sbin/iptables', '-t', 'filter', '-I', 'OUTPUT',
            '-s', '1.2.3.4/32', '-d', '7.8.9.10/42', '-j', 'ACCEPT'
        ])

    def test_append_rule_check_mode(self):
        """Test append a redirection rule in check mode"""
        set_module_args({
            'chain': 'PREROUTING',
            'source': '1.2.3.4/32',
            'destination': '7.8.9.10/42',
            'jump': 'REDIRECT',
            'table': 'nat',
            'to_destination': '5.5.5.5/32',
            'protocol': 'udp',
            'destination_port': '22',
            'to_ports': '8600',
            '_ansible_check_mode': True,
        })

        commands_results = [
            (1, '', ''),  # -C fails: rule does not exist yet
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        self.assertEqual(run_command.call_count, 1)
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'nat', '-C', 'PREROUTING',
            '-p', 'udp', '-s', '1.2.3.4/32', '-d', '7.8.9.10/42',
            '-j', 'REDIRECT', '--to-destination', '5.5.5.5/32',
            '--destination-port', '22', '--to-ports', '8600'
        ])

    def test_append_rule(self):
        """Test append a redirection rule"""
        set_module_args({
            'chain': 'PREROUTING',
            'source': '1.2.3.4/32',
            'destination': '7.8.9.10/42',
            'jump': 'REDIRECT',
            'table': 'nat',
            'to_destination': '5.5.5.5/32',
            'protocol': 'udp',
            'destination_port': '22',
            'to_ports': '8600'
        })

        commands_results = [
            (1, '', ''),  # -C fails: rule does not exist yet
            (0, '', '')
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        self.assertEqual(run_command.call_count, 2)
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'nat', '-C', 'PREROUTING',
            '-p', 'udp', '-s', '1.2.3.4/32', '-d', '7.8.9.10/42',
            '-j', 'REDIRECT', '--to-destination', '5.5.5.5/32',
            '--destination-port', '22', '--to-ports', '8600'
        ])
        self.assertEqual(run_command.call_args_list[1][0][0], [
            '/sbin/iptables', '-t', 'nat', '-A', 'PREROUTING',
            '-p', 'udp', '-s', '1.2.3.4/32', '-d', '7.8.9.10/42',
            '-j', 'REDIRECT', '--to-destination', '5.5.5.5/32',
            '--destination-port', '22', '--to-ports', '8600'
        ])

    def test_remove_rule(self):
        """Test removal of an existing SNAT rule"""
        set_module_args({
            'chain': 'PREROUTING',
            'source': '1.2.3.4/32',
            'destination': '7.8.9.10/42',
            'jump': 'SNAT',
            'table': 'nat',
            'to_source': '5.5.5.5/32',
            'protocol': 'udp',
            'source_port': '22',
            'to_ports': '8600',
            'state': 'absent',
            'in_interface': 'eth0',
            'out_interface': 'eth1',
            'comment': 'this is a comment'
        })

        commands_results = [
            (0, '', ''),  # -C succeeds: rule exists, must be deleted
            (0, '', ''),
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        self.assertEqual(run_command.call_count, 2)
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'nat', '-C', 'PREROUTING',
            '-p', 'udp', '-s', '1.2.3.4/32', '-d', '7.8.9.10/42',
            '-j', 'SNAT', '--to-source', '5.5.5.5/32',
            '-i', 'eth0', '-o', 'eth1',
            '--source-port', '22', '--to-ports', '8600',
            '-m', 'comment', '--comment', 'this is a comment'
        ])
        self.assertEqual(run_command.call_args_list[1][0][0], [
            '/sbin/iptables', '-t', 'nat', '-D', 'PREROUTING',
            '-p', 'udp', '-s', '1.2.3.4/32', '-d', '7.8.9.10/42',
            '-j', 'SNAT', '--to-source', '5.5.5.5/32',
            '-i', 'eth0', '-o', 'eth1',
            '--source-port', '22', '--to-ports', '8600',
            '-m', 'comment', '--comment', 'this is a comment'
        ])

    def test_remove_rule_check_mode(self):
        """Test removal of an existing SNAT rule in check mode"""
        set_module_args({
            'chain': 'PREROUTING',
            'source': '1.2.3.4/32',
            'destination': '7.8.9.10/42',
            'jump': 'SNAT',
            'table': 'nat',
            'to_source': '5.5.5.5/32',
            'protocol': 'udp',
            'source_port': '22',
            'to_ports': '8600',
            'state': 'absent',
            'in_interface': 'eth0',
            'out_interface': 'eth1',
            'comment': 'this is a comment',
            '_ansible_check_mode': True,
        })

        commands_results = [
            (0, '', ''),  # -C succeeds: rule exists
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        # Check mode: only the existence check (-C), no -D.
        self.assertEqual(run_command.call_count, 1)
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'nat', '-C', 'PREROUTING',
            '-p', 'udp', '-s', '1.2.3.4/32', '-d', '7.8.9.10/42',
            '-j', 'SNAT', '--to-source', '5.5.5.5/32',
            '-i', 'eth0', '-o', 'eth1',
            '--source-port', '22', '--to-ports', '8600',
            '-m', 'comment', '--comment', 'this is a comment'
        ])

    def test_insert_with_reject(self):
        """ Using reject_with with a previously defined jump: REJECT results in two Jump statements #18988 """
        set_module_args({
            'chain': 'INPUT',
            'protocol': 'tcp',
            'reject_with': 'tcp-reset',
            'ip_version': 'ipv4',
        })
        commands_results = [
            (0, '', ''),
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        self.assertEqual(run_command.call_count, 1)
        # reject_with alone must imply a single '-j REJECT'.
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'filter', '-C', 'INPUT',
            '-p', 'tcp', '-j', 'REJECT', '--reject-with', 'tcp-reset',
        ])

    def test_insert_jump_reject_with_reject(self):
        """ Using reject_with with a previously defined jump: REJECT results in two Jump statements #18988 """
        set_module_args({
            'chain': 'INPUT',
            'protocol': 'tcp',
            'jump': 'REJECT',
            'reject_with': 'tcp-reset',
            'ip_version': 'ipv4',
        })
        commands_results = [
            (0, '', ''),
        ]

        with patch.object(basic.AnsibleModule, 'run_command') as run_command:
            run_command.side_effect = commands_results
            with self.assertRaises(AnsibleExitJson) as result:
                iptables.main()
                self.assertTrue(result.exception.args[0]['changed'])

        self.assertEqual(run_command.call_count, 1)
        # Explicit jump: REJECT plus reject_with must still produce one '-j'.
        self.assertEqual(run_command.call_args_list[0][0][0], [
            '/sbin/iptables', '-t', 'filter', '-C', 'INPUT',
            '-p', 'tcp', '-j', 'REJECT', '--reject-with', 'tcp-reset',
        ])
| gpl-3.0 |
andresriancho/moto | tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py | 1 | 12483 | template = {
    "Description": "AWS CloudFormation Sample Template Gollum_Single_Instance_With_EBS_Volume: Gollum is a simple wiki system built on top of Git that powers GitHub Wikis. This template installs a Gollum Wiki stack on a single EC2 instance with an EBS volume for storage and demonstrates using the AWS CloudFormation bootstrap scripts to install the packages and files necessary at instance launch time. **WARNING** This template creates an Amazon EC2 instance and an EBS volume. You will be billed for the AWS resources used if you create a stack from this template.",
    # ---- Stack input parameters -----------------------------------------
    "Parameters": {
        "SSHLocation": {
            "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.",
            "Description": "The IP address range that can be used to SSH to the EC2 instances",
            "Default": "0.0.0.0/0",
            "MinLength": "9",
            "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            "MaxLength": "18",
            "Type": "String"
        },
        "KeyName": {
            "Type": "String",
            "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instances",
            "MinLength": "1",
            "AllowedPattern": "[\\x20-\\x7E]*",
            "MaxLength": "255",
            "ConstraintDescription": "can contain only ASCII characters."
        },
        "InstanceType": {
            "Default": "m1.small",
            "ConstraintDescription": "must be a valid EC2 instance type.",
            "Type": "String",
            "Description": "WebServer EC2 instance type",
            "AllowedValues": [
                "t1.micro", "m1.small", "m1.medium", "m1.large", "m1.xlarge",
                "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "m3.xlarge",
                "m3.2xlarge", "c1.medium", "c1.xlarge", "cc1.4xlarge",
                "cc2.8xlarge", "cg1.4xlarge"
            ]
        },
        # NOTE(review): the Description looks copy-pasted from InstanceType,
        # and this parameter is never referenced (DataVolume hardcodes 100);
        # kept as-is since tests may depend on the fixture verbatim.
        "VolumeSize": {
            "Description": "WebServer EC2 instance type",
            "Default": "5",
            "Type": "Number",
            "MaxValue": "1024",
            "MinValue": "5",
            "ConstraintDescription": "must be between 5 and 1024 Gb."
        }
    },
    "AWSTemplateFormatVersion": "2010-09-09",
    # ---- Stack outputs --------------------------------------------------
    "Outputs": {
        "WebsiteURL": {
            "Description": "URL for Gollum wiki",
            "Value": {
                "Fn::Join": [
                    "",
                    [
                        "http://",
                        {"Fn::GetAtt": ["WebServer", "PublicDnsName"]}
                    ]
                ]
            }
        }
    },
    # ---- Resources ------------------------------------------------------
    "Resources": {
        "WebServerSecurityGroup": {
            "Type": "AWS::EC2::SecurityGroup",
            "Properties": {
                "SecurityGroupIngress": [
                    {"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0", "FromPort": "80"},
                    {"ToPort": "22", "IpProtocol": "tcp", "CidrIp": {"Ref": "SSHLocation"}, "FromPort": "22"}
                ],
                "GroupDescription": "Enable SSH access and HTTP access on the inbound port"
            }
        },
        "WebServer": {
            "Type": "AWS::EC2::Instance",
            "Properties": {
                # Bootstrap script: cfn-init, wait for /dev/sdh, format/mount
                # the EBS volume, start gollum, then signal the WaitCondition.
                "UserData": {
                    "Fn::Base64": {
                        "Fn::Join": [
                            "",
                            [
                                "#!/bin/bash -v\n",
                                "yum update -y aws-cfn-bootstrap\n",
                                "# Helper function\n",
                                "function error_exit\n",
                                "{\n",
                                " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '",
                                {"Ref": "WaitHandle"},
                                "'\n",
                                " exit 1\n",
                                "}\n",
                                "# Install Rails packages\n",
                                "/opt/aws/bin/cfn-init -s ",
                                {"Ref": "AWS::StackId"},
                                " -r WebServer ",
                                " --region ",
                                {"Ref": "AWS::Region"},
                                " || error_exit 'Failed to run cfn-init'\n",
                                "# Wait for the EBS volume to show up\n",
                                "while [ ! -e /dev/sdh ]; do echo Waiting for EBS volume to attach; sleep 5; done\n",
                                "# Format the EBS volume and mount it\n",
                                "mkdir /var/wikidata\n",
                                "/sbin/mkfs -t ext3 /dev/sdh1\n",
                                "mount /dev/sdh1 /var/wikidata\n",
                                "# Initialize the wiki and fire up the server\n",
                                "cd /var/wikidata\n",
                                "git init\n",
                                "gollum --port 80 --host 0.0.0.0 &\n",
                                "# If all is well so signal success\n",
                                "/opt/aws/bin/cfn-signal -e $? -r \"Rails application setup complete\" '",
                                {"Ref": "WaitHandle"},
                                "'\n"
                            ]
                        ]
                    }
                },
                "KeyName": {"Ref": "KeyName"},
                "SecurityGroups": [{"Ref": "WebServerSecurityGroup"}],
                "InstanceType": {"Ref": "InstanceType"},
                "ImageId": {
                    "Fn::FindInMap": [
                        "AWSRegionArch2AMI",
                        {"Ref": "AWS::Region"},
                        {"Fn::FindInMap": ["AWSInstanceType2Arch", {"Ref": "InstanceType"}, "Arch"]}
                    ]
                }
            },
            "Metadata": {
                "AWS::CloudFormation::Init": {
                    "config": {
                        "packages": {
                            "rubygems": {
                                "nokogiri": ["1.5.10"],
                                "rdiscount": [],
                                "gollum": ["1.1.1"]
                            },
                            "yum": {
                                "libxslt-devel": [], "gcc": [], "git": [],
                                "rubygems": [], "ruby-devel": [], "ruby-rdoc": [],
                                "make": [], "libxml2-devel": []
                            }
                        }
                    }
                }
            }
        },
        "DataVolume": {
            "Type": "AWS::EC2::Volume",
            "Properties": {
                "Tags": [{"Value": "Gollum Data Volume", "Key": "Usage"}],
                "AvailabilityZone": {"Fn::GetAtt": ["WebServer", "AvailabilityZone"]},
                # Hardcoded size; the VolumeSize parameter above is unused.
                "Size": "100",
            }
        },
        "MountPoint": {
            "Type": "AWS::EC2::VolumeAttachment",
            "Properties": {
                "InstanceId": {"Ref": "WebServer"},
                "Device": "/dev/sdh",
                "VolumeId": {"Ref": "DataVolume"}
            }
        },
        "WaitCondition": {
            "DependsOn": "MountPoint",
            "Type": "AWS::CloudFormation::WaitCondition",
            "Properties": {
                "Handle": {"Ref": "WaitHandle"},
                "Timeout": "300"
            },
            "Metadata": {
                "Comment1": "Note that the WaitCondition is dependent on the volume mount point allowing the volume to be created and attached to the EC2 instance",
                "Comment2": "The instance bootstrap script waits for the volume to be attached to the instance prior to installing Gollum and signalling completion"
            }
        },
        "WaitHandle": {
            "Type": "AWS::CloudFormation::WaitConditionHandle"
        }
    },
    # ---- Static lookup tables -------------------------------------------
    "Mappings": {
        "AWSInstanceType2Arch": {
            "m3.2xlarge": {"Arch": "64"}, "m2.2xlarge": {"Arch": "64"},
            "m1.small": {"Arch": "64"}, "c1.medium": {"Arch": "64"},
            "cg1.4xlarge": {"Arch": "64HVM"}, "m2.xlarge": {"Arch": "64"},
            "t1.micro": {"Arch": "64"}, "cc1.4xlarge": {"Arch": "64HVM"},
            "m1.medium": {"Arch": "64"}, "cc2.8xlarge": {"Arch": "64HVM"},
            "m1.large": {"Arch": "64"}, "m1.xlarge": {"Arch": "64"},
            "m2.4xlarge": {"Arch": "64"}, "c1.xlarge": {"Arch": "64"},
            "m3.xlarge": {"Arch": "64"}
        },
        "AWSRegionArch2AMI": {
            "ap-southeast-1": {"64HVM": "NOT_YET_SUPPORTED", "32": "ami-b4b0cae6", "64": "ami-beb0caec"},
            "ap-southeast-2": {"64HVM": "NOT_YET_SUPPORTED", "32": "ami-b3990e89", "64": "ami-bd990e87"},
            "us-west-2": {"64HVM": "NOT_YET_SUPPORTED", "32": "ami-38fe7308", "64": "ami-30fe7300"},
            "us-east-1": {"64HVM": "ami-0da96764", "32": "ami-31814f58", "64": "ami-1b814f72"},
            "ap-northeast-1": {"64HVM": "NOT_YET_SUPPORTED", "32": "ami-0644f007", "64": "ami-0a44f00b"},
            "us-west-1": {"64HVM": "NOT_YET_SUPPORTED", "32": "ami-11d68a54", "64": "ami-1bd68a5e"},
            "eu-west-1": {"64HVM": "NOT_YET_SUPPORTED", "32": "ami-973b06e3", "64": "ami-953b06e1"},
            "sa-east-1": {"64HVM": "NOT_YET_SUPPORTED", "32": "ami-3e3be423", "64": "ami-3c3be421"}
        }
    }
}
| apache-2.0 |
rhshah/basicfiltering | tests/test_mutect.py | 1 | 3007 | '''
@Description : This tool helps to test mutect
@Created : 03/23/2017
@Updated : 03/23/2017
@author : Ronak H Shah
'''
import filecmp
import logging
import os
import shlex
import sys
from subprocess import Popen

import nose
def setup_module():
    """Run filter_mutect.py once so the comparison tests below have output files.

    Builds the command line from the sample input VCF/TXT, removes any stale
    output files, and executes the script as a subprocess.
    """
    this_dir, this_filename = os.path.split(__file__)
    new_dir = os.path.dirname(this_dir)
    inputFileVcf = os.path.join(new_dir, "data", "sample_input", "PoolTumor2-T_bc52_muTect_1.1.4.vcf")
    inputFileTxt = os.path.join(new_dir, "data", "sample_input", "PoolTumor2-T_bc52_muTect_1.1.4.txt")
    outFileVcf = os.path.join(new_dir, "PoolTumor2-T_bc52_muTect_1.1.4_STDfilter.vcf")
    outFileTxt = os.path.join(new_dir, "PoolTumor2-T_bc52_muTect_1.1.4_STDfilter.txt")
    scriptFile = os.path.join(new_dir, "filter_mutect.py")
    cmd = "python " + scriptFile + " -v -tsn PoolTumor2-T " + "-ivcf " + inputFileVcf + " -itxt " + inputFileTxt
    args = shlex.split(cmd)
    # Remove stale outputs one at a time. The original removed BOTH files
    # whenever EITHER existed, raising OSError when only one was present.
    for stale in (outFileTxt, outFileVcf):
        if os.path.isfile(stale):
            os.remove(stale)
    try:
        proc = Popen(args)
        proc.wait()
        retcode = proc.returncode
        if retcode >= 0:
            pass
    except Exception:
        # Narrowed from a bare except; keep the original best-effort logging
        # and exit behavior.
        e = sys.exc_info()[0]
        logging.info("Running of python command: %s \n has failed. The exception produced is %s Thus we will exit", cmd, e)
        sys.exit(1)
def teardown_module():
    """Delete the STDfilter output files produced by setup_module, if present."""
    this_dir, this_filename = os.path.split(__file__)
    new_dir = os.path.dirname(this_dir)
    outFileVcf = os.path.join(new_dir, "PoolTumor2-T_bc52_muTect_1.1.4_STDfilter.vcf")
    outFileTxt = os.path.join(new_dir, "PoolTumor2-T_bc52_muTect_1.1.4_STDfilter.txt")
    # Remove each file only if it exists. The original removed both files
    # whenever either existed, raising OSError when only one was present.
    for out_file in (outFileTxt, outFileVcf):
        if os.path.isfile(out_file):
            os.remove(out_file)
def test_text_fileSimilarity():
    # Compare the generated STDfilter text file against the stored reference.
    this_dir, this_filename = os.path.split(__file__)
    base_dir = os.path.dirname(this_dir)
    generated = os.path.join(base_dir, "PoolTumor2-T_bc52_muTect_1.1.4_STDfilter.txt")
    reference = os.path.join(base_dir, "data", "sample_output", "PoolTumor2-T_bc52_muTect_1.1.4_STDfilter.txt")
    nose.tools.ok_(filecmp.cmp(generated, reference), msg="The current result text file and the original result text file for MuTect are not the same")
def test_vcf_fileSimilarity():
    # Compare the generated STDfilter VCF file against the stored reference.
    this_dir, this_filename = os.path.split(__file__)
    base_dir = os.path.dirname(this_dir)
    generated = os.path.join(base_dir, "PoolTumor2-T_bc52_muTect_1.1.4_STDfilter.vcf")
    reference = os.path.join(base_dir, "data", "sample_output", "PoolTumor2-T_bc52_muTect_1.1.4_STDfilter.vcf")
    nose.tools.ok_(filecmp.cmp(generated, reference), msg="The current result vcf file and the original result vcf file for MuTect are not the same")
# Allow running this test module directly; nose discovers the tests above.
if __name__ == '__main__':
    nose.main()
| apache-2.0 |
klmitch/glance | glance/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py | 12 | 10367 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (Column, ForeignKey, Index, MetaData, Table)
from glance.db.sqlalchemy.migrate_repo.schema import (
BigInteger, Boolean, DateTime, Integer, Numeric, String, Text,
create_tables) # noqa
def define_artifacts_table(meta):
    """Return the 'artifacts' table definition bound to *meta*."""
    columns = [
        Column('id', String(36), primary_key=True, nullable=False),
        Column('name', String(255), nullable=False),
        Column('type_name', String(255), nullable=False),
        Column('type_version_prefix', BigInteger(), nullable=False),
        Column('type_version_suffix', String(255)),
        Column('type_version_meta', String(255)),
        Column('version_prefix', BigInteger(), nullable=False),
        Column('version_suffix', String(255)),
        Column('version_meta', String(255)),
        Column('description', Text()),
        Column('visibility', String(32), nullable=False),
        Column('state', String(32), nullable=False),
        Column('owner', String(255), nullable=False),
        Column('created_at', DateTime(), nullable=False),
        Column('updated_at', DateTime(), nullable=False),
        Column('deleted_at', DateTime()),
        Column('published_at', DateTime()),
    ]
    artifacts = Table('artifacts', meta, *columns,
                      mysql_engine='InnoDB',
                      mysql_charset='utf8',
                      extend_existing=True)
    # Secondary indexes for the common artifact lookup paths.
    Index('ix_artifact_name_and_version', artifacts.c.name,
          artifacts.c.version_prefix, artifacts.c.version_suffix)
    Index('ix_artifact_type', artifacts.c.type_name,
          artifacts.c.type_version_prefix, artifacts.c.type_version_suffix)
    Index('ix_artifact_state', artifacts.c.state)
    Index('ix_artifact_owner', artifacts.c.owner)
    Index('ix_artifact_visibility', artifacts.c.visibility)
    return artifacts
def define_artifact_tags_table(meta):
    """Return the 'artifact_tags' table definition bound to *meta*."""
    tag_columns = [
        Column('id', String(36), primary_key=True, nullable=False),
        Column('artifact_id', String(36),
               ForeignKey('artifacts.id'), nullable=False),
        Column('value', String(255), nullable=False),
        Column('created_at', DateTime(), nullable=False),
        Column('updated_at', DateTime(), nullable=False),
    ]
    artifact_tags = Table('artifact_tags', meta, *tag_columns,
                          mysql_engine='InnoDB',
                          mysql_charset='utf8',
                          extend_existing=True)
    # Tags are fetched by owning artifact, and by (artifact, value) pairs.
    Index('ix_artifact_tags_artifact_id', artifact_tags.c.artifact_id)
    Index('ix_artifact_tags_artifact_id_tag_value',
          artifact_tags.c.artifact_id, artifact_tags.c.value)
    return artifact_tags
def define_artifact_dependencies_table(meta):
    """Return the 'artifact_dependencies' table definition bound to *meta*.

    Each row links a source artifact to a destination artifact; ``is_direct``
    distinguishes direct from transitive dependencies.
    """
    artifact_dependencies = Table('artifact_dependencies',
                                  meta,
                                  Column('id', String(36), primary_key=True,
                                         nullable=False),
                                  Column('artifact_source', String(36),
                                         ForeignKey('artifacts.id'),
                                         nullable=False),
                                  Column('artifact_dest', String(36),
                                         ForeignKey('artifacts.id'),
                                         nullable=False),
                                  Column('artifact_origin', String(36),
                                         ForeignKey('artifacts.id'),
                                         nullable=False),
                                  Column('is_direct', Boolean(),
                                         nullable=False),
                                  Column('position', Integer()),
                                  Column('name', String(36)),
                                  Column('created_at', DateTime(),
                                         nullable=False),
                                  Column('updated_at', DateTime(),
                                         nullable=False),
                                  mysql_engine='InnoDB',
                                  mysql_charset='utf8',
                                  extend_existing=True)
    Index('ix_artifact_dependencies_source_id',
          artifact_dependencies.c.artifact_source)
    # Stray trailing comma removed here: it wrapped this Index expression in a
    # discarded 1-tuple (the Index side effect itself was unaffected).
    Index('ix_artifact_dependencies_dest_id',
          artifact_dependencies.c.artifact_dest)
    Index('ix_artifact_dependencies_origin_id',
          artifact_dependencies.c.artifact_origin)
    Index('ix_artifact_dependencies_direct_dependencies',
          artifact_dependencies.c.artifact_source,
          artifact_dependencies.c.is_direct)
    return artifact_dependencies
def define_artifact_blobs_table(meta):
    """Return the 'artifact_blobs' table definition bound to *meta*."""
    artifact_blobs = Table('artifact_blobs',
                           meta,
                           Column('id', String(36), primary_key=True,
                                  nullable=False),
                           Column('artifact_id', String(36),
                                  ForeignKey('artifacts.id'),
                                  nullable=False),
                           Column('size', BigInteger(), nullable=False),
                           Column('checksum', String(32)),
                           Column('name', String(255), nullable=False),
                           # NOTE(review): String(329) is an unusual width;
                           # presumably intentional — confirm against the
                           # artifact model before changing.
                           Column('item_key', String(329)),
                           Column('position', Integer()),
                           Column('created_at', DateTime(), nullable=False),
                           Column('updated_at', DateTime(),
                                  nullable=False),
                           mysql_engine='InnoDB',
                           mysql_charset='utf8',
                           extend_existing=True)
    # Blobs are looked up by owning artifact and by blob name.
    Index('ix_artifact_blobs_artifact_id',
          artifact_blobs.c.artifact_id)
    Index('ix_artifact_blobs_name',
          artifact_blobs.c.name)
    return artifact_blobs
def define_artifact_properties_table(meta):
    """Return the 'artifact_properties' table definition bound to *meta*.

    Each row holds one named property of an artifact; presumably only one of
    the typed value columns (string/int/numeric/bool/text) is populated per
    row — confirm against the artifact domain model.
    """
    artifact_properties = Table('artifact_properties',
                                meta,
                                Column('id', String(36),
                                       primary_key=True,
                                       nullable=False),
                                Column('artifact_id', String(36),
                                       ForeignKey('artifacts.id'),
                                       nullable=False),
                                Column('name', String(255),
                                       nullable=False),
                                Column('string_value', String(255)),
                                Column('int_value', Integer()),
                                Column('numeric_value', Numeric()),
                                Column('bool_value', Boolean()),
                                Column('text_value', Text()),
                                Column('created_at', DateTime(),
                                       nullable=False),
                                Column('updated_at', DateTime(),
                                       nullable=False),
                                Column('position', Integer()),
                                mysql_engine='InnoDB',
                                mysql_charset='utf8',
                                extend_existing=True)
    # Properties are fetched by owning artifact and by property name.
    Index('ix_artifact_properties_artifact_id',
          artifact_properties.c.artifact_id)
    Index('ix_artifact_properties_name', artifact_properties.c.name)
    return artifact_properties
def define_artifact_blob_locations_table(meta):
    """Return the 'artifact_blob_locations' table definition bound to *meta*."""
    artifact_blob_locations = Table('artifact_blob_locations',
                                    meta,
                                    Column('id', String(36),
                                           primary_key=True,
                                           nullable=False),
                                    Column('blob_id', String(36),
                                           ForeignKey('artifact_blobs.id'),
                                           nullable=False),
                                    Column('value', Text(), nullable=False),
                                    Column('created_at', DateTime(),
                                           nullable=False),
                                    Column('updated_at', DateTime(),
                                           nullable=False),
                                    Column('position', Integer()),
                                    Column('status', String(36),
                                           nullable=True),
                                    mysql_engine='InnoDB',
                                    mysql_charset='utf8',
                                    extend_existing=True)
    # Locations are fetched by owning blob.
    Index('ix_artifact_blob_locations_blob_id',
          artifact_blob_locations.c.blob_id)
    return artifact_blob_locations
def upgrade(migrate_engine):
    """Create all artifact-related tables on *migrate_engine*."""
    meta = MetaData()
    meta.bind = migrate_engine
    # Order matters: referenced tables are defined before their dependents.
    table_factories = (define_artifacts_table,
                       define_artifact_tags_table,
                       define_artifact_properties_table,
                       define_artifact_blobs_table,
                       define_artifact_blob_locations_table,
                       define_artifact_dependencies_table)
    create_tables([factory(meta) for factory in table_factories])
| apache-2.0 |
NeuralEnsemble/neuroConstruct | lib/jython/Lib/test/test_mailbox.py | 50 | 86537 | import os
import sys
import time
import stat
import socket
import email
import email.message
import re
import shutil
import StringIO
import tempfile
from test import test_support
import unittest
import mailbox
import glob
try:
import fcntl
except ImportError:
pass
# Silence Py3k warning
rfc822 = test_support.import_module('rfc822', deprecated=True)
class TestBase:
    def _check_sample(self, msg):
        # Verify a mailbox.Message rendering of the shared sample message:
        # headers, multipart structure, and per-part payloads.
        self.assertIsInstance(msg, email.message.Message)
        self.assertIsInstance(msg, mailbox.Message)
        for header, expected in _sample_headers.iteritems():
            self.assertIn(expected, msg.get_all(header))
        self.assertTrue(msg.is_multipart())
        self.assertEqual(len(msg.get_payload()), len(_sample_payloads))
        for index, expected_payload in enumerate(_sample_payloads):
            part = msg.get_payload(index)
            self.assertIsInstance(part, email.message.Message)
            self.assertNotIsInstance(part, mailbox.Message)
            self.assertEqual(part.get_payload(), expected_payload)

    def _delete_recursively(self, target):
        # Remove target whether it is a directory tree or a single file.
        if os.path.isdir(target):
            test_support.rmtree(target)
        elif os.path.exists(target):
            test_support.unlink(target)
class TestMailbox(TestBase):
    """Shared mailbox test battery; concrete subclasses set ``_factory``.

    Fix: ``test_clear`` previously never appended the added keys to its
    ``keys`` list, so both of its per-key assertion loops were silently
    empty; the keys are now collected so the checks actually run.
    """

    _factory = None # Overridden by subclasses to reuse tests
    _template = 'From: foo\n\n%s\n'

    def setUp(self):
        self._path = test_support.TESTFN
        self._delete_recursively(self._path)
        self._box = self._factory(self._path)

    def tearDown(self):
        self._box.close()
        self._delete_recursively(self._path)

    def test_add(self):
        # Add copies of a sample message
        keys = []
        keys.append(self._box.add(self._template % 0))
        self.assertEqual(len(self._box), 1)
        keys.append(self._box.add(mailbox.Message(_sample_message)))
        self.assertEqual(len(self._box), 2)
        keys.append(self._box.add(email.message_from_string(_sample_message)))
        self.assertEqual(len(self._box), 3)
        keys.append(self._box.add(StringIO.StringIO(_sample_message)))
        self.assertEqual(len(self._box), 4)
        keys.append(self._box.add(_sample_message))
        self.assertEqual(len(self._box), 5)
        self.assertEqual(self._box.get_string(keys[0]), self._template % 0)
        for i in (1, 2, 3, 4):
            self._check_sample(self._box[keys[i]])

    def test_add_file(self):
        with tempfile.TemporaryFile('w+') as f:
            f.write(_sample_message)
            f.seek(0)
            key = self._box.add(f)
        self.assertEqual(self._box.get_string(key).split('\n'),
                         _sample_message.split('\n'))

    def test_add_StringIO(self):
        key = self._box.add(StringIO.StringIO(self._template % "0"))
        self.assertEqual(self._box.get_string(key), self._template % "0")

    def test_remove(self):
        # Remove messages using remove()
        self._test_remove_or_delitem(self._box.remove)

    def test_delitem(self):
        # Remove messages using __delitem__()
        self._test_remove_or_delitem(self._box.__delitem__)

    def _test_remove_or_delitem(self, method):
        # (Used by test_remove() and test_delitem().)
        key0 = self._box.add(self._template % 0)
        key1 = self._box.add(self._template % 1)
        self.assertEqual(len(self._box), 2)
        method(key0)
        l = len(self._box)
        self.assertEqual(l, 1)
        self.assertRaises(KeyError, lambda: self._box[key0])
        self.assertRaises(KeyError, lambda: method(key0))
        self.assertEqual(self._box.get_string(key1), self._template % 1)
        key2 = self._box.add(self._template % 2)
        self.assertEqual(len(self._box), 2)
        method(key2)
        l = len(self._box)
        self.assertEqual(l, 1)
        self.assertRaises(KeyError, lambda: self._box[key2])
        self.assertRaises(KeyError, lambda: method(key2))
        self.assertEqual(self._box.get_string(key1), self._template % 1)
        method(key1)
        self.assertEqual(len(self._box), 0)
        self.assertRaises(KeyError, lambda: self._box[key1])
        self.assertRaises(KeyError, lambda: method(key1))

    def test_discard(self, repetitions=10):
        # Discard messages
        key0 = self._box.add(self._template % 0)
        key1 = self._box.add(self._template % 1)
        self.assertEqual(len(self._box), 2)
        self._box.discard(key0)
        self.assertEqual(len(self._box), 1)
        self.assertRaises(KeyError, lambda: self._box[key0])
        self._box.discard(key0)
        self.assertEqual(len(self._box), 1)
        self.assertRaises(KeyError, lambda: self._box[key0])

    def test_get(self):
        # Retrieve messages using get()
        key0 = self._box.add(self._template % 0)
        msg = self._box.get(key0)
        self.assertEqual(msg['from'], 'foo')
        self.assertEqual(msg.get_payload(), '0\n')
        self.assertIs(self._box.get('foo'), None)
        self.assertFalse(self._box.get('foo', False))
        self._box.close()
        self._box = self._factory(self._path, factory=rfc822.Message)
        key1 = self._box.add(self._template % 1)
        msg = self._box.get(key1)
        self.assertEqual(msg['from'], 'foo')
        self.assertEqual(msg.fp.read(), '1' + os.linesep)
        msg.fp.close()

    def test_getitem(self):
        # Retrieve message using __getitem__()
        key0 = self._box.add(self._template % 0)
        msg = self._box[key0]
        self.assertEqual(msg['from'], 'foo')
        self.assertEqual(msg.get_payload(), '0\n')
        self.assertRaises(KeyError, lambda: self._box['foo'])
        self._box.discard(key0)
        self.assertRaises(KeyError, lambda: self._box[key0])

    def test_get_message(self):
        # Get Message representations of messages
        key0 = self._box.add(self._template % 0)
        key1 = self._box.add(_sample_message)
        msg0 = self._box.get_message(key0)
        self.assertIsInstance(msg0, mailbox.Message)
        self.assertEqual(msg0['from'], 'foo')
        self.assertEqual(msg0.get_payload(), '0\n')
        self._check_sample(self._box.get_message(key1))

    def test_get_string(self):
        # Get string representations of messages
        key0 = self._box.add(self._template % 0)
        key1 = self._box.add(_sample_message)
        self.assertEqual(self._box.get_string(key0), self._template % 0)
        self.assertEqual(self._box.get_string(key1), _sample_message)

    def test_get_file(self):
        # Get file representations of messages
        key0 = self._box.add(self._template % 0)
        key1 = self._box.add(_sample_message)
        msg0 = self._box.get_file(key0)
        self.assertEqual(msg0.read().replace(os.linesep, '\n'),
                         self._template % 0)
        msg1 = self._box.get_file(key1)
        self.assertEqual(msg1.read().replace(os.linesep, '\n'),
                         _sample_message)
        msg0.close()
        msg1.close()

    def test_get_file_can_be_closed_twice(self):
        # Issue 11700
        key = self._box.add(_sample_message)
        f = self._box.get_file(key)
        f.close()
        f.close()

    def test_iterkeys(self):
        # Get keys using iterkeys()
        self._check_iteration(self._box.iterkeys, do_keys=True, do_values=False)

    def test_keys(self):
        # Get keys using keys()
        self._check_iteration(self._box.keys, do_keys=True, do_values=False)

    def test_itervalues(self):
        # Get values using itervalues()
        self._check_iteration(self._box.itervalues, do_keys=False,
                              do_values=True)

    def test_iter(self):
        # Get values using __iter__()
        self._check_iteration(self._box.__iter__, do_keys=False,
                              do_values=True)

    def test_values(self):
        # Get values using values()
        self._check_iteration(self._box.values, do_keys=False, do_values=True)

    def test_iteritems(self):
        # Get keys and values using iteritems()
        self._check_iteration(self._box.iteritems, do_keys=True,
                              do_values=True)

    def test_items(self):
        # Get keys and values using items()
        self._check_iteration(self._box.items, do_keys=True, do_values=True)

    def _check_iteration(self, method, do_keys, do_values, repetitions=10):
        for value in method():
            self.fail("Not empty")
        keys, values = [], []
        for i in xrange(repetitions):
            keys.append(self._box.add(self._template % i))
            values.append(self._template % i)
        if do_keys and not do_values:
            returned_keys = list(method())
        elif do_values and not do_keys:
            returned_values = list(method())
        else:
            returned_keys, returned_values = [], []
            for key, value in method():
                returned_keys.append(key)
                returned_values.append(value)
        if do_keys:
            self.assertEqual(len(keys), len(returned_keys))
            self.assertEqual(set(keys), set(returned_keys))
        if do_values:
            count = 0
            for value in returned_values:
                self.assertEqual(value['from'], 'foo')
                self.assertTrue(int(value.get_payload()) < repetitions,
                                (value.get_payload(), repetitions))
                count += 1
            self.assertEqual(len(values), count)

    def test_has_key(self):
        # Check existence of keys using has_key()
        self._test_has_key_or_contains(self._box.has_key)

    def test_contains(self):
        # Check existence of keys using __contains__()
        self._test_has_key_or_contains(self._box.__contains__)

    def _test_has_key_or_contains(self, method):
        # (Used by test_has_key() and test_contains().)
        self.assertFalse(method('foo'))
        key0 = self._box.add(self._template % 0)
        self.assertTrue(method(key0))
        self.assertFalse(method('foo'))
        key1 = self._box.add(self._template % 1)
        self.assertTrue(method(key1))
        self.assertTrue(method(key0))
        self.assertFalse(method('foo'))
        self._box.remove(key0)
        self.assertFalse(method(key0))
        self.assertTrue(method(key1))
        self.assertFalse(method('foo'))
        self._box.remove(key1)
        self.assertFalse(method(key1))
        self.assertFalse(method(key0))
        self.assertFalse(method('foo'))

    def test_len(self, repetitions=10):
        # Get message count
        keys = []
        for i in xrange(repetitions):
            self.assertEqual(len(self._box), i)
            keys.append(self._box.add(self._template % i))
            self.assertEqual(len(self._box), i + 1)
        for i in xrange(repetitions):
            self.assertEqual(len(self._box), repetitions - i)
            self._box.remove(keys[i])
            self.assertEqual(len(self._box), repetitions - i - 1)

    def test_set_item(self):
        # Modify messages using __setitem__()
        key0 = self._box.add(self._template % 'original 0')
        self.assertEqual(self._box.get_string(key0),
                         self._template % 'original 0')
        key1 = self._box.add(self._template % 'original 1')
        self.assertEqual(self._box.get_string(key1),
                         self._template % 'original 1')
        self._box[key0] = self._template % 'changed 0'
        self.assertEqual(self._box.get_string(key0),
                         self._template % 'changed 0')
        self._box[key1] = self._template % 'changed 1'
        self.assertEqual(self._box.get_string(key1),
                         self._template % 'changed 1')
        self._box[key0] = _sample_message
        self._check_sample(self._box[key0])
        self._box[key1] = self._box[key0]
        self._check_sample(self._box[key1])
        self._box[key0] = self._template % 'original 0'
        self.assertEqual(self._box.get_string(key0),
                         self._template % 'original 0')
        self._check_sample(self._box[key1])
        self.assertRaises(KeyError,
                          lambda: self._box.__setitem__('foo', 'bar'))
        self.assertRaises(KeyError, lambda: self._box['foo'])
        self.assertEqual(len(self._box), 2)

    def test_clear(self, iterations=10):
        # Remove all messages using clear()
        keys = []
        for i in xrange(iterations):
            # FIX: record the key; previously the result of add() was
            # discarded and the assertion loops below never executed.
            keys.append(self._box.add(self._template % i))
        for i, key in enumerate(keys):
            self.assertEqual(self._box.get_string(key), self._template % i)
        self._box.clear()
        self.assertEqual(len(self._box), 0)
        for i, key in enumerate(keys):
            self.assertRaises(KeyError, lambda: self._box.get_string(key))

    def test_pop(self):
        # Get and remove a message using pop()
        key0 = self._box.add(self._template % 0)
        self.assertIn(key0, self._box)
        key1 = self._box.add(self._template % 1)
        self.assertIn(key1, self._box)
        self.assertEqual(self._box.pop(key0).get_payload(), '0\n')
        self.assertNotIn(key0, self._box)
        self.assertIn(key1, self._box)
        key2 = self._box.add(self._template % 2)
        self.assertIn(key2, self._box)
        self.assertEqual(self._box.pop(key2).get_payload(), '2\n')
        self.assertNotIn(key2, self._box)
        self.assertIn(key1, self._box)
        self.assertEqual(self._box.pop(key1).get_payload(), '1\n')
        self.assertNotIn(key1, self._box)
        self.assertEqual(len(self._box), 0)

    def test_popitem(self, iterations=10):
        # Get and remove an arbitrary (key, message) using popitem()
        keys = []
        # Use the iterations parameter (was hard-coded to 10).
        for i in xrange(iterations):
            keys.append(self._box.add(self._template % i))
        seen = []
        for i in xrange(iterations):
            key, msg = self._box.popitem()
            self.assertIn(key, keys)
            self.assertNotIn(key, seen)
            seen.append(key)
            self.assertEqual(int(msg.get_payload()), keys.index(key))
        self.assertEqual(len(self._box), 0)
        for key in keys:
            self.assertRaises(KeyError, lambda: self._box[key])

    def test_update(self):
        # Modify multiple messages using update()
        key0 = self._box.add(self._template % 'original 0')
        key1 = self._box.add(self._template % 'original 1')
        key2 = self._box.add(self._template % 'original 2')
        self._box.update({key0: self._template % 'changed 0',
                          key2: _sample_message})
        self.assertEqual(len(self._box), 3)
        self.assertEqual(self._box.get_string(key0),
                         self._template % 'changed 0')
        self.assertEqual(self._box.get_string(key1),
                         self._template % 'original 1')
        self._check_sample(self._box[key2])
        self._box.update([(key2, self._template % 'changed 2'),
                          (key1, self._template % 'changed 1'),
                          (key0, self._template % 'original 0')])
        self.assertEqual(len(self._box), 3)
        self.assertEqual(self._box.get_string(key0),
                         self._template % 'original 0')
        self.assertEqual(self._box.get_string(key1),
                         self._template % 'changed 1')
        self.assertEqual(self._box.get_string(key2),
                         self._template % 'changed 2')
        self.assertRaises(KeyError,
                          lambda: self._box.update({'foo': 'bar',
                                                    key0: self._template % "changed 0"}))
        self.assertEqual(len(self._box), 3)
        self.assertEqual(self._box.get_string(key0),
                         self._template % "changed 0")
        self.assertEqual(self._box.get_string(key1),
                         self._template % "changed 1")
        self.assertEqual(self._box.get_string(key2),
                         self._template % "changed 2")

    def test_flush(self):
        # Write changes to disk
        self._test_flush_or_close(self._box.flush, True)

    def test_popitem_and_flush_twice(self):
        # See #15036.
        self._box.add(self._template % 0)
        self._box.add(self._template % 1)
        self._box.flush()
        self._box.popitem()
        self._box.flush()
        self._box.popitem()
        self._box.flush()

    def test_lock_unlock(self):
        # Lock and unlock the mailbox
        self.assertFalse(os.path.exists(self._get_lock_path()))
        self._box.lock()
        self.assertTrue(os.path.exists(self._get_lock_path()))
        self._box.unlock()
        self.assertFalse(os.path.exists(self._get_lock_path()))

    def test_close(self):
        # Close mailbox and flush changes to disk
        self._test_flush_or_close(self._box.close, False)

    def _test_flush_or_close(self, method, should_call_close):
        contents = [self._template % i for i in xrange(3)]
        self._box.add(contents[0])
        self._box.add(contents[1])
        self._box.add(contents[2])
        oldbox = self._box
        method()
        if should_call_close:
            self._box.close()
        self._box = self._factory(self._path)
        keys = self._box.keys()
        self.assertEqual(len(keys), 3)
        for key in keys:
            self.assertIn(self._box.get_string(key), contents)
        oldbox.close()

    def test_dump_message(self):
        # Write message representations to disk
        # (renamed loop variable so it no longer shadows the builtin input)
        for msg_input in (email.message_from_string(_sample_message),
                          _sample_message, StringIO.StringIO(_sample_message)):
            output = StringIO.StringIO()
            self._box._dump_message(msg_input, output)
            self.assertEqual(output.getvalue(),
                             _sample_message.replace('\n', os.linesep))
        output = StringIO.StringIO()
        self.assertRaises(TypeError,
                          lambda: self._box._dump_message(None, output))

    def _get_lock_path(self):
        # Return the path of the dot lock file. May be overridden.
        return self._path + '.lock'
class TestMailboxSuperclass(TestBase, unittest.TestCase):
    def test_notimplemented(self):
        # Every method on the abstract Mailbox base class must raise
        # NotImplementedError; exercise each one in turn.
        box = mailbox.Mailbox('path')
        calls = [
            lambda: box.add(''),
            lambda: box.remove(''),
            lambda: box.__delitem__(''),
            lambda: box.discard(''),
            lambda: box.__setitem__('', ''),
            lambda: box.iterkeys(),
            lambda: box.keys(),
            lambda: box.itervalues().next(),
            lambda: box.__iter__().next(),
            lambda: box.values(),
            lambda: box.iteritems().next(),
            lambda: box.items(),
            lambda: box.get(''),
            lambda: box.__getitem__(''),
            lambda: box.get_message(''),
            lambda: box.get_string(''),
            lambda: box.get_file(''),
            lambda: box.has_key(''),
            lambda: box.__contains__(''),
            lambda: box.__len__(),
            lambda: box.clear(),
            lambda: box.pop(''),
            lambda: box.popitem(),
            lambda: box.update((('', ''),)),
            lambda: box.flush(),
            lambda: box.lock(),
            lambda: box.unlock(),
            lambda: box.close(),
        ]
        for call in calls:
            self.assertRaises(NotImplementedError, call)
class TestMaildir(TestMailbox, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.Maildir(path, factory)
def setUp(self):
TestMailbox.setUp(self)
if os.name in ('nt', 'os2') or sys.platform == 'cygwin':
self._box.colon = '!'
def test_add_MM(self):
# Add a MaildirMessage instance
msg = mailbox.MaildirMessage(self._template % 0)
msg.set_subdir('cur')
msg.set_info('foo')
key = self._box.add(msg)
self.assertTrue(os.path.exists(os.path.join(self._path, 'cur', '%s%sfoo' %
(key, self._box.colon))))
def test_get_MM(self):
# Get a MaildirMessage instance
msg = mailbox.MaildirMessage(self._template % 0)
msg.set_subdir('cur')
msg.set_flags('RF')
key = self._box.add(msg)
msg_returned = self._box.get_message(key)
self.assertIsInstance(msg_returned, mailbox.MaildirMessage)
self.assertEqual(msg_returned.get_subdir(), 'cur')
self.assertEqual(msg_returned.get_flags(), 'FR')
def test_set_MM(self):
# Set with a MaildirMessage instance
msg0 = mailbox.MaildirMessage(self._template % 0)
msg0.set_flags('TP')
key = self._box.add(msg0)
msg_returned = self._box.get_message(key)
self.assertEqual(msg_returned.get_subdir(), 'new')
self.assertEqual(msg_returned.get_flags(), 'PT')
msg1 = mailbox.MaildirMessage(self._template % 1)
self._box[key] = msg1
msg_returned = self._box.get_message(key)
self.assertEqual(msg_returned.get_subdir(), 'new')
self.assertEqual(msg_returned.get_flags(), '')
self.assertEqual(msg_returned.get_payload(), '1\n')
msg2 = mailbox.MaildirMessage(self._template % 2)
msg2.set_info('2,S')
self._box[key] = msg2
self._box[key] = self._template % 3
msg_returned = self._box.get_message(key)
self.assertEqual(msg_returned.get_subdir(), 'new')
self.assertEqual(msg_returned.get_flags(), 'S')
self.assertEqual(msg_returned.get_payload(), '3\n')
def test_consistent_factory(self):
# Add a message.
msg = mailbox.MaildirMessage(self._template % 0)
msg.set_subdir('cur')
msg.set_flags('RF')
key = self._box.add(msg)
# Create new mailbox with
class FakeMessage(mailbox.MaildirMessage):
pass
box = mailbox.Maildir(self._path, factory=FakeMessage)
box.colon = self._box.colon
msg2 = box.get_message(key)
self.assertIsInstance(msg2, FakeMessage)
def test_initialize_new(self):
# Initialize a non-existent mailbox
self.tearDown()
self._box = mailbox.Maildir(self._path)
self._check_basics(factory=rfc822.Message)
self._delete_recursively(self._path)
self._box = self._factory(self._path, factory=None)
self._check_basics()
def test_initialize_existing(self):
# Initialize an existing mailbox
self.tearDown()
for subdir in '', 'tmp', 'new', 'cur':
os.mkdir(os.path.normpath(os.path.join(self._path, subdir)))
self._box = mailbox.Maildir(self._path)
self._check_basics(factory=rfc822.Message)
self._box = mailbox.Maildir(self._path, factory=None)
self._check_basics()
def _check_basics(self, factory=None):
# (Used by test_open_new() and test_open_existing().)
self.assertEqual(self._box._path, os.path.abspath(self._path))
self.assertEqual(self._box._factory, factory)
for subdir in '', 'tmp', 'new', 'cur':
path = os.path.join(self._path, subdir)
mode = os.stat(path)[stat.ST_MODE]
self.assertTrue(stat.S_ISDIR(mode), "Not a directory: '%s'" % path)
def test_list_folders(self):
# List folders
self._box.add_folder('one')
self._box.add_folder('two')
self._box.add_folder('three')
self.assertEqual(len(self._box.list_folders()), 3)
self.assertEqual(set(self._box.list_folders()),
set(('one', 'two', 'three')))
def test_get_folder(self):
# Open folders
self._box.add_folder('foo.bar')
folder0 = self._box.get_folder('foo.bar')
folder0.add(self._template % 'bar')
self.assertTrue(os.path.isdir(os.path.join(self._path, '.foo.bar')))
folder1 = self._box.get_folder('foo.bar')
self.assertEqual(folder1.get_string(folder1.keys()[0]),
self._template % 'bar')
def test_add_and_remove_folders(self):
# Delete folders
self._box.add_folder('one')
self._box.add_folder('two')
self.assertEqual(len(self._box.list_folders()), 2)
self.assertEqual(set(self._box.list_folders()), set(('one', 'two')))
self._box.remove_folder('one')
self.assertEqual(len(self._box.list_folders()), 1)
self.assertEqual(set(self._box.list_folders()), set(('two',)))
self._box.add_folder('three')
self.assertEqual(len(self._box.list_folders()), 2)
self.assertEqual(set(self._box.list_folders()), set(('two', 'three')))
self._box.remove_folder('three')
self.assertEqual(len(self._box.list_folders()), 1)
self.assertEqual(set(self._box.list_folders()), set(('two',)))
self._box.remove_folder('two')
self.assertEqual(len(self._box.list_folders()), 0)
self.assertEqual(self._box.list_folders(), [])
def test_clean(self):
# Remove old files from 'tmp'
foo_path = os.path.join(self._path, 'tmp', 'foo')
bar_path = os.path.join(self._path, 'tmp', 'bar')
with open(foo_path, 'w') as f:
f.write("@")
with open(bar_path, 'w') as f:
f.write("@")
self._box.clean()
self.assertTrue(os.path.exists(foo_path))
self.assertTrue(os.path.exists(bar_path))
foo_stat = os.stat(foo_path)
os.utime(foo_path, (time.time() - 129600 - 2,
foo_stat.st_mtime))
self._box.clean()
self.assertFalse(os.path.exists(foo_path))
self.assertTrue(os.path.exists(bar_path))
def test_create_tmp(self, repetitions=10):
# Create files in tmp directory
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
pid = os.getpid()
pattern = re.compile(r"(?P<time>\d+)\.M(?P<M>\d{1,6})P(?P<P>\d+)"
r"Q(?P<Q>\d+)\.(?P<host>[^:/]+)")
previous_groups = None
for x in xrange(repetitions):
tmp_file = self._box._create_tmp()
head, tail = os.path.split(tmp_file.name)
self.assertEqual(head, os.path.abspath(os.path.join(self._path,
"tmp")),
"File in wrong location: '%s'" % head)
match = pattern.match(tail)
self.assertTrue(match is not None, "Invalid file name: '%s'" % tail)
groups = match.groups()
if previous_groups is not None:
self.assertGreaterEqual(int(groups[0]), int(previous_groups[0]),
"Non-monotonic seconds: '%s' before '%s'" %
(previous_groups[0], groups[0]))
if int(groups[0]) == int(previous_groups[0]):
self.assertGreaterEqual(int(groups[1]), int(previous_groups[1]),
"Non-monotonic milliseconds: '%s' before '%s'" %
(previous_groups[1], groups[1]))
self.assertTrue(int(groups[2]) == pid,
"Process ID mismatch: '%s' should be '%s'" %
(groups[2], pid))
self.assertTrue(int(groups[3]) == int(previous_groups[3]) + 1,
"Non-sequential counter: '%s' before '%s'" %
(previous_groups[3], groups[3]))
self.assertTrue(groups[4] == hostname,
"Host name mismatch: '%s' should be '%s'" %
(groups[4], hostname))
previous_groups = groups
tmp_file.write(_sample_message)
tmp_file.seek(0)
self.assertTrue(tmp_file.read() == _sample_message)
tmp_file.close()
file_count = len(os.listdir(os.path.join(self._path, "tmp")))
self.assertTrue(file_count == repetitions,
"Wrong file count: '%s' should be '%s'" %
(file_count, repetitions))
    def test_refresh(self):
        """_refresh() must rebuild the table of contents from disk; add()
        alone must not touch the cached _toc."""
        # Update the table of contents
        self.assertEqual(self._box._toc, {})
        key0 = self._box.add(self._template % 0)
        key1 = self._box.add(self._template % 1)
        # add() leaves the cached table of contents untouched...
        self.assertEqual(self._box._toc, {})
        self._box._refresh()
        # ...while _refresh() discovers the new messages in 'new'.
        self.assertEqual(self._box._toc, {key0: os.path.join('new', key0),
                                          key1: os.path.join('new', key1)})
        key2 = self._box.add(self._template % 2)
        self.assertEqual(self._box._toc, {key0: os.path.join('new', key0),
                                          key1: os.path.join('new', key1)})
        self._box._refresh()
        self.assertEqual(self._box._toc, {key0: os.path.join('new', key0),
                                          key1: os.path.join('new', key1),
                                          key2: os.path.join('new', key2)})
    def test_refresh_after_safety_period(self):
        # Issue #13254: Call _refresh after the "file system safety
        # period" of 2 seconds has passed; _toc should still be
        # updated because this is the first call to _refresh.
        key0 = self._box.add(self._template % 0)
        key1 = self._box.add(self._template % 1)
        # Re-open the mailbox so it starts with an empty cached _toc.
        self._box = self._factory(self._path)
        self.assertEqual(self._box._toc, {})
        # Emulate sleeping. Instead of sleeping for 2 seconds, use the
        # skew factor to make _refresh think that the filesystem
        # safety period has passed and re-reading the _toc is only
        # required if mtimes differ.
        self._box._skewfactor = -3
        self._box._refresh()
        # The first refresh must pick up both messages regardless of mtimes.
        self.assertEqual(sorted(self._box._toc.keys()), sorted([key0, key1]))
    def test_lookup(self):
        """_lookup() must map keys to message subpaths and raise KeyError
        for unknown or removed messages."""
        # Look up message subpaths in the TOC
        self.assertRaises(KeyError, lambda: self._box._lookup('foo'))
        key0 = self._box.add(self._template % 0)
        self.assertEqual(self._box._lookup(key0), os.path.join('new', key0))
        # Delete the file behind the mailbox's back; the stale entry stays
        # in the cached _toc until the TOC is re-read.
        os.remove(os.path.join(self._path, 'new', key0))
        self.assertEqual(self._box._toc, {key0: os.path.join('new', key0)})
        # Be sure that the TOC is read back from disk (see issue #6896
        # about bad mtime behaviour on some systems).
        self._box.flush()
        self.assertRaises(KeyError, lambda: self._box._lookup(key0))
        self.assertEqual(self._box._toc, {})
def test_lock_unlock(self):
# Lock and unlock the mailbox. For Maildir, this does nothing.
self._box.lock()
self._box.unlock()
def test_folder (self):
# Test for bug #1569790: verify that folders returned by .get_folder()
# use the same factory function.
def dummy_factory (s):
return None
box = self._factory(self._path, factory=dummy_factory)
folder = box.add_folder('folder1')
self.assertIs(folder._factory, dummy_factory)
folder1_alias = box.get_folder('folder1')
self.assertIs(folder1_alias._factory, dummy_factory)
def test_directory_in_folder (self):
# Test that mailboxes still work if there's a stray extra directory
# in a folder.
for i in range(10):
self._box.add(mailbox.Message(_sample_message))
# Create a stray directory
os.mkdir(os.path.join(self._path, 'cur', 'stray-dir'))
# Check that looping still works with the directory present.
for msg in self._box:
pass
    def test_file_permissions(self):
        """Message files must be created with no execute permission bits,
        even under a fully permissive umask."""
        # Verify that message files are created without execute permissions
        if not hasattr(os, "stat") or not hasattr(os, "umask"):
            return
        msg = mailbox.MaildirMessage(self._template % 0)
        # Clear the umask so any stray execute bits would become visible.
        orig_umask = os.umask(0)
        try:
            key = self._box.add(msg)
        finally:
            os.umask(orig_umask)
        path = os.path.join(self._path, self._box._lookup(key))
        mode = os.stat(path).st_mode
        # No execute bit (owner, group or other) may be set.
        self.assertEqual(mode & 0111, 0)
    def test_folder_file_perms(self):
        # From bug #3228, we want to verify that the file created inside a Maildir
        # subfolder isn't marked as executable.
        if not hasattr(os, "stat") or not hasattr(os, "umask"):
            return
        # Clear the umask so any stray execute bits would become visible.
        orig_umask = os.umask(0)
        try:
            subfolder = self._box.add_folder('subfolder')
        finally:
            os.umask(orig_umask)
        # 'maildirfolder' is the marker file created inside the new subfolder.
        path = os.path.join(subfolder._path, 'maildirfolder')
        st = os.stat(path)
        perms = st.st_mode
        self.assertFalse((perms & 0111)) # Execute bits should all be off.
    def test_reread(self):
        """_refresh() must skip re-reading the TOC when the subdirectory
        mtimes are unchanged, and re-read as soon as an mtime changes."""
        # Do an initial unconditional refresh
        self._box._refresh()
        # Put the last modified times more than two seconds into the past
        # (because mtime may have only a two second granularity).
        for subdir in ('cur', 'new'):
            os.utime(os.path.join(self._box._path, subdir),
                     (time.time()-5,)*2)
        # Because mtime has a two second granularity in worst case (FAT), a
        # refresh is done unconditionally if called for within
        # two-second-plus-a-bit of the last one, just in case the mbox has
        # changed; so now we have to wait for that interval to expire.
        #
        # Because this is a test, emulate sleeping. Instead of
        # sleeping for 2 seconds, use the skew factor to make _refresh
        # think that 2 seconds have passed and re-reading the _toc is
        # only required if mtimes differ.
        self._box._skewfactor = -3
        # Re-reading causes the ._toc attribute to be assigned a new dictionary
        # object, so we'll check that the ._toc attribute isn't a different
        # object.
        orig_toc = self._box._toc
        def refreshed():
            # True once _refresh has actually rebuilt the TOC dict.
            return self._box._toc is not orig_toc
        self._box._refresh()
        # Unchanged mtimes: the cached TOC must have been kept.
        self.assertFalse(refreshed())
        # Now, write something into cur and remove it. This changes
        # the mtime and should cause a re-read. Note that "sleep
        # emulation" is still in effect, as skewfactor is -3.
        filename = os.path.join(self._path, 'cur', 'stray-file')
        f = open(filename, 'w')
        f.close()
        os.unlink(filename)
        self._box._refresh()
        self.assertTrue(refreshed())
class _TestSingleFile(TestMailbox):
    '''Common tests for single-file mailbox formats (mbox, MMDF, Babyl).'''
    def test_add_doesnt_rewrite(self):
        # When only adding messages, flush() should not rewrite the
        # mailbox file. See issue #9559.
        # Inode number changes if the contents are written to another
        # file which is then renamed over the original file. So we
        # must check that the inode number doesn't change.
        inode_before = os.stat(self._path).st_ino
        self._box.add(self._template % 0)
        self._box.flush()
        inode_after = os.stat(self._path).st_ino
        self.assertEqual(inode_before, inode_after)
        # Make sure the message was really added
        self._box.close()
        self._box = self._factory(self._path)
        self.assertEqual(len(self._box), 1)
    def test_permissions_after_flush(self):
        # See issue #5346
        # Make the mailbox world writable. It's unlikely that the new
        # mailbox file would have these permissions after flush(),
        # because umask usually prevents it.
        mode = os.stat(self._path).st_mode | 0o666
        os.chmod(self._path, mode)
        self._box.add(self._template % 0)
        i = self._box.add(self._template % 1)
        # Need to remove one message to make flush() create a new file
        self._box.remove(i)
        self._box.flush()
        # flush() must preserve the (unusual) permissions of the old file.
        self.assertEqual(os.stat(self._path).st_mode, mode)
class _TestMboxMMDF(_TestSingleFile):
    """Shared tests for the mbox and MMDF single-file mailbox formats:
    "From " line handling, persistence across open/close, and locking."""
    def tearDown(self):
        # Close the box, remove the mailbox file, and clean up any lock
        # remnants (files named <path>.*) a failed test may have left.
        self._box.close()
        self._delete_recursively(self._path)
        for lock_remnant in glob.glob(self._path + '.*'):
            test_support.unlink(lock_remnant)
    def test_add_from_string(self):
        # Add a string starting with 'From ' to the mailbox; the leading
        # line becomes the envelope "From " data, not message content.
        key = self._box.add('From foo@bar blah\nFrom: foo\n\n0\n')
        self.assertEqual(self._box[key].get_from(), 'foo@bar blah')
        self.assertEqual(self._box[key].get_payload(), '0\n')
    def test_add_mbox_or_mmdf_message(self):
        # Adding an mboxMessage or MMDFMessage must be accepted by either
        # mailbox format without error.
        for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
            msg = class_('From foo@bar blah\nFrom: foo\n\n0\n')
            self._box.add(msg)
    def test_open_close_open(self):
        # Open and inspect previously-created mailbox
        values = [self._template % i for i in xrange(3)]
        for value in values:
            self._box.add(value)
        self._box.close()
        mtime = os.path.getmtime(self._path)
        self._box = self._factory(self._path)
        self.assertEqual(len(self._box), 3)
        for key in self._box.iterkeys():
            self.assertIn(self._box.get_string(key), values)
        self._box.close()
        # Reading alone must not have modified the file.
        self.assertEqual(mtime, os.path.getmtime(self._path))
    def test_add_and_close(self):
        # Verifying that closing a mailbox doesn't change added items
        self._box.add(_sample_message)
        for i in xrange(3):
            self._box.add(self._template % i)
        self._box.add(_sample_message)
        self._box._file.flush()
        self._box._file.seek(0)
        contents = self._box._file.read()
        self._box.close()
        # The on-disk file must match what the internal file object held.
        with open(self._path, 'rb') as f:
            self.assertEqual(contents, f.read())
        self._box = self._factory(self._path)
    @unittest.skipUnless(hasattr(os, 'fork'), "Test needs fork().")
    @unittest.skipUnless(hasattr(socket, 'socketpair'), "Test needs socketpair().")
    def test_lock_conflict(self):
        # Fork off a child process that will lock the mailbox temporarily,
        # unlock it and exit.
        c, p = socket.socketpair()
        self.addCleanup(c.close)
        self.addCleanup(p.close)
        pid = os.fork()
        if pid == 0:
            # child
            try:
                # lock the mailbox, and signal the parent it can proceed
                self._box.lock()
                c.send(b'c')
                # wait until the parent is done, and unlock the mailbox
                c.recv(1)
                self._box.unlock()
            finally:
                # _exit avoids running the parent's cleanup in the child.
                os._exit(0)
        # In the parent, wait until the child signals it locked the mailbox.
        p.recv(1)
        try:
            # A second lock attempt must fail while the child holds the lock.
            self.assertRaises(mailbox.ExternalClashError,
                              self._box.lock)
        finally:
            # Signal the child it can now release the lock and exit.
            p.send(b'p')
        # Wait for child to exit.  Locking should now succeed.
        exited_pid, status = os.waitpid(pid, 0)
        self._box.lock()
        self._box.unlock()
    def test_relock(self):
        # Test case for bug #1575506: the mailbox class was locking the
        # wrong file object in its flush() method.
        msg = "Subject: sub\n\nbody\n"
        key1 = self._box.add(msg)
        self._box.flush()
        self._box.close()
        self._box = self._factory(self._path)
        self._box.lock()
        key2 = self._box.add(msg)
        self._box.flush()
        # The lock must survive flush() rewriting the mailbox file.
        self.assertTrue(self._box._locked)
        self._box.close()
class TestMbox(_TestMboxMMDF, unittest.TestCase):
    """mbox-specific tests, plus everything inherited from _TestMboxMMDF."""
    _factory = lambda self, path, factory=None: mailbox.mbox(path, factory)
    def test_file_perms(self):
        # From bug #3228, we want to verify that the mailbox file isn't executable,
        # even if the umask is set to something that would leave executable bits set.
        # We only run this test on platforms that support umask.
        if hasattr(os, 'umask') and hasattr(os, 'stat'):
            try:
                old_umask = os.umask(0077)
                self._box.close()
                os.unlink(self._path)
                # Recreate the mailbox from scratch under the new umask.
                self._box = mailbox.mbox(self._path, create=True)
                self._box.add('')
                self._box.close()
            finally:
                os.umask(old_umask)
            st = os.stat(self._path)
            perms = st.st_mode
            self.assertFalse((perms & 0111)) # Execute bits should all be off.
    def test_terminating_newline(self):
        message = email.message.Message()
        message['From'] = 'john@example.com'
        message.set_payload('No newline at the end')
        i = self._box.add(message)
        # A newline should have been appended to the payload
        message = self._box.get(i)
        self.assertEqual(message.get_payload(), 'No newline at the end\n')
    def test_message_separator(self):
        # Check there's always a single blank line after each message
        self._box.add('From: foo\n\n0') # No newline at the end
        with open(self._path) as f:
            data = f.read()
            self.assertEqual(data[-3:], '0\n\n')
        self._box.add('From: foo\n\n0\n') # Newline at the end
        with open(self._path) as f:
            data = f.read()
            self.assertEqual(data[-3:], '0\n\n')
class TestMMDF(_TestMboxMMDF, unittest.TestCase):
    """MMDF-format mailbox tests; all cases are inherited from _TestMboxMMDF."""
    def _factory(self, path, factory=None):
        # Build an MMDF mailbox for the shared test machinery.
        return mailbox.MMDF(path, factory)
class TestMH(TestMailbox, unittest.TestCase):
    """Tests for the MH directory-based mailbox format: folders,
    sequences, packing, and locking."""
    _factory = lambda self, path, factory=None: mailbox.MH(path, factory)
    def test_list_folders(self):
        # List folders
        self._box.add_folder('one')
        self._box.add_folder('two')
        self._box.add_folder('three')
        self.assertEqual(len(self._box.list_folders()), 3)
        self.assertEqual(set(self._box.list_folders()),
                         set(('one', 'two', 'three')))
    def test_get_folder(self):
        # Open folders
        def dummy_factory (s):
            return None
        self._box = self._factory(self._path, dummy_factory)
        new_folder = self._box.add_folder('foo.bar')
        folder0 = self._box.get_folder('foo.bar')
        folder0.add(self._template % 'bar')
        self.assertTrue(os.path.isdir(os.path.join(self._path, 'foo.bar')))
        folder1 = self._box.get_folder('foo.bar')
        self.assertEqual(folder1.get_string(folder1.keys()[0]),
                         self._template % 'bar')
        # Test for bug #1569790: verify that folders returned by .get_folder()
        # use the same factory function.
        self.assertIs(new_folder._factory, self._box._factory)
        self.assertIs(folder0._factory, self._box._factory)
    def test_add_and_remove_folders(self):
        # Delete folders
        self._box.add_folder('one')
        self._box.add_folder('two')
        self.assertEqual(len(self._box.list_folders()), 2)
        self.assertEqual(set(self._box.list_folders()), set(('one', 'two')))
        self._box.remove_folder('one')
        self.assertEqual(len(self._box.list_folders()), 1)
        self.assertEqual(set(self._box.list_folders()), set(('two', )))
        self._box.add_folder('three')
        self.assertEqual(len(self._box.list_folders()), 2)
        self.assertEqual(set(self._box.list_folders()), set(('two', 'three')))
        self._box.remove_folder('three')
        self.assertEqual(len(self._box.list_folders()), 1)
        self.assertEqual(set(self._box.list_folders()), set(('two', )))
        self._box.remove_folder('two')
        self.assertEqual(len(self._box.list_folders()), 0)
        self.assertEqual(self._box.list_folders(), [])
    def test_sequences(self):
        # Get and set sequences
        self.assertEqual(self._box.get_sequences(), {})
        msg0 = mailbox.MHMessage(self._template % 0)
        msg0.add_sequence('foo')
        key0 = self._box.add(msg0)
        self.assertEqual(self._box.get_sequences(), {'foo':[key0]})
        msg1 = mailbox.MHMessage(self._template % 1)
        msg1.set_sequences(['bar', 'replied', 'foo'])
        key1 = self._box.add(msg1)
        self.assertEqual(self._box.get_sequences(),
                         {'foo':[key0, key1], 'bar':[key1], 'replied':[key1]})
        # Replacing a message replaces its sequence memberships too.
        msg0.set_sequences(['flagged'])
        self._box[key0] = msg0
        self.assertEqual(self._box.get_sequences(),
                         {'foo':[key1], 'bar':[key1], 'replied':[key1],
                          'flagged':[key0]})
        # Removing a message drops it from all sequences; empty sequences
        # disappear entirely.
        self._box.remove(key1)
        self.assertEqual(self._box.get_sequences(), {'flagged':[key0]})
    def test_issue2625(self):
        # Issue #2625: fetching a message right after adding it (with
        # sequence data to be written) must not fail.
        msg0 = mailbox.MHMessage(self._template % 0)
        msg0.add_sequence('foo')
        key0 = self._box.add(msg0)
        refmsg0 = self._box.get_message(key0)
    def test_issue7627(self):
        # Issue #7627: removing a message while the mailbox is locked
        # must not fail.
        msg0 = mailbox.MHMessage(self._template % 0)
        key0 = self._box.add(msg0)
        self._box.lock()
        self._box.remove(key0)
        self._box.unlock()
    def test_pack(self):
        # Pack the contents of the mailbox
        msg0 = mailbox.MHMessage(self._template % 0)
        msg1 = mailbox.MHMessage(self._template % 1)
        msg2 = mailbox.MHMessage(self._template % 2)
        msg3 = mailbox.MHMessage(self._template % 3)
        msg0.set_sequences(['foo', 'unseen'])
        msg1.set_sequences(['foo'])
        msg2.set_sequences(['foo', 'flagged'])
        msg3.set_sequences(['foo', 'bar', 'replied'])
        key0 = self._box.add(msg0)
        key1 = self._box.add(msg1)
        key2 = self._box.add(msg2)
        key3 = self._box.add(msg3)
        self.assertEqual(self._box.get_sequences(),
                         {'foo':[key0,key1,key2,key3], 'unseen':[key0],
                          'flagged':[key2], 'bar':[key3], 'replied':[key3]})
        self._box.remove(key2)
        self.assertEqual(self._box.get_sequences(),
                         {'foo':[key0,key1,key3], 'unseen':[key0], 'bar':[key3],
                          'replied':[key3]})
        # pack() renumbers the remaining messages 1..n and rewrites the
        # sequences accordingly.
        self._box.pack()
        self.assertEqual(self._box.keys(), [1, 2, 3])
        self.assertEqual(self._box.get_sequences(),
                         {'foo':[1, 2, 3], 'unseen':[1], 'bar':[3], 'replied':[3]})
        # Test case for packing while holding the mailbox locked.
        key0 = self._box.add(msg1)
        key1 = self._box.add(msg1)
        key2 = self._box.add(msg1)
        key3 = self._box.add(msg1)
        self._box.remove(key0)
        self._box.remove(key2)
        self._box.lock()
        self._box.pack()
        self._box.unlock()
        self.assertEqual(self._box.get_sequences(),
                         {'foo':[1, 2, 3, 4, 5],
                          'unseen':[1], 'bar':[3], 'replied':[3]})
    def _get_lock_path(self):
        # MH locks via a lock file next to the .mh_sequences file.
        return os.path.join(self._path, '.mh_sequences.lock')
class TestBabyl(_TestSingleFile, unittest.TestCase):
    """Tests for the Babyl single-file mailbox format."""
    _factory = lambda self, path, factory=None: mailbox.Babyl(path, factory)
    def tearDown(self):
        # Remove the mailbox file plus any leftover lock files (<path>.*).
        self._box.close()
        self._delete_recursively(self._path)
        for lock_remnant in glob.glob(self._path + '.*'):
            test_support.unlink(lock_remnant)
    def test_labels(self):
        # Get labels from the mailbox
        self.assertEqual(self._box.get_labels(), [])
        msg0 = mailbox.BabylMessage(self._template % 0)
        msg0.add_label('foo')
        key0 = self._box.add(msg0)
        self.assertEqual(self._box.get_labels(), ['foo'])
        msg1 = mailbox.BabylMessage(self._template % 1)
        msg1.set_labels(['bar', 'answered', 'foo'])
        key1 = self._box.add(msg1)
        # NOTE(review): 'answered' does not appear in get_labels() —
        # presumably because it is a standard Babyl attribute rather than
        # a user-defined label; verify against the mailbox module.
        self.assertEqual(set(self._box.get_labels()), set(['foo', 'bar']))
        msg0.set_labels(['blah', 'filed'])
        self._box[key0] = msg0
        self.assertEqual(set(self._box.get_labels()),
                         set(['foo', 'bar', 'blah']))
        self._box.remove(key1)
        self.assertEqual(set(self._box.get_labels()), set(['blah']))
class TestMessage(TestBase, unittest.TestCase):
    """Tests for mailbox.Message construction from the various supported
    sources, and for its format-conversion machinery."""
    _factory = mailbox.Message      # Overridden by subclasses to reuse tests
    def setUp(self):
        # A scratch path for file-based initialization tests.
        self._path = test_support.TESTFN
    def tearDown(self):
        self._delete_recursively(self._path)
    def test_initialize_with_eMM(self):
        # Initialize based on email.message.Message instance
        eMM = email.message_from_string(_sample_message)
        msg = self._factory(eMM)
        self._post_initialize_hook(msg)
        self._check_sample(msg)
    def test_initialize_with_string(self):
        # Initialize based on string
        msg = self._factory(_sample_message)
        self._post_initialize_hook(msg)
        self._check_sample(msg)
    def test_initialize_with_file(self):
        # Initialize based on contents of file
        with open(self._path, 'w+') as f:
            f.write(_sample_message)
            f.seek(0)
            msg = self._factory(f)
        self._post_initialize_hook(msg)
        self._check_sample(msg)
    def test_initialize_with_nothing(self):
        # Initialize without arguments
        msg = self._factory()
        self._post_initialize_hook(msg)
        # An empty message is still a Message in every relevant sense.
        self.assertIsInstance(msg, email.message.Message)
        self.assertIsInstance(msg, mailbox.Message)
        self.assertIsInstance(msg, self._factory)
        self.assertEqual(msg.keys(), [])
        self.assertFalse(msg.is_multipart())
        self.assertEqual(msg.get_payload(), None)
    def test_initialize_incorrectly(self):
        # Initialize with invalid argument
        self.assertRaises(TypeError, lambda: self._factory(object()))
    def test_become_message(self):
        # Take on the state of another message
        eMM = email.message_from_string(_sample_message)
        msg = self._factory()
        msg._become_message(eMM)
        self._check_sample(msg)
    def test_explain_to(self):
        # Copy self's format-specific data to other message formats.
        # This test is superficial; better ones are in TestMessageConversion.
        msg = self._factory()
        for class_ in (mailbox.Message, mailbox.MaildirMessage,
                       mailbox.mboxMessage, mailbox.MHMessage,
                       mailbox.BabylMessage, mailbox.MMDFMessage):
            other_msg = class_()
            msg._explain_to(other_msg)
        # Targets outside the mailbox.Message hierarchy are rejected.
        other_msg = email.message.Message()
        self.assertRaises(TypeError, lambda: msg._explain_to(other_msg))
    def _post_initialize_hook(self, msg):
        # Overridden by subclasses to check extra things after initialization
        pass
class TestMaildirMessage(TestMessage, unittest.TestCase):
    """Tests for MaildirMessage: subdir, flags, date and info handling."""
    _factory = mailbox.MaildirMessage
    def _post_initialize_hook(self, msg):
        # New MaildirMessages start in 'new' with an empty info string.
        self.assertEqual(msg._subdir, 'new')
        self.assertEqual(msg._info,'')
    def test_subdir(self):
        # Use get_subdir() and set_subdir()
        msg = mailbox.MaildirMessage(_sample_message)
        self.assertEqual(msg.get_subdir(), 'new')
        msg.set_subdir('cur')
        self.assertEqual(msg.get_subdir(), 'cur')
        msg.set_subdir('new')
        self.assertEqual(msg.get_subdir(), 'new')
        # Only 'new' and 'cur' are valid subdirs; 'tmp' is rejected.
        self.assertRaises(ValueError, lambda: msg.set_subdir('tmp'))
        self.assertEqual(msg.get_subdir(), 'new')
        msg.set_subdir('new')
        self.assertEqual(msg.get_subdir(), 'new')
        self._check_sample(msg)
    def test_flags(self):
        # Use get_flags(), set_flags(), add_flag(), remove_flag()
        msg = mailbox.MaildirMessage(_sample_message)
        self.assertEqual(msg.get_flags(), '')
        self.assertEqual(msg.get_subdir(), 'new')
        msg.set_flags('F')
        self.assertEqual(msg.get_subdir(), 'new')
        self.assertEqual(msg.get_flags(), 'F')
        # Flags are stored sorted regardless of the order given.
        msg.set_flags('SDTP')
        self.assertEqual(msg.get_flags(), 'DPST')
        msg.add_flag('FT')
        self.assertEqual(msg.get_flags(), 'DFPST')
        # Removing flags that are not set is silently ignored.
        msg.remove_flag('TDRP')
        self.assertEqual(msg.get_flags(), 'FS')
        self.assertEqual(msg.get_subdir(), 'new')
        self._check_sample(msg)
    def test_date(self):
        # Use get_date() and set_date()
        msg = mailbox.MaildirMessage(_sample_message)
        # A fresh message's date defaults to (roughly) now.
        diff = msg.get_date() - time.time()
        self.assertTrue(abs(diff) < 60, diff)
        msg.set_date(0.0)
        self.assertEqual(msg.get_date(), 0.0)
    def test_info(self):
        # Use get_info() and set_info()
        msg = mailbox.MaildirMessage(_sample_message)
        self.assertEqual(msg.get_info(), '')
        msg.set_info('1,foo=bar')
        self.assertEqual(msg.get_info(), '1,foo=bar')
        # Only strings are accepted as info.
        self.assertRaises(TypeError, lambda: msg.set_info(None))
        self._check_sample(msg)
    def test_info_and_flags(self):
        # Test interaction of info and flag methods
        msg = mailbox.MaildirMessage(_sample_message)
        self.assertEqual(msg.get_info(), '')
        # Setting flags produces a '2,<flags>' info string.
        msg.set_flags('SF')
        self.assertEqual(msg.get_flags(), 'FS')
        self.assertEqual(msg.get_info(), '2,FS')
        # A non-'2,' info string clears the flags.
        msg.set_info('1,')
        self.assertEqual(msg.get_flags(), '')
        self.assertEqual(msg.get_info(), '1,')
        msg.remove_flag('RPT')
        self.assertEqual(msg.get_flags(), '')
        self.assertEqual(msg.get_info(), '1,')
        msg.add_flag('D')
        self.assertEqual(msg.get_flags(), 'D')
        self.assertEqual(msg.get_info(), '2,D')
        self._check_sample(msg)
class _TestMboxMMDFMessage:
    """Shared tests for mboxMessage and MMDFMessage: the envelope "From "
    line and Status/X-Status flag handling."""
    _factory = mailbox._mboxMMDFMessage
    def _post_initialize_hook(self, msg):
        # A fresh message must carry a default "From " line.
        self._check_from(msg)
    def test_initialize_with_unixfrom(self):
        # Initialize with a message that already has a _unixfrom attribute
        msg = mailbox.Message(_sample_message)
        msg.set_unixfrom('From foo@bar blah')
        msg = mailbox.mboxMessage(msg)
        # The 'From ' prefix is stripped from the stored value.
        self.assertEqual(msg.get_from(), 'foo@bar blah')
    def test_from(self):
        # Get and set "From " line
        msg = mailbox.mboxMessage(_sample_message)
        self._check_from(msg)
        msg.set_from('foo bar')
        self.assertEqual(msg.get_from(), 'foo bar')
        # A true second argument appends the current time to the sender...
        msg.set_from('foo@bar', True)
        self._check_from(msg, 'foo@bar')
        # ...and a time tuple appends that specific time.
        msg.set_from('blah@temp', time.localtime())
        self._check_from(msg, 'blah@temp')
    def test_flags(self):
        # Use get_flags(), set_flags(), add_flag(), remove_flag()
        msg = mailbox.mboxMessage(_sample_message)
        self.assertEqual(msg.get_flags(), '')
        msg.set_flags('F')
        self.assertEqual(msg.get_flags(), 'F')
        # Flags come back in canonical R-O-D-F-A order, then extras.
        msg.set_flags('XODR')
        self.assertEqual(msg.get_flags(), 'RODX')
        msg.add_flag('FA')
        self.assertEqual(msg.get_flags(), 'RODFAX')
        msg.remove_flag('FDXA')
        self.assertEqual(msg.get_flags(), 'RO')
        self._check_sample(msg)
    def _check_from(self, msg, sender=None):
        # Check contents of "From " line
        if sender is None:
            sender = "MAILER-DAEMON"
        # The sender must be followed by an asctime-style timestamp.
        self.assertTrue(re.match(sender + r" \w{3} \w{3} [\d ]\d [\d ]\d:\d{2}:"
                                 r"\d{2} \d{4}", msg.get_from()))
class TestMboxMessage(_TestMboxMMDFMessage, TestMessage):
    # Run the shared "From " line and flag tests against mboxMessage.
    _factory = mailbox.mboxMessage
class TestMHMessage(TestMessage, unittest.TestCase):
    """Tests for MHMessage sequence handling."""
    _factory = mailbox.MHMessage
    def _post_initialize_hook(self, msg):
        # A fresh MHMessage belongs to no sequences.
        self.assertEqual(msg._sequences, [])
    def test_sequences(self):
        # Get, set, join, and leave sequences
        msg = mailbox.MHMessage(_sample_message)
        self.assertEqual(msg.get_sequences(), [])
        msg.set_sequences(['foobar'])
        self.assertEqual(msg.get_sequences(), ['foobar'])
        msg.set_sequences([])
        self.assertEqual(msg.get_sequences(), [])
        msg.add_sequence('unseen')
        self.assertEqual(msg.get_sequences(), ['unseen'])
        msg.add_sequence('flagged')
        self.assertEqual(msg.get_sequences(), ['unseen', 'flagged'])
        # Adding an already-present sequence is a no-op.
        msg.add_sequence('flagged')
        self.assertEqual(msg.get_sequences(), ['unseen', 'flagged'])
        msg.remove_sequence('unseen')
        self.assertEqual(msg.get_sequences(), ['flagged'])
        msg.add_sequence('foobar')
        self.assertEqual(msg.get_sequences(), ['flagged', 'foobar'])
        # Removing an absent sequence is silently ignored.
        msg.remove_sequence('replied')
        self.assertEqual(msg.get_sequences(), ['flagged', 'foobar'])
        msg.set_sequences(['foobar', 'replied'])
        self.assertEqual(msg.get_sequences(), ['foobar', 'replied'])
class TestBabylMessage(TestMessage, unittest.TestCase):
    """Tests for BabylMessage label handling and visible headers."""
    _factory = mailbox.BabylMessage
    def _post_initialize_hook(self, msg):
        # A fresh BabylMessage carries no labels.
        self.assertEqual(msg._labels, [])
    def test_labels(self):
        # Get, set, join, and leave labels
        msg = mailbox.BabylMessage(_sample_message)
        self.assertEqual(msg.get_labels(), [])
        msg.set_labels(['foobar'])
        self.assertEqual(msg.get_labels(), ['foobar'])
        msg.set_labels([])
        self.assertEqual(msg.get_labels(), [])
        msg.add_label('filed')
        self.assertEqual(msg.get_labels(), ['filed'])
        msg.add_label('resent')
        self.assertEqual(msg.get_labels(), ['filed', 'resent'])
        # Adding an already-present label is a no-op.
        msg.add_label('resent')
        self.assertEqual(msg.get_labels(), ['filed', 'resent'])
        msg.remove_label('filed')
        self.assertEqual(msg.get_labels(), ['resent'])
        msg.add_label('foobar')
        self.assertEqual(msg.get_labels(), ['resent', 'foobar'])
        # Removing an absent label is silently ignored.
        msg.remove_label('unseen')
        self.assertEqual(msg.get_labels(), ['resent', 'foobar'])
        msg.set_labels(['foobar', 'answered'])
        self.assertEqual(msg.get_labels(), ['foobar', 'answered'])
    def test_visible(self):
        # Get, set, and update visible headers
        msg = mailbox.BabylMessage(_sample_message)
        visible = msg.get_visible()
        self.assertEqual(visible.keys(), [])
        self.assertIs(visible.get_payload(), None)
        # Mutating the returned copy does not affect the message...
        visible['User-Agent'] = 'FooBar 1.0'
        visible['X-Whatever'] = 'Blah'
        self.assertEqual(msg.get_visible().keys(), [])
        # ...until it is set back explicitly.
        msg.set_visible(visible)
        visible = msg.get_visible()
        self.assertEqual(visible.keys(), ['User-Agent', 'X-Whatever'])
        self.assertEqual(visible['User-Agent'], 'FooBar 1.0')
        self.assertEqual(visible['X-Whatever'], 'Blah')
        self.assertIs(visible.get_payload(), None)
        # update_visible() does not mutate previously returned copies...
        msg.update_visible()
        self.assertEqual(visible.keys(), ['User-Agent', 'X-Whatever'])
        self.assertIs(visible.get_payload(), None)
        # ...but a fresh copy reflects the merged standard headers.
        visible = msg.get_visible()
        self.assertEqual(visible.keys(), ['User-Agent', 'Date', 'From', 'To',
                                          'Subject'])
        for header in ('User-Agent', 'Date', 'From', 'To', 'Subject'):
            self.assertEqual(visible[header], msg[header])
class TestMMDFMessage(_TestMboxMMDFMessage, TestMessage):
    # Run the shared "From " line and flag tests against MMDFMessage.
    _factory = mailbox.MMDFMessage
class TestMessageConversion(TestBase, unittest.TestCase):
    """Exhaustive pairwise conversion tests between the message formats.

    Each test converts one format into another and checks that the
    format-specific metadata (Maildir flags, mbox/MMDF Status flags, MH
    sequences, Babyl labels) is mapped as documented.  The `pairs` tuples
    are (source metadata, expected metadata after conversion).
    """
    def test_plain_to_x(self):
        # Convert Message to all formats
        for class_ in (mailbox.Message, mailbox.MaildirMessage,
                       mailbox.mboxMessage, mailbox.MHMessage,
                       mailbox.BabylMessage, mailbox.MMDFMessage):
            msg_plain = mailbox.Message(_sample_message)
            msg = class_(msg_plain)
            self._check_sample(msg)
    def test_x_to_plain(self):
        # Convert all formats to Message
        for class_ in (mailbox.Message, mailbox.MaildirMessage,
                       mailbox.mboxMessage, mailbox.MHMessage,
                       mailbox.BabylMessage, mailbox.MMDFMessage):
            msg = class_(_sample_message)
            msg_plain = mailbox.Message(msg)
            self._check_sample(msg_plain)
    def test_x_to_invalid(self):
        # Convert all formats to an invalid format
        for class_ in (mailbox.Message, mailbox.MaildirMessage,
                       mailbox.mboxMessage, mailbox.MHMessage,
                       mailbox.BabylMessage, mailbox.MMDFMessage):
            self.assertRaises(TypeError, lambda: class_(False))
    def test_maildir_to_maildir(self):
        # Convert MaildirMessage to MaildirMessage: flags, subdir and
        # date must all survive the round trip.
        msg_maildir = mailbox.MaildirMessage(_sample_message)
        msg_maildir.set_flags('DFPRST')
        msg_maildir.set_subdir('cur')
        date = msg_maildir.get_date()
        msg = mailbox.MaildirMessage(msg_maildir)
        self._check_sample(msg)
        self.assertEqual(msg.get_flags(), 'DFPRST')
        self.assertEqual(msg.get_subdir(), 'cur')
        self.assertEqual(msg.get_date(), date)
    def test_maildir_to_mboxmmdf(self):
        # Convert MaildirMessage to mboxmessage and MMDFMessage
        pairs = (('D', ''), ('F', 'F'), ('P', ''), ('R', 'A'), ('S', 'R'),
                 ('T', 'D'), ('DFPRST', 'RDFA'))
        for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
            msg_maildir = mailbox.MaildirMessage(_sample_message)
            msg_maildir.set_date(0.0)
            for setting, result in pairs:
                msg_maildir.set_flags(setting)
                msg = class_(msg_maildir)
                self.assertEqual(msg.get_flags(), result)
                # The Maildir date becomes the "From " line timestamp.
                self.assertEqual(msg.get_from(), 'MAILER-DAEMON %s' %
                                 time.asctime(time.gmtime(0.0)))
                # A message in 'cur' additionally gains the O (old) flag.
                msg_maildir.set_subdir('cur')
                self.assertEqual(class_(msg_maildir).get_flags(), 'RODFA')
    def test_maildir_to_mh(self):
        # Convert MaildirMessage to MHMessage
        msg_maildir = mailbox.MaildirMessage(_sample_message)
        pairs = (('D', ['unseen']), ('F', ['unseen', 'flagged']),
                 ('P', ['unseen']), ('R', ['unseen', 'replied']), ('S', []),
                 ('T', ['unseen']), ('DFPRST', ['replied', 'flagged']))
        for setting, result in pairs:
            msg_maildir.set_flags(setting)
            self.assertEqual(mailbox.MHMessage(msg_maildir).get_sequences(),
                             result)
    def test_maildir_to_babyl(self):
        # Convert MaildirMessage to Babyl
        msg_maildir = mailbox.MaildirMessage(_sample_message)
        pairs = (('D', ['unseen']), ('F', ['unseen']),
                 ('P', ['unseen', 'forwarded']), ('R', ['unseen', 'answered']),
                 ('S', []), ('T', ['unseen', 'deleted']),
                 ('DFPRST', ['deleted', 'answered', 'forwarded']))
        for setting, result in pairs:
            msg_maildir.set_flags(setting)
            self.assertEqual(mailbox.BabylMessage(msg_maildir).get_labels(),
                             result)
    def test_mboxmmdf_to_maildir(self):
        # Convert mboxMessage and MMDFMessage to MaildirMessage
        for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
            msg_mboxMMDF = class_(_sample_message)
            msg_mboxMMDF.set_from('foo@bar', time.gmtime(0.0))
            pairs = (('R', 'S'), ('O', ''), ('D', 'T'), ('F', 'F'), ('A', 'R'),
                     ('RODFA', 'FRST'))
            for setting, result in pairs:
                msg_mboxMMDF.set_flags(setting)
                msg = mailbox.MaildirMessage(msg_mboxMMDF)
                self.assertEqual(msg.get_flags(), result)
                # The "From " timestamp becomes the Maildir date.
                self.assertEqual(msg.get_date(), 0.0)
            # The O (old) flag maps to the 'cur' subdirectory.
            msg_mboxMMDF.set_flags('O')
            self.assertEqual(mailbox.MaildirMessage(msg_mboxMMDF).get_subdir(),
                             'cur')
    def test_mboxmmdf_to_mboxmmdf(self):
        # Convert mboxMessage and MMDFMessage to mboxMessage and MMDFMessage
        for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
            msg_mboxMMDF = class_(_sample_message)
            msg_mboxMMDF.set_flags('RODFA')
            msg_mboxMMDF.set_from('foo@bar')
            for class2_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
                msg2 = class2_(msg_mboxMMDF)
                self.assertEqual(msg2.get_flags(), 'RODFA')
                self.assertEqual(msg2.get_from(), 'foo@bar')
    def test_mboxmmdf_to_mh(self):
        # Convert mboxMessage and MMDFMessage to MHMessage
        for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
            msg_mboxMMDF = class_(_sample_message)
            pairs = (('R', []), ('O', ['unseen']), ('D', ['unseen']),
                     ('F', ['unseen', 'flagged']),
                     ('A', ['unseen', 'replied']),
                     ('RODFA', ['replied', 'flagged']))
            for setting, result in pairs:
                msg_mboxMMDF.set_flags(setting)
                self.assertEqual(mailbox.MHMessage(msg_mboxMMDF).get_sequences(),
                                 result)
    def test_mboxmmdf_to_babyl(self):
        # Convert mboxMessage and MMDFMessage to BabylMessage
        for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
            msg = class_(_sample_message)
            pairs = (('R', []), ('O', ['unseen']),
                     ('D', ['unseen', 'deleted']), ('F', ['unseen']),
                     ('A', ['unseen', 'answered']),
                     ('RODFA', ['deleted', 'answered']))
            for setting, result in pairs:
                msg.set_flags(setting)
                self.assertEqual(mailbox.BabylMessage(msg).get_labels(), result)
    def test_mh_to_maildir(self):
        # Convert MHMessage to MaildirMessage
        pairs = (('unseen', ''), ('replied', 'RS'), ('flagged', 'FS'))
        for setting, result in pairs:
            msg = mailbox.MHMessage(_sample_message)
            msg.add_sequence(setting)
            self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), result)
            self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur')
        # Combined sequences: 'unseen' suppresses the S (seen) flag.
        msg = mailbox.MHMessage(_sample_message)
        msg.add_sequence('unseen')
        msg.add_sequence('replied')
        msg.add_sequence('flagged')
        self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), 'FR')
        self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur')
    def test_mh_to_mboxmmdf(self):
        # Convert MHMessage to mboxMessage and MMDFMessage
        pairs = (('unseen', 'O'), ('replied', 'ROA'), ('flagged', 'ROF'))
        for setting, result in pairs:
            msg = mailbox.MHMessage(_sample_message)
            msg.add_sequence(setting)
            for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
                self.assertEqual(class_(msg).get_flags(), result)
        # Combined sequences: 'unseen' suppresses the R (read) flag.
        msg = mailbox.MHMessage(_sample_message)
        msg.add_sequence('unseen')
        msg.add_sequence('replied')
        msg.add_sequence('flagged')
        for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
            self.assertEqual(class_(msg).get_flags(), 'OFA')
    def test_mh_to_mh(self):
        # Convert MHMessage to MHMessage
        msg = mailbox.MHMessage(_sample_message)
        msg.add_sequence('unseen')
        msg.add_sequence('replied')
        msg.add_sequence('flagged')
        self.assertEqual(mailbox.MHMessage(msg).get_sequences(),
                         ['unseen', 'replied', 'flagged'])
    def test_mh_to_babyl(self):
        # Convert MHMessage to BabylMessage
        pairs = (('unseen', ['unseen']), ('replied', ['answered']),
                 ('flagged', []))
        for setting, result in pairs:
            msg = mailbox.MHMessage(_sample_message)
            msg.add_sequence(setting)
            self.assertEqual(mailbox.BabylMessage(msg).get_labels(), result)
        msg = mailbox.MHMessage(_sample_message)
        msg.add_sequence('unseen')
        msg.add_sequence('replied')
        msg.add_sequence('flagged')
        self.assertEqual(mailbox.BabylMessage(msg).get_labels(),
                         ['unseen', 'answered'])
    def test_babyl_to_maildir(self):
        # Convert BabylMessage to MaildirMessage
        pairs = (('unseen', ''), ('deleted', 'ST'), ('filed', 'S'),
                 ('answered', 'RS'), ('forwarded', 'PS'), ('edited', 'S'),
                 ('resent', 'PS'))
        for setting, result in pairs:
            msg = mailbox.BabylMessage(_sample_message)
            msg.add_label(setting)
            self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), result)
            self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur')
        # Combined labels: 'unseen' suppresses the S (seen) flag.
        msg = mailbox.BabylMessage(_sample_message)
        for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
                      'edited', 'resent'):
            msg.add_label(label)
        self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), 'PRT')
        self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur')
    def test_babyl_to_mboxmmdf(self):
        # Convert BabylMessage to mboxMessage and MMDFMessage
        pairs = (('unseen', 'O'), ('deleted', 'ROD'), ('filed', 'RO'),
                 ('answered', 'ROA'), ('forwarded', 'RO'), ('edited', 'RO'),
                 ('resent', 'RO'))
        for setting, result in pairs:
            for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
                msg = mailbox.BabylMessage(_sample_message)
                msg.add_label(setting)
                self.assertEqual(class_(msg).get_flags(), result)
        # Combined labels: 'unseen' suppresses the R (read) flag.
        msg = mailbox.BabylMessage(_sample_message)
        for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
                      'edited', 'resent'):
            msg.add_label(label)
        for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
            self.assertEqual(class_(msg).get_flags(), 'ODA')
    def test_babyl_to_mh(self):
        # Convert BabylMessage to MHMessage
        pairs = (('unseen', ['unseen']), ('deleted', []), ('filed', []),
                 ('answered', ['replied']), ('forwarded', []), ('edited', []),
                 ('resent', []))
        for setting, result in pairs:
            msg = mailbox.BabylMessage(_sample_message)
            msg.add_label(setting)
            self.assertEqual(mailbox.MHMessage(msg).get_sequences(), result)
        msg = mailbox.BabylMessage(_sample_message)
        for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
                      'edited', 'resent'):
            msg.add_label(label)
        self.assertEqual(mailbox.MHMessage(msg).get_sequences(),
                         ['unseen', 'replied'])
    def test_babyl_to_babyl(self):
        # Convert BabylMessage to BabylMessage: labels and visible headers
        # must survive the round trip.
        msg = mailbox.BabylMessage(_sample_message)
        msg.update_visible()
        for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded',
                      'edited', 'resent'):
            msg.add_label(label)
        msg2 = mailbox.BabylMessage(msg)
        self.assertEqual(msg2.get_labels(), ['unseen', 'deleted', 'filed',
                                             'answered', 'forwarded', 'edited',
                                             'resent'])
        self.assertEqual(msg.get_visible().keys(), msg2.get_visible().keys())
        for key in msg.get_visible().keys():
            self.assertEqual(msg.get_visible()[key], msg2.get_visible()[key])
class TestProxyFileBase(TestBase):
    """Shared assertions for mailbox's internal file wrappers.

    Subclasses wrap a real file in mailbox._ProxyFile or
    mailbox._PartialFile and pass the wrapper here; the visible
    content is expected to be 'bar' (for _test_read) or the lines
    'foo', 'bar', 'fred', 'bob' separated by os.linesep.
    """
    def _test_read(self, proxy):
        # Read by byte
        proxy.seek(0)
        self.assertEqual(proxy.read(), 'bar')
        proxy.seek(1)
        self.assertEqual(proxy.read(), 'ar')
        proxy.seek(0)
        self.assertEqual(proxy.read(2), 'ba')
        proxy.seek(1)
        # A negative size reads to EOF, like file.read().
        self.assertEqual(proxy.read(-1), 'ar')
        proxy.seek(2)
        # Oversized requests are clamped to the remaining bytes.
        self.assertEqual(proxy.read(1000), 'r')
    def _test_readline(self, proxy):
        # Read by line
        proxy.seek(0)
        self.assertEqual(proxy.readline(), 'foo' + os.linesep)
        self.assertEqual(proxy.readline(), 'bar' + os.linesep)
        self.assertEqual(proxy.readline(), 'fred' + os.linesep)
        self.assertEqual(proxy.readline(), 'bob')
        proxy.seek(2)
        self.assertEqual(proxy.readline(), 'o' + os.linesep)
        # Offset of the start of the 'fred' line.
        proxy.seek(6 + 2 * len(os.linesep))
        self.assertEqual(proxy.readline(), 'fred' + os.linesep)
        proxy.seek(6 + 2 * len(os.linesep))
        # A size limit may split a line; a negative limit reads it all.
        self.assertEqual(proxy.readline(2), 'fr')
        self.assertEqual(proxy.readline(-10), 'ed' + os.linesep)
    def _test_readlines(self, proxy):
        # Read multiple lines
        proxy.seek(0)
        self.assertEqual(proxy.readlines(), ['foo' + os.linesep,
                                             'bar' + os.linesep,
                                             'fred' + os.linesep, 'bob'])
        proxy.seek(0)
        # The sizehint stops reading after the line that crosses it.
        self.assertEqual(proxy.readlines(2), ['foo' + os.linesep])
        proxy.seek(3 + len(os.linesep))
        self.assertEqual(proxy.readlines(4 + len(os.linesep)),
                         ['bar' + os.linesep, 'fred' + os.linesep])
        proxy.seek(3)
        self.assertEqual(proxy.readlines(1000), [os.linesep, 'bar' + os.linesep,
                                                 'fred' + os.linesep, 'bob'])
    def _test_iteration(self, proxy):
        # Iterate by line
        proxy.seek(0)
        iterator = iter(proxy)
        self.assertEqual(list(iterator),
            ['foo' + os.linesep, 'bar' + os.linesep, 'fred' + os.linesep, 'bob'])
    def _test_seek_and_tell(self, proxy):
        # Seek and use tell to check position
        proxy.seek(3)
        self.assertEqual(proxy.tell(), 3)
        self.assertEqual(proxy.read(len(os.linesep)), os.linesep)
        # whence=1: seek relative to the current position.
        proxy.seek(2, 1)
        self.assertEqual(proxy.read(1 + len(os.linesep)), 'r' + os.linesep)
        # whence=2: seek relative to the end of the proxied region.
        proxy.seek(-3 - len(os.linesep), 2)
        self.assertEqual(proxy.read(3), 'bar')
        # whence=0: absolute seek.
        proxy.seek(2, 0)
        self.assertEqual(proxy.read(), 'o' + os.linesep + 'bar' + os.linesep)
        # Seeking past EOF is allowed; reads there return nothing.
        proxy.seek(100)
        self.assertEqual(proxy.read(), '')
    def _test_close(self, proxy):
        # Close a file
        proxy.close()
        # Issue 11700 subsequent closes should be a no-op, not an error.
        proxy.close()
class TestProxyFile(TestProxyFileBase, unittest.TestCase):
    """Run the shared wrapper checks against mailbox._ProxyFile."""
    def setUp(self):
        self._path = test_support.TESTFN
        self._file = open(self._path, 'wb+')
    def tearDown(self):
        self._file.close()
        self._delete_recursively(self._path)
    def _make_proxy(self, content):
        # Write the fixture content, then wrap the file in a proxy.
        self._file.write(content)
        return mailbox._ProxyFile(self._file)
    def test_initialize(self):
        # A proxy starts at the underlying file's current position
        # (unless one is given) and seeking it never moves the real file.
        self._file.write('foo')
        pos = self._file.tell()
        proxy0 = mailbox._ProxyFile(self._file)
        self.assertEqual(proxy0.tell(), pos)
        self.assertEqual(self._file.tell(), pos)
        proxy1 = mailbox._ProxyFile(self._file, 0)
        self.assertEqual(proxy1.tell(), 0)
        self.assertEqual(self._file.tell(), pos)
    def test_read(self):
        self._test_read(self._make_proxy('bar'))
    def test_readline(self):
        self._test_readline(self._make_proxy(
            'foo%sbar%sfred%sbob' % (os.linesep, os.linesep, os.linesep)))
    def test_readlines(self):
        self._test_readlines(self._make_proxy(
            'foo%sbar%sfred%sbob' % (os.linesep, os.linesep, os.linesep)))
    def test_iteration(self):
        self._test_iteration(self._make_proxy(
            'foo%sbar%sfred%sbob' % (os.linesep, os.linesep, os.linesep)))
    def test_seek_and_tell(self):
        self._test_seek_and_tell(self._make_proxy(
            'foo%sbar%s' % (os.linesep, os.linesep)))
    def test_close(self):
        self._test_close(self._make_proxy(
            'foo%sbar%s' % (os.linesep, os.linesep)))
class TestPartialFile(TestProxyFileBase, unittest.TestCase):
    """Run the shared wrapper checks against mailbox._PartialFile.

    _PartialFile exposes only a [start, stop) slice of the underlying
    file; the padding bytes written outside each window below must stay
    invisible to the wrapper.
    """
    def setUp(self):
        self._path = test_support.TESTFN
        self._file = open(self._path, 'wb+')
    def tearDown(self):
        self._file.close()
        self._delete_recursively(self._path)
    def test_initialize(self):
        # Initialize and check position
        self._file.write('foo' + os.linesep + 'bar')
        pos = self._file.tell()
        proxy = mailbox._PartialFile(self._file, 2, 5)
        # tell() is relative to the window start; the underlying file's
        # own position is left untouched.
        self.assertEqual(proxy.tell(), 0)
        self.assertEqual(self._file.tell(), pos)
    def test_read(self):
        # Window [3, 6) exposes exactly 'bar'; the '***' padding is hidden.
        self._file.write('***bar***')
        self._test_read(mailbox._PartialFile(self._file, 3, 6))
    def test_readline(self):
        self._file.write('!!!!!foo%sbar%sfred%sbob!!!!!' %
                         (os.linesep, os.linesep, os.linesep))
        self._test_readline(mailbox._PartialFile(self._file, 5,
                                                 18 + 3 * len(os.linesep)))
    def test_readlines(self):
        self._file.write('foo%sbar%sfred%sbob?????' %
                         (os.linesep, os.linesep, os.linesep))
        self._test_readlines(mailbox._PartialFile(self._file, 0,
                                                  13 + 3 * len(os.linesep)))
    def test_iteration(self):
        self._file.write('____foo%sbar%sfred%sbob####' %
                         (os.linesep, os.linesep, os.linesep))
        self._test_iteration(mailbox._PartialFile(self._file, 4,
                                                  17 + 3 * len(os.linesep)))
    def test_seek_and_tell(self):
        self._file.write('(((foo%sbar%s$$$' % (os.linesep, os.linesep))
        self._test_seek_and_tell(mailbox._PartialFile(self._file, 3,
                                                      9 + 2 * len(os.linesep)))
    def test_close(self):
        self._file.write('&foo%sbar%s^' % (os.linesep, os.linesep))
        self._test_close(mailbox._PartialFile(self._file, 1,
                                              6 + 3 * len(os.linesep)))
## Start: tests from the original module (for backward compatibility).
FROM_ = "From some.body@dummy.domain Sat Jul 24 13:43:35 2004\n"
DUMMY_MESSAGE = """\
From: some.body@dummy.domain
To: me@my.domain
Subject: Simple Test
This is a dummy message.
"""
class MaildirTestCase(unittest.TestCase):
    """Legacy tests for the old Python 2 mailbox API
    (mailbox.Maildir.next(), mailbox.PortableUnixMailbox), kept for
    backward compatibility."""
    def setUp(self):
        # create a new maildir mailbox to work with:
        self._dir = test_support.TESTFN
        if os.path.isdir(self._dir):
            test_support.rmtree(self._dir)
        if os.path.isfile(self._dir):
            test_support.unlink(self._dir)
        os.mkdir(self._dir)
        os.mkdir(os.path.join(self._dir, "cur"))
        os.mkdir(os.path.join(self._dir, "tmp"))
        os.mkdir(os.path.join(self._dir, "new"))
        # _counter stands in for a PID when generating unique filenames.
        self._counter = 1
        self._msgfiles = []
    def tearDown(self):
        # NOTE(review): relies on Python 2's eager map() for the unlink
        # side effect; under Python 3 this would be a no-op generator.
        map(os.unlink, self._msgfiles)
        test_support.rmdir(os.path.join(self._dir, "cur"))
        test_support.rmdir(os.path.join(self._dir, "tmp"))
        test_support.rmdir(os.path.join(self._dir, "new"))
        test_support.rmdir(self._dir)
    def createMessage(self, dir, mbox=False):
        """Write DUMMY_MESSAGE (prefixed with the FROM_ envelope line if
        mbox is true) into tmp/, hard-link it into the given
        subdirectory, and return the tmp-side path."""
        t = int(time.time() % 1000000)
        pid = self._counter
        self._counter += 1
        filename = os.extsep.join((str(t), str(pid), "myhostname", "mydomain"))
        tmpname = os.path.join(self._dir, "tmp", filename)
        newname = os.path.join(self._dir, dir, filename)
        with open(tmpname, "w") as fp:
            self._msgfiles.append(tmpname)
            if mbox:
                fp.write(FROM_)
            fp.write(DUMMY_MESSAGE)
        if hasattr(os, "link"):
            os.link(tmpname, newname)
        else:
            # No hard links on this platform: write a second copy.
            with open(newname, "w") as fp:
                fp.write(DUMMY_MESSAGE)
        self._msgfiles.append(newname)
        return tmpname
    def test_empty_maildir(self):
        """Test an empty maildir mailbox"""
        # Test for regression on bug #117490:
        # Make sure the boxes attribute actually gets set.
        self.mbox = mailbox.Maildir(test_support.TESTFN)
        #self.assertTrue(hasattr(self.mbox, "boxes"))
        #self.assertTrue(len(self.mbox.boxes) == 0)
        self.assertIs(self.mbox.next(), None)
        self.assertIs(self.mbox.next(), None)
    def test_nonempty_maildir_cur(self):
        self.createMessage("cur")
        self.mbox = mailbox.Maildir(test_support.TESTFN)
        #self.assertTrue(len(self.mbox.boxes) == 1)
        msg = self.mbox.next()
        self.assertIsNot(msg, None)
        msg.fp.close()
        # The single message has been consumed; next() now yields None.
        self.assertIs(self.mbox.next(), None)
        self.assertIs(self.mbox.next(), None)
    def test_nonempty_maildir_new(self):
        self.createMessage("new")
        self.mbox = mailbox.Maildir(test_support.TESTFN)
        #self.assertTrue(len(self.mbox.boxes) == 1)
        msg = self.mbox.next()
        self.assertIsNot(msg, None)
        msg.fp.close()
        self.assertIs(self.mbox.next(), None)
        self.assertIs(self.mbox.next(), None)
    def test_nonempty_maildir_both(self):
        # Messages in both cur/ and new/ are visited by iteration.
        self.createMessage("cur")
        self.createMessage("new")
        self.mbox = mailbox.Maildir(test_support.TESTFN)
        #self.assertTrue(len(self.mbox.boxes) == 2)
        msg = self.mbox.next()
        self.assertIsNot(msg, None)
        msg.fp.close()
        msg = self.mbox.next()
        self.assertIsNot(msg, None)
        msg.fp.close()
        self.assertIs(self.mbox.next(), None)
        self.assertIs(self.mbox.next(), None)
    def test_unix_mbox(self):
        ### should be better!
        import email.parser
        fname = self.createMessage("cur", True)
        n = 0
        fid = open(fname)
        for msg in mailbox.PortableUnixMailbox(fid,
                                               email.parser.Parser().parse):
            n += 1
            self.assertEqual(msg["subject"], "Simple Test")
            self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE))
        fid.close()
        # Exactly one message must have been parsed from the mbox file.
        self.assertEqual(n, 1)
## End: classes from the original module (for backward compatibility).
_sample_message = """\
Return-Path: <gkj@gregorykjohnson.com>
X-Original-To: gkj+person@localhost
Delivered-To: gkj+person@localhost
Received: from localhost (localhost [127.0.0.1])
by andy.gregorykjohnson.com (Postfix) with ESMTP id 356ED9DD17
for <gkj+person@localhost>; Wed, 13 Jul 2005 17:23:16 -0400 (EDT)
Delivered-To: gkj@sundance.gregorykjohnson.com
Received: from localhost [127.0.0.1]
by localhost with POP3 (fetchmail-6.2.5)
for gkj+person@localhost (single-drop); Wed, 13 Jul 2005 17:23:16 -0400 (EDT)
Received: from andy.gregorykjohnson.com (andy.gregorykjohnson.com [64.32.235.228])
by sundance.gregorykjohnson.com (Postfix) with ESMTP id 5B056316746
for <gkj@gregorykjohnson.com>; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)
Received: by andy.gregorykjohnson.com (Postfix, from userid 1000)
id 490CD9DD17; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)
Date: Wed, 13 Jul 2005 17:23:11 -0400
From: "Gregory K. Johnson" <gkj@gregorykjohnson.com>
To: gkj@gregorykjohnson.com
Subject: Sample message
Message-ID: <20050713212311.GC4701@andy.gregorykjohnson.com>
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary="NMuMz9nt05w80d4+"
Content-Disposition: inline
User-Agent: Mutt/1.5.9i
--NMuMz9nt05w80d4+
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
This is a sample message.
--
Gregory K. Johnson
--NMuMz9nt05w80d4+
Content-Type: application/octet-stream
Content-Disposition: attachment; filename="text.gz"
Content-Transfer-Encoding: base64
H4sICM2D1UIAA3RleHQAC8nILFYAokSFktSKEoW0zJxUPa7wzJIMhZLyfIWczLzUYj0uAHTs
3FYlAAAA
--NMuMz9nt05w80d4+--
"""
_sample_headers = {
"Return-Path":"<gkj@gregorykjohnson.com>",
"X-Original-To":"gkj+person@localhost",
"Delivered-To":"gkj+person@localhost",
"Received":"""from localhost (localhost [127.0.0.1])
by andy.gregorykjohnson.com (Postfix) with ESMTP id 356ED9DD17
for <gkj+person@localhost>; Wed, 13 Jul 2005 17:23:16 -0400 (EDT)""",
"Delivered-To":"gkj@sundance.gregorykjohnson.com",
"Received":"""from localhost [127.0.0.1]
by localhost with POP3 (fetchmail-6.2.5)
for gkj+person@localhost (single-drop); Wed, 13 Jul 2005 17:23:16 -0400 (EDT)""",
"Received":"""from andy.gregorykjohnson.com (andy.gregorykjohnson.com [64.32.235.228])
by sundance.gregorykjohnson.com (Postfix) with ESMTP id 5B056316746
for <gkj@gregorykjohnson.com>; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)""",
"Received":"""by andy.gregorykjohnson.com (Postfix, from userid 1000)
id 490CD9DD17; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)""",
"Date":"Wed, 13 Jul 2005 17:23:11 -0400",
"From":""""Gregory K. Johnson" <gkj@gregorykjohnson.com>""",
"To":"gkj@gregorykjohnson.com",
"Subject":"Sample message",
"Mime-Version":"1.0",
"Content-Type":"""multipart/mixed; boundary="NMuMz9nt05w80d4+\"""",
"Content-Disposition":"inline",
"User-Agent": "Mutt/1.5.9i" }
_sample_payloads = ("""This is a sample message.
--
Gregory K. Johnson
""",
"""H4sICM2D1UIAA3RleHQAC8nILFYAokSFktSKEoW0zJxUPa7wzJIMhZLyfIWczLzUYj0uAHTs
3FYlAAAA
""")
def test_main():
    # Every TestCase defined in this module, in definition order.
    tests = [TestMailboxSuperclass, TestMaildir, TestMbox, TestMMDF, TestMH,
             TestBabyl, TestMessage, TestMaildirMessage, TestMboxMessage,
             TestMHMessage, TestBabylMessage, TestMMDFMessage,
             TestMessageConversion, TestProxyFile, TestPartialFile,
             MaildirTestCase]
    test_support.run_unittest(*tests)
    test_support.reap_children()
if __name__ == '__main__':
    test_main()
| gpl-2.0 |
jhjguxin/blogserver | lib/python2.7/site-packages/django/contrib/staticfiles/management/commands/runserver.py | 163 | 1264 | from optparse import make_option
from django.conf import settings
from django.core.management.commands.runserver import BaseRunserverCommand
from django.contrib.staticfiles.handlers import StaticFilesHandler
class Command(BaseRunserverCommand):
    """runserver variant that can also serve static files in development."""
    option_list = BaseRunserverCommand.option_list + (
        make_option('--nostatic', action="store_false",
                    dest='use_static_handler', default=True,
                    help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
        make_option('--insecure', action="store_true",
                    dest='insecure_serving', default=False,
                    help='Allows serving static files even if DEBUG is False.'),
    )
    help = "Starts a lightweight Web server for development and also serves static files."
    def get_handler(self, *args, **options):
        """
        Returns the static files serving handler.
        """
        handler = super(Command, self).get_handler(*args, **options)
        serve_static = options.get('use_static_handler', True)
        allow_insecure = options.get('insecure_serving', False)
        # Wrap the WSGI handler only when static serving is enabled and
        # either DEBUG is on or --insecure was passed explicitly.
        if serve_static and (settings.DEBUG or allow_insecure):
            handler = StaticFilesHandler(handler)
        return handler
| mit |
sestrella/ansible | lib/ansible/modules/remote_management/cpm/cpm_plugcontrol.py | 55 | 7369 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) 2018 Red Hat Inc.
# Copyright (C) 2018 Western Telematic Inc. <kenp@wti.com>
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Module to execute WTI Plug Commands on WTI OOB and PDU devices.
# WTI remote_management
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: cpm_plugcontrol
version_added: "2.8"
author: "Western Telematic Inc. (@wtinetworkgear)"
short_description: Get and Set Plug actions on WTI OOB and PDU power devices
description:
- "Get and Set Plug actions on WTI OOB and PDU devices"
options:
cpm_action:
description:
- This is the Action to send the module.
required: true
choices: [ "getplugcontrol", "setplugcontrol" ]
cpm_url:
description:
- This is the URL of the WTI device to send the module.
required: true
cpm_username:
description:
- This is the Username of the WTI device to send the module.
cpm_password:
description:
- This is the Password of the WTI device to send the module.
use_https:
description:
- Designates to use an https connection or http connection.
required: false
type: bool
default: true
validate_certs:
description:
- If false, SSL certificates will not be validated. This should only be used
- on personally controlled sites using self-signed certificates.
required: false
type: bool
default: true
use_proxy:
description: Flag to control if the lookup will observe HTTP proxy environment variables when present.
required: false
type: bool
default: false
plug_id:
description:
- This is the plug number or the plug name that is to be manipulated
For the plugget command, the plug_id 'all' will return the status of all the plugs the
user has rights to access.
required: true
plug_state:
description:
- This is what action to take on the plug.
required: false
choices: [ "on", "off", "boot", "default" ]
"""
EXAMPLES = """
# Get Plug status for all ports
- name: Get the Plug status for ALL ports of a WTI device
cpm_plugcontrol:
cpm_action: "getplugcontrol"
cpm_url: "rest.wti.com"
cpm_username: "restpower"
cpm_password: "restfulpowerpass12"
use_https: true
validate_certs: true
plug_id: "all"
# Get Plug status for port 2
- name: Get the Plug status for the given port of a WTI device
cpm_plugcontrol:
cpm_action: "getplugcontrol"
cpm_url: "rest.wti.com"
cpm_username: "restpower"
cpm_password: "restfulpowerpass12"
use_https: true
validate_certs: false
plug_id: "2"
# Reboot plug 5
- name: Reboot Plug 5 on a given WTI device
cpm_plugcontrol:
cpm_action: "setplugcontrol"
cpm_url: "rest.wti.com"
cpm_username: "restpower"
cpm_password: "restfulpowerpass12"
use_https: true
plug_id: "5"
plug_state: "boot"
"""
RETURN = """
data:
description: The output JSON returned from the commands sent
returned: always
type: str
"""
import base64
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_bytes, to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
def assemble_json(cpmmodule, cpmresult):
    """Build the request payload for a setplugcontrol POST.

    Each comma-separated entry in the plug_id parameter becomes a JSON
    object keyed by "plug" (purely numeric ids) or "plugname" (names),
    with an optional "state" field. NOTE(review): multiple plugs yield
    concatenated objects with no separator ('{..}{..}'), not a JSON
    array -- presumably what the device endpoint expects; verify.
    cpmresult is unused but kept for interface compatibility.
    """
    payload = ""
    for plug in cpmmodule.params["plug_id"].split(","):
        key = "plug" if plug.isdigit() else "plugname"
        payload = '%s{"%s": "%s"' % (payload, key, to_native(plug))
        state = cpmmodule.params["plug_state"]
        if state is not None:
            payload = '%s,"state": "%s"' % (payload, to_native(state))
        payload = '%s}' % (payload)
    return payload
def run_module():
    """Core logic of the cpm_plugcontrol module.

    Builds an authenticated request against the WTI device's
    /api/v2/config/powerplug endpoint -- GET for getplugcontrol, POST
    (with a payload from assemble_json) for setplugcontrol -- and exits
    the module with the parsed JSON response in 'data'. Connection,
    HTTP, and SSL-validation failures exit via module.fail_json.
    """
    # define the available arguments/parameters that a user can pass to
    # the module
    module_args = dict(
        cpm_action=dict(choices=['getplugcontrol', 'setplugcontrol'], required=True),
        cpm_url=dict(type='str', required=True),
        cpm_username=dict(type='str', required=True),
        cpm_password=dict(type='str', required=True, no_log=True),
        plug_id=dict(type='str', required=True),
        plug_state=dict(choices=['on', 'off', 'boot', 'default'], required=False),
        use_https=dict(type='bool', default=True),
        validate_certs=dict(type='bool', default=True),
        use_proxy=dict(type='bool', default=False)
    )
    result = dict(
        changed=False,
        data=''
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    # Check mode performs no device I/O and reports no change.
    if module.check_mode:
        return result
    # HTTP Basic auth header value derived from the supplied credentials.
    auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(to_native(module.params['cpm_username']), to_native(module.params['cpm_password'])),
                   errors='surrogate_or_strict')))
    if module.params['use_https'] is True:
        protocol = "https://"
    else:
        protocol = "http://"
    Payload = None
    if (module.params['cpm_action'] == 'getplugcontrol'):
        fullurl = ("%s%s/api/v2/config/powerplug" % (protocol, to_native(module.params['cpm_url'])))
        # plug_id 'all' means no query filter: the device then returns
        # every plug the user has rights to access.
        if (module.params['plug_id'].lower() != 'all'):
            fullurl = '%s?plug=%s' % (fullurl, to_native(module.params['plug_id']))
        method = 'GET'
    elif (module.params['cpm_action'] == 'setplugcontrol'):
        Payload = assemble_json(module, result)
        fullurl = ("%s%s/api/v2/config/powerplug" % (protocol, to_native(module.params['cpm_url'])))
        method = 'POST'
    try:
        response = open_url(fullurl, data=Payload, method=method, validate_certs=module.params['validate_certs'], use_proxy=module.params['use_proxy'],
                            headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
        # Only state-changing (non-GET) requests are reported as changed.
        if (method != 'GET'):
            result['changed'] = True
    except HTTPError as e:
        fail_json = dict(msg='Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
        module.fail_json(**fail_json)
    except URLError as e:
        fail_json = dict(msg='Failed lookup url for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
        module.fail_json(**fail_json)
    except SSLValidationError as e:
        # Fixed message: previously '' inside a single-quoted string
        # concatenated two literals, producing "the servers certificate".
        fail_json = dict(msg="Error validating the server's certificate for {0} : {1}".format(fullurl, to_native(e)), changed=False)
        module.fail_json(**fail_json)
    except ConnectionError as e:
        fail_json = dict(msg='Error connecting to for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
        module.fail_json(**fail_json)
    result['data'] = json.loads(response.read())
    module.exit_json(**result)
def main():
    # Standard Ansible module entry point; all work happens in run_module().
    run_module()
if __name__ == '__main__':
    main()
| gpl-3.0 |
Eric-Gaudiello/tensorflow_dev | tensorflow_home/tensorflow_venv/lib/python3.4/site-packages/pip/compat/__init__.py | 248 | 3402 | """Stuff that differs in different Python versions and platform
distributions."""
from __future__ import absolute_import, division
import os
import sys
from pip._vendor.six import text_type
try:
    from logging.config import dictConfig as logging_dictConfig
except ImportError:
    # Older Pythons lack logging.config.dictConfig; use pip's bundled copy.
    from pip.compat.dictconfig import dictConfig as logging_dictConfig
try:
    import ipaddress
except ImportError:
    try:
        # Vendored copy shipped inside pip.
        from pip._vendor import ipaddress
    except ImportError:
        # Last resort: the third-party 'ipaddr' module, aliased so its
        # API matches the stdlib ipaddress names used elsewhere.
        import ipaddr as ipaddress
        ipaddress.ip_address = ipaddress.IPAddress
        ipaddress.ip_network = ipaddress.IPNetwork
# Public names re-exported by this compatibility module.
__all__ = [
    "logging_dictConfig", "ipaddress", "uses_pycache", "console_to_str",
    "native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS",
]
if sys.version_info >= (3, 4):
    # Python 3.4+: importlib exposes the .pyc path computation directly.
    uses_pycache = True
    from importlib.util import cache_from_source
else:
    import imp
    # imp.cache_from_source only exists on interpreters with PEP 3147
    # __pycache__ support.
    uses_pycache = hasattr(imp, 'cache_from_source')
    if uses_pycache:
        cache_from_source = imp.cache_from_source
    else:
        cache_from_source = None
if sys.version_info >= (3,):
    def console_to_str(s):
        """Decode console (subprocess) output bytes to text.

        Tries the real stdout's encoding first, falling back to UTF-8.
        Bug fix: sys.__stdout__ can be None (e.g. under pythonw) and its
        encoding attribute can also be None; previously that raised
        AttributeError/TypeError before any decoding was attempted.
        """
        encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf_8'
        try:
            return s.decode(encoding)
        except UnicodeDecodeError:
            return s.decode('utf_8')
    def native_str(s, replace=False):
        """Return a str: decode bytes as UTF-8 (optionally replacing
        undecodable bytes); pass text through unchanged."""
        if isinstance(s, bytes):
            return s.decode('utf-8', 'replace' if replace else 'strict')
        return s
else:
    def console_to_str(s):
        # Python 2: subprocess output is already a (byte) str.
        return s
    def native_str(s, replace=False):
        # Replace is ignored -- unicode to UTF-8 can't fail
        if isinstance(s, text_type):
            return s.encode('utf-8')
        return s
def total_seconds(td):
    """Return the duration of timedelta *td* in seconds as a float.

    Backport of datetime.timedelta.total_seconds() for Python 2.6,
    where the method does not exist.
    """
    try:
        return td.total_seconds()
    except AttributeError:
        micros = (td.days * 24 * 3600 + td.seconds) * 10 ** 6 + td.microseconds
        return micros / 10 ** 6
def get_path_uid(path):
    """
    Return path's uid.

    Does not follow symlinks:
        https://github.com/pypa/pip/pull/935#discussion_r5307003

    Placed this function in compat due to differences on AIX and
    Jython, that should eventually go away.

    :raises OSError: When path is a symlink or can't be read.
    """
    if hasattr(os, 'O_NOFOLLOW'):
        # Open without following symlinks so the fstat below describes
        # the path itself, atomically (no check-then-use race).
        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
        file_uid = os.fstat(fd).st_uid
        os.close(fd)
    else:  # AIX and Jython
        # WARNING: time of check vulnerabity, but best we can do w/o NOFOLLOW
        if not os.path.islink(path):
            # older versions of Jython don't have `os.fstat`
            file_uid = os.stat(path).st_uid
        else:
            # raise OSError for parity with os.O_NOFOLLOW above
            raise OSError(
                "%s is a symlink; Will not return uid for symlinks" % path
            )
    return file_uid
# packages in the stdlib that may have installation metadata, but should not be
# considered 'installed'. this theoretically could be determined based on
# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
# make this ineffective, so hard-coding
stdlib_pkgs = ['python', 'wsgiref']
if sys.version_info >= (2, 7):
stdlib_pkgs.extend(['argparse'])
# windows detection, covers cpython and ironpython
WINDOWS = (sys.platform.startswith("win") or
(sys.platform == 'cli' and os.name == 'nt'))
| gpl-3.0 |
TheTypoMaster/chromium-crosswalk | build/android/gyp/util/build_utils.py | 36 | 10668 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ast
import contextlib
import fnmatch
import json
import os
import pipes
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import zipfile
CHROMIUM_SRC = os.path.normpath(
os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir))
COLORAMA_ROOT = os.path.join(CHROMIUM_SRC,
'third_party', 'colorama', 'src')
# aapt should ignore OWNERS files in addition the default ignore pattern.
AAPT_IGNORE_PATTERN = ('!OWNERS:!.svn:!.git:!.ds_store:!*.scc:.*:<dir>_*:' +
'!CVS:!thumbs.db:!picasa.ini:!*~:!*.d.stamp')
@contextlib.contextmanager
def TempDir():
  """Context manager yielding a temporary directory that is recursively
  deleted on exit, even when the body raises."""
  path = tempfile.mkdtemp()
  try:
    yield path
  finally:
    shutil.rmtree(path)
def MakeDirectory(dir_path):
  """Create dir_path (including parents); a no-op if it already exists.

  Previously every OSError was swallowed, hiding real failures such as
  permission errors or a regular file occupying the path; those are now
  re-raised.
  """
  try:
    os.makedirs(dir_path)
  except OSError:
    # Only ignore "already exists as a directory".
    if not os.path.isdir(dir_path):
      raise
def DeleteDirectory(dir_path):
  """Recursively delete dir_path; silently do nothing if it's absent."""
  if not os.path.exists(dir_path):
    return
  shutil.rmtree(dir_path)
def Touch(path, fail_if_missing=False):
  """Update path's timestamp, creating it (and parent dirs) if needed.

  With fail_if_missing=True, raise instead of creating a missing file.
  """
  if fail_if_missing and not os.path.exists(path):
    raise Exception(path + ' doesn\'t exist.')
  MakeDirectory(os.path.dirname(path))
  f = open(path, 'a')
  try:
    os.utime(path, None)
  finally:
    f.close()
def FindInDirectory(directory, filename_filter):
  """Return paths of all files under directory whose basename matches
  the fnmatch pattern filename_filter, in os.walk order."""
  return [os.path.join(root, name)
          for root, _dirs, names in os.walk(directory)
          for name in fnmatch.filter(names, filename_filter)]
def FindInDirectories(directories, filename_filter):
  """Concatenate FindInDirectory results for each directory, in order."""
  found = []
  for directory in directories:
    found += FindInDirectory(directory, filename_filter)
  return found
def ParseGnList(gn_string):
  # GN list literals are also valid Python literals, so a safe
  # literal_eval (no code execution) suffices.
  return ast.literal_eval(gn_string)
def ParseGypList(gyp_string):
  """Split a GYP flag value into a list of strings.

  '##' is first decoded back to '$' (the ninja generator can't express
  '$' in strings; see https://code.google.com/p/gyp/issues/detail?id=327).
  Values that look like GN lists ('[...') are parsed as literals;
  anything else is shell-split.
  """
  decoded = gyp_string.replace('##', '$')
  if decoded.startswith('['):
    return ParseGnList(decoded)
  return shlex.split(decoded)
def CheckOptions(options, parser, required=None):
  """Call parser.error() for each option in `required` that is unset
  (None) on the parsed options object."""
  for option_name in required or ():
    if getattr(options, option_name) is None:
      parser.error('--%s is required' % option_name.replace('_', '-'))
def WriteJson(obj, path, only_if_changed=False):
  """Serialize obj as stable, pretty-printed JSON to path.

  With only_if_changed, skip the write when the serialized form equals
  the file's current contents (avoids touching the file's mtime).
  """
  new_dump = json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
  old_dump = None
  if os.path.exists(path):
    with open(path, 'r') as f:
      old_dump = f.read()
  if only_if_changed and old_dump == new_dump:
    return
  with open(path, 'w') as f:
    f.write(new_dump)
def ReadJson(path):
  """Parse and return the JSON document stored at path."""
  with open(path, 'r') as f:
    return json.load(f)
class CalledProcessError(Exception):
  """This exception is raised when the process run by CheckOutput
  exits with a non-zero exit code."""

  def __init__(self, cwd, args, output):
    # cwd: directory the command ran in; args: argv list; output:
    # combined stdout+stderr captured from the failed process.
    super(CalledProcessError, self).__init__()
    self.cwd = cwd
    self.args = args
    self.output = output

  def __str__(self):
    # A user should be able to simply copy and paste the command that failed
    # into their shell.
    copyable_command = '( cd {}; {} )'.format(os.path.abspath(self.cwd),
        ' '.join(map(pipes.quote, self.args)))
    return 'Command failed: {}\n{}'.format(copyable_command, self.output)
# This can be used in most cases like subprocess.check_output(). The output,
# particularly when the command fails, better highlights the command's failure.
# If the command fails, raises a build_utils.CalledProcessError.
def CheckOutput(args, cwd=None,
                print_stdout=False, print_stderr=True,
                stdout_filter=None,
                stderr_filter=None,
                fail_func=lambda returncode, stderr: returncode != 0):
  """Run `args` in `cwd` and return its captured stdout.

  stdout_filter/stderr_filter, when given, post-process the captured
  streams before the failure check and any echoing. fail_func decides
  what counts as failure (default: non-zero exit code); on failure a
  CalledProcessError carrying the combined output is raised.
  """
  if not cwd:
    cwd = os.getcwd()
  child = subprocess.Popen(args,
      stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
  stdout, stderr = child.communicate()
  # Filters run before the failure check so fail_func sees filtered stderr.
  if stdout_filter is not None:
    stdout = stdout_filter(stdout)
  if stderr_filter is not None:
    stderr = stderr_filter(stderr)
  if fail_func(child.returncode, stderr):
    raise CalledProcessError(cwd, args, stdout + stderr)
  if print_stdout:
    sys.stdout.write(stdout)
  if print_stderr:
    sys.stderr.write(stderr)
  return stdout
def GetModifiedTime(path):
  # For a symlink, the modified time should be the greater of the link's
  # modified time and the modified time of the target.
  link_mtime = os.lstat(path).st_mtime
  target_mtime = os.stat(path).st_mtime
  return max(link_mtime, target_mtime)
def IsTimeStale(output, inputs):
  """True if `output` is missing or any of `inputs` is newer than it."""
  if not os.path.exists(output):
    return True
  output_mtime = GetModifiedTime(output)
  return any(GetModifiedTime(i) > output_mtime for i in inputs)
def IsDeviceReady():
  # 'adb get-state' reports 'device' for an attached, ready Android
  # device; any other state (offline, unauthorized, none) is not ready.
  device_state = CheckOutput(['adb', 'get-state'])
  return device_state.strip() == 'device'
def CheckZipPath(name):
  """Reject zip member names that are non-canonical (contain '..', '.'
  or redundant separators) or absolute."""
  normalized = os.path.normpath(name)
  if normalized != name:
    raise Exception('Non-canonical zip path: %s' % name)
  if os.path.isabs(name):
    raise Exception('Absolute zip path: %s' % name)
def ExtractAll(zip_path, path=None, no_clobber=True, pattern=None):
  """Extract members of zip_path into path (default: cwd).

  Directory entries are skipped. `pattern`, if given, is an fnmatch
  filter on member names; only matching members are extracted. (Bug
  fix: previously the pattern filtered only the safety checks, while
  the trailing extractall() still unpacked every member.) With
  no_clobber, refuse to overwrite any existing destination file.
  """
  if path is None:
    path = os.getcwd()
  elif not os.path.exists(path):
    MakeDirectory(path)
  with zipfile.ZipFile(zip_path) as z:
    for name in z.namelist():
      if name.endswith('/'):
        continue
      if pattern is not None and not fnmatch.fnmatch(name, pattern):
        continue
      # Reject absolute or non-canonical member paths before touching
      # the filesystem.
      CheckZipPath(name)
      if no_clobber:
        output_path = os.path.join(path, name)
        if os.path.exists(output_path):
          raise Exception(
              'Path already exists from zip: %s %s %s'
              % (zip_path, name, output_path))
      z.extract(name, path)
def DoZip(inputs, output, base_dir):
  """Write `inputs` (paths under base_dir) into the zip `output`,
  archived under their base_dir-relative names."""
  with zipfile.ZipFile(output, 'w') as zf:
    for path in inputs:
      archive_name = os.path.relpath(path, base_dir)
      CheckZipPath(archive_name)
      zf.write(path, archive_name)
def ZipDir(output, base_dir):
  """Zip the entire tree under base_dir into `output`, storing members
  under their base_dir-relative paths."""
  with zipfile.ZipFile(output, 'w') as outfile:
    for root, _, files in os.walk(base_dir):
      for f in files:
        path = os.path.join(root, f)
        archive_path = os.path.relpath(path, base_dir)
        # Defensive check; relpath of a walked file should be canonical.
        CheckZipPath(archive_path)
        outfile.write(path, archive_path)
def MergeZips(output, inputs, exclude_patterns=None):
  """Combine several zip files into `output`.

  Earlier inputs win on duplicate member names; members matching any
  fnmatch pattern in exclude_patterns are dropped entirely.
  """
  def _excluded(name):
    return any(fnmatch.fnmatch(name, p) for p in exclude_patterns or ())
  seen = set()
  with zipfile.ZipFile(output, 'w') as out_zip:
    for input_path in inputs:
      with zipfile.ZipFile(input_path, 'r') as in_zip:
        for name in in_zip.namelist():
          if name in seen or _excluded(name):
            continue
          out_zip.writestr(name, in_zip.read(name))
          seen.add(name)
def PrintWarning(message):
  # Emit a 'WARNING: '-prefixed line to stdout (Python 2 print statement).
  print 'WARNING: ' + message
def PrintBigWarning(message):
  # Same as PrintWarning, but framed with asterisk banners so the
  # message stands out in long build logs.
  print '***** ' * 8
  PrintWarning(message)
  print '***** ' * 8
def GetSortedTransitiveDependencies(top, deps_func):
  """Gets the list of all transitive dependencies in sorted order.

  There should be no cycles in the dependency graph (a cycle previously
  made this loop forever; it now raises an Exception).

  Args:
    top: a list of the top level nodes
    deps_func: A function that takes a node and returns a set of its direct
        dependencies.
  Returns:
    A list of all transitive dependencies of nodes in top, in order (a node
    will appear in the list at a higher index than all of its dependencies).
  """
  # First: find all deps via an unordered graph walk.
  unchecked_deps = list(top)
  all_deps = set(top)
  while unchecked_deps:
    dep = unchecked_deps.pop()
    new_deps = deps_func(dep).difference(all_deps)
    unchecked_deps.extend(new_deps)
    all_deps = all_deps.union(new_deps)
  # Then: simple, slow topological sort. Iterate over a snapshot of the
  # items so entries can be deleted while looping; deleting from a dict
  # during iteration of its live view only worked under Python 2's
  # list-returning items() and breaks on Python 3.
  sorted_deps = []
  unsorted_deps = dict((dep, deps_func(dep)) for dep in all_deps)
  while unsorted_deps:
    made_progress = False
    for library, dependencies in list(unsorted_deps.items()):
      if not dependencies.intersection(unsorted_deps.keys()):
        sorted_deps.append(library)
        del unsorted_deps[library]
        made_progress = True
    if not made_progress:
      # Nothing could be emitted this pass => dependency cycle.
      raise Exception('Dependency cycle detected: %r' % unsorted_deps.keys())
  return sorted_deps
def GetPythonDependencies():
  """Gets the paths of imported non-system python modules.

  A path is assumed to be a "system" import if it is outside of chromium's
  src/. The paths will be relative to the current directory.
  """
  # Snapshot the values eagerly: sys.modules can mutate while importing,
  # and itervalues() does not exist on Python 3.
  module_paths = (m.__file__ for m in list(sys.modules.values())
                  if m is not None and hasattr(m, '__file__'))

  abs_module_paths = map(os.path.abspath, module_paths)
  non_system_module_paths = [
      p for p in abs_module_paths if p.startswith(CHROMIUM_SRC)]

  def ConvertPycToPy(s):
    # Report the source file rather than the compiled byte-code cache.
    if s.endswith('.pyc'):
      return s[:-1]
    return s

  non_system_module_paths = map(ConvertPycToPy, non_system_module_paths)
  non_system_module_paths = map(os.path.relpath, non_system_module_paths)
  return sorted(set(non_system_module_paths))
def AddDepfileOption(parser):
  """Registers the --depfile option on an optparse |parser|."""
  help_text = ('Path to depfile. This must be specified as the '
               "action's first output.")
  parser.add_option('--depfile', help=help_text)
def WriteDepfile(path, dependencies):
  """Writes a Make-style depfile at |path|.

  The file contains a single rule of the form "<path>: dep1 dep2 ...".
  """
  content = '%s: %s\n' % (path, ' '.join(dependencies))
  with open(path, 'w') as depfile:
    depfile.write(content)
def ExpandFileArgs(args):
  """Replaces file-arg placeholders in args.

  These placeholders have the form:
    @FileArg(filename:key1:key2:...:keyn)

  The value of such a placeholder is calculated by reading 'filename' as
  json. And then extracting the value at [key1][key2]...[keyn].

  Note: This intentionally does not return the list of files that appear in
  such placeholders. An action that uses file-args *must* know the paths of
  those files prior to the parsing of the arguments (typically by explicitly
  listing them in the action's inputs in build files).
  """
  new_args = list(args)
  file_jsons = dict()  # Cache of parsed json, keyed by file path.
  # Raw string: '\(' in a plain literal is an invalid escape sequence and
  # is deprecated on modern Pythons.
  r = re.compile(r'@FileArg\((.*?)\)')
  for i, arg in enumerate(args):
    match = r.search(arg)
    if not match:
      continue
    # The placeholder must end the argument; trailing characters are almost
    # certainly a quoting mistake, so fail loudly.
    if match.end() != len(arg):
      raise Exception('Unexpected characters after FileArg: ' + arg)

    lookup_path = match.group(1).split(':')
    file_path = lookup_path[0]
    if not file_path in file_jsons:
      file_jsons[file_path] = ReadJson(file_path)

    expansion = file_jsons[file_path]
    for k in lookup_path[1:]:
      expansion = expansion[k]

    # Preserve any prefix before the placeholder.
    new_args[i] = arg[:match.start()] + str(expansion)

  return new_args
| bsd-3-clause |
youdonghai/intellij-community | python/lib/Lib/Queue.py | 90 | 7758 | """A multi-producer, multi-consumer queue."""
from time import time as _time
from collections import deque
__all__ = ['Empty', 'Full', 'Queue']
class Empty(Exception):
    "Exception raised by Queue.get(block=0)/get_nowait()."
    # Raised when a non-blocking get() finds no item, or a blocking get()
    # times out.
    pass
class Full(Exception):
    "Exception raised by Queue.put(block=0)/put_nowait()."
    # Raised when a non-blocking put() finds no free slot, or a blocking
    # put() times out.
    pass
class Queue:
    """Create a queue object with a given maximum size.

    If maxsize is <= 0, the queue size is infinite.
    """
    def __init__(self, maxsize=0):
        # dummy_threading provides the same API on platforms without real
        # threads, so the import is attempted lazily here.
        try:
            import threading
        except ImportError:
            import dummy_threading as threading
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = threading.Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = threading.Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = threading.Condition(self.mutex)
        self.unfinished_tasks = 0

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by Queue consumer threads.  For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).

        Raises a ValueError if called more times than there were items
        placed in the queue.
        """
        self.all_tasks_done.acquire()
        try:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                # Count reached zero: wake every thread blocked in join().
                self.all_tasks_done.notifyAll()
            self.unfinished_tasks = unfinished
        finally:
            self.all_tasks_done.release()

    def join(self):
        """Blocks until all items in the Queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate the item was retrieved and all work on it is complete.

        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        self.all_tasks_done.acquire()
        try:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
        finally:
            self.all_tasks_done.release()

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        # The size is only a snapshot: another thread may mutate the queue
        # as soon as the mutex is released.
        self.mutex.acquire()
        n = self._qsize()
        self.mutex.release()
        return n

    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        self.mutex.acquire()
        n = self._empty()
        self.mutex.release()
        return n

    def full(self):
        """Return True if the queue is full, False otherwise (not reliable!)."""
        self.mutex.acquire()
        n = self._full()
        self.mutex.release()
        return n

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        self.not_full.acquire()
        try:
            if not block:
                if self._full():
                    raise Full
            elif timeout is None:
                # Wait in a loop: a notify() does not guarantee the slot is
                # still free once this thread reacquires the lock.
                while self._full():
                    self.not_full.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._full():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Full
                    self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        """
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        """
        self.not_empty.acquire()
        try:
            if not block:
                if self._empty():
                    raise Empty
            elif timeout is None:
                while self._empty():
                    self.not_empty.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._empty():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        """
        return self.get(False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    # Initialize the queue representation
    def _init(self, maxsize):
        self.maxsize = maxsize
        self.queue = deque()

    def _qsize(self):
        return len(self.queue)

    # Check whether the queue is empty
    def _empty(self):
        return not self.queue

    # Check whether the queue is full
    def _full(self):
        return self.maxsize > 0 and len(self.queue) == self.maxsize

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()
| apache-2.0 |
TheTypoMaster/my-vim-set-mac | .vim/bundle/YouCompleteMe/third_party/ycmd/third_party/requests/requests/api.py | 160 | 5280 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a (`connect timeout, read timeout
        <user/advanced.html#timeouts>`_) tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    session = sessions.Session()
    # By explicitly closing the session, we avoid leaving sockets open which
    # can trigger a ResourceWarning in some cases, and look like a memory
    # leak in others. try/finally also covers the case where
    # session.request itself raises, which previously leaked the session.
    try:
        return session.request(method=method, url=url, **kwargs)
    finally:
        session.close()
def get(url, **kwargs):
    """Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # GET follows redirects unless the caller explicitly opts out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, **kwargs)
def options(url, **kwargs):
    """Sends a OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # OPTIONS follows redirects unless the caller explicitly opts out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    """Sends a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Unlike GET, HEAD does not follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    """Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Thin wrapper: delegates to request() with the HTTP verb pre-filled.
    return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
    """Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Thin wrapper: delegates to request() with the HTTP verb pre-filled.
    return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    """Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Thin wrapper: delegates to request() with the HTTP verb pre-filled.
    return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
    """Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Thin wrapper: delegates to request() with the HTTP verb pre-filled.
    return request('delete', url, **kwargs)
| gpl-2.0 |
gauribhoite/personfinder | env/google_appengine/lib/django-1.5/django/contrib/gis/geoip/tests.py | 102 | 4766 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.geoip import GeoIP, GeoIPException
from django.utils import unittest
from django.utils import six
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_DATA path should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat'.
class GeoIPTest(unittest.TestCase):
    """Exercises GeoIP initialization and country/city lookups.

    NOTE(review): these tests depend on the external MaxMind GeoIP country
    and city datasets pointed to by settings.GEOIP_PATH, and on DNS
    resolution of real hostnames — results may drift as those data change.
    """

    def test01_init(self):
        "Testing GeoIP initialization."
        g1 = GeoIP()  # Everything inferred from GeoIP path
        path = settings.GEOIP_PATH
        g2 = GeoIP(path, 0)  # Passing in data path explicitly.
        g3 = GeoIP.open(path, 0)  # MaxMind Python API syntax.

        # All three construction styles must load both databases.
        for g in (g1, g2, g3):
            self.assertEqual(True, bool(g._country))
            self.assertEqual(True, bool(g._city))

        # Only passing in the location of one database.
        city = os.path.join(path, 'GeoLiteCity.dat')
        cntry = os.path.join(path, 'GeoIP.dat')
        g4 = GeoIP(city, country='')
        self.assertEqual(None, g4._country)
        g5 = GeoIP(cntry, city='')
        self.assertEqual(None, g5._city)

        # Improper parameters.
        bad_params = (23, 'foo', 15.23)
        for bad in bad_params:
            self.assertRaises(GeoIPException, GeoIP, cache=bad)
            if isinstance(bad, six.string_types):
                e = GeoIPException
            else:
                e = TypeError
            self.assertRaises(e, GeoIP, bad, 0)

    def test02_bad_query(self):
        "Testing GeoIP query parameter checking."
        cntry_g = GeoIP(city='<foo>')
        # No city database available, these calls should fail.
        self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
        self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
        # Non-string query should raise TypeError
        self.assertRaises(TypeError, cntry_g.country_code, 17)
        self.assertRaises(TypeError, cntry_g.country_name, GeoIP)

    def test03_country(self):
        "Testing GeoIP country querying methods."
        g = GeoIP(city='<foo>')

        fqdn = 'www.google.com'
        addr = '12.215.42.19'
        # Both hostname and raw-IP queries must resolve identically.
        for query in (fqdn, addr):
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query))
            self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
                             g.country(query))

    def test04_city(self):
        "Testing GeoIP city querying methods."
        g = GeoIP(country='<foo>')

        addr = '128.249.1.1'
        fqdn = 'tmc.edu'
        for query in (fqdn, addr):
            # Country queries should still work.
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query))
            self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
                             g.country(query))

            # City information dictionary.
            d = g.city(query)
            self.assertEqual('USA', d['country_code3'])
            self.assertEqual('Houston', d['city'])
            self.assertEqual('TX', d['region'])
            self.assertEqual(713, d['area_code'])
            geom = g.geos(query)
            self.assertTrue(isinstance(geom, GEOSGeometry))

            lon, lat = (-95.4010, 29.7079)
            lat_lon = g.lat_lon(query)
            lat_lon = (lat_lon[1], lat_lon[0])
            # All coordinate accessors must agree to 4 decimal places.
            for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
                self.assertAlmostEqual(lon, tup[0], 4)
                self.assertAlmostEqual(lat, tup[1], 4)

    def test05_unicode_response(self):
        "Testing that GeoIP strings are properly encoded, see #16553."
        g = GeoIP()
        d = g.city('62.224.93.23')
        self.assertEqual('Schümberg', d['city'])

    def test06_unicode_query(self):
        "Testing that GeoIP accepts unicode string queries, see #17059."
        g = GeoIP()
        d = g.country('whitehouse.gov')
        self.assertEqual('US', d['country_code'])
def suite():
    """Builds a TestSuite containing all GeoIP tests."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(GeoIPTest))
    return tests
def run(verbosity=1):
    """Runs the GeoIP suite with a text runner at the given verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| apache-2.0 |
danuzclaudes/robottelo | robottelo/ui/login.py | 4 | 2058 | # -*- encoding: utf-8 -*-
"""Implements Login UI"""
import requests
from robottelo.config import settings
from robottelo.ui.base import Base, UINoSuchElementError
from robottelo.ui.locators import common_locators, locators
from robottelo.ui.navigator import Navigator
class Login(Base):
    """Implements login, logout functions for Foreman UI"""

    def login(self, username, password, organization=None, location=None):
        """Logins user from UI"""
        if self.wait_until_element(locators['login.username']):
            self.field_update('login.username', username)
            self.field_update('login.password', password)
            self.click(common_locators['submit'])
            # Bail out early if the UI shows an error notification; there
            # is no session to select an org/location in.
            if self.find_element(common_locators['notif.error']):
                return
            if location:
                nav = Navigator(self.browser)
                nav.go_to_select_loc(location)
            if organization:
                nav = Navigator(self.browser)
                nav.go_to_select_org(organization)

    def logout(self):
        """Logout user from UI"""
        # Scroll to top so the gravatar/account menu is in view.
        self.browser.execute_script('window.scroll(0, 0)')
        if self.wait_until_element(locators['login.gravatar']) is None:
            raise UINoSuchElementError(
                'could not find login.gravatar to sign out')
        Navigator(self.browser).go_to_sign_out()

    def is_logged(self):
        """Checks whether user is logged by validating a session cookie"""
        cookies = dict(
            (cookie['name'], cookie['value'])
            for cookie in self.browser.get_cookies()
        )
        # Request the /about page with the browser's cookies: a valid
        # session serves it directly, an invalid one redirects to /login.
        url_root = settings.server.get_url() + '/about'
        response = requests.get(
            url_root,
            verify=False,
            allow_redirects=False,
            cookies=cookies
        )
        response.raise_for_status()
        return (
            response.status_code != 302 or
            not response.headers['location'].endswith('/login')
        )
| gpl-3.0 |
sanketloke/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28856 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # The mock only checks that X and Y are consistently sized.
        assert_true(len(X) == len(Y))
        return self

    def predict(self, T):
        return T.shape[0]

    # All prediction-like methods share the same trivial implementation.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # The score depends only on the hyper-parameter, never on the data.
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=False):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        self.foo_param = params['foo_param']
        return self
class LinearSVCNoScore(LinearSVC):
    """An LinearSVC classifier that has no score method."""

    @property
    def score(self):
        # Accessing .score raises, simulating an estimator without one.
        raise AttributeError
# Shared fixture: four 2-d points in two linearly separable classes.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
    """Checks that iterating a ParameterGrid matches indexing into it."""
    indexed = [grid[i] for i in range(len(grid))]
    assert_equal(list(grid), indexed)
def test_parameter_grid():
    # Test basic properties of ParameterGrid.
    # Covers iteration, __len__, __getitem__ and the empty-grid corner case.
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    assert_true(isinstance(grid1, Iterable))
    assert_true(isinstance(grid1, Sized))
    assert_equal(len(grid1), 3)
    assert_grid_iter_equals_getitem(grid1)

    params2 = {"foo": [4, 2],
               "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    assert_equal(len(grid2), 6)

    # loop to assert we can iterate over the grid multiple times
    for i in xrange(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert_equal(points,
                     set(("bar", x, "foo", y)
                         for x, y in product(params2["bar"], params2["foo"])))
    assert_grid_iter_equals_getitem(grid2)

    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert_equal(len(empty), 1)
    assert_equal(list(empty), [{}])
    assert_grid_iter_equals_getitem(empty)
    assert_raises(IndexError, lambda: empty[1])

    has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
    assert_equal(len(has_empty), 4)
    assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
    assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
    # Test that the best estimator contains the right value for foo_param
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
    # make sure it selects the smallest parameter in case of ties
    # (stdout is redirected because verbose=3 prints progress output)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    grid_search.fit(X, y)
    sys.stdout = old_stdout
    assert_equal(grid_search.best_estimator_.foo_param, 2)

    for i, foo_i in enumerate([1, 2, 3]):
        assert_true(grid_search.grid_scores_[i][0]
                    == {'foo_param': foo_i})
    # Smoke test the score etc:
    grid_search.score(X, y)
    grid_search.predict_proba(X)
    grid_search.decision_function(X)
    grid_search.transform(X)

    # Test exception handling on scoring
    grid_search.scoring = 'sklearn'
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
    # Test grid-search on classifier that has no score function.
    # An estimator without score() must still work when scoring= is given.
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    clf_no_score = LinearSVCNoScore(random_state=0)
    grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
    grid_search.fit(X, y)

    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
                                        scoring='accuracy')
    # smoketest grid search
    grid_search_no_score.fit(X, y)

    # check that best params are equal
    assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
    # check that we can call score and that it gives the correct result
    assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))

    # giving no scoring function raises an error
    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
    assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
                         [[1]])
def test_grid_search_score_method():
    # ChangedBehaviorWarning should fire only when the estimator's own
    # score method competes with an explicit scoring= parameter.
    X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
                               random_state=0)
    clf = LinearSVC(random_state=0)
    grid = {'C': [.1]}

    search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
    search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
    search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
                                              scoring='roc_auc').fit(X, y)
    search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)

    # Check warning only occurs in situation where behavior changed:
    # estimator requires score method to compete with scoring parameter
    score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
    score_accuracy = assert_warns(ChangedBehaviorWarning,
                                  search_accuracy.score, X, y)
    score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
                                            X, y)
    score_auc = assert_warns(ChangedBehaviorWarning,
                             search_auc.score, X, y)
    # ensure the test is sane
    assert_true(score_auc < 1.0)
    assert_true(score_accuracy < 1.0)
    assert_not_equal(score_auc, score_accuracy)
    assert_almost_equal(score_accuracy, score_no_scoring)
    assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
    # Test search over a "grid" with only one point.
    # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1]})
    grid_search.fit(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))

    # Same check for the randomized search variant.
    random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
    random_search.fit(X, y)
    assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
    # Test that grid search can be used for model selection only
    # (refit=False still exposes best_params_ without a final fit).
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
    grid_search.fit(X, y)
    assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
    # Test that grid search will capture errors on data with different
    # length (X truncated relative to y must raise).
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
    # test the iid parameter
    # iid=True weights fold scores by test-fold size; iid=False averages.
    # noise-free simple 2d-data
    X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
                      cluster_std=0.1, shuffle=False, n_samples=80)
    # split dataset into two folds that are not iid
    # first one contains data of all 4 blobs, second only from two.
    mask = np.ones(X.shape[0], dtype=np.bool)
    mask[np.where(y == 1)[0][::2]] = 0
    mask[np.where(y == 2)[0][::2]] = 0
    # this leads to perfect classification on one fold and a score of 1/3 on
    # the other
    svm = SVC(kernel='linear')
    # create "cv" for splits
    cv = [[mask, ~mask], [~mask, mask]]
    # once with iid=True (default)
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # for first split, 1/4 of dataset is in test, for second 3/4.
    # take weighted average
    assert_almost_equal(first.mean_validation_score,
                        1 * 1. / 4. + 1. / 3. * 3. / 4.)

    # once with iid=False
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
                               iid=False)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    # scores are the same as above
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # averaged score is just mean of scores
    assert_almost_equal(first.mean_validation_score,
                        np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
    # A single-point grid must produce the same model as a direct fit.
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}

    clf = SVC()
    cv = GridSearchCV(clf, param_dict)
    cv.fit(X_, y_)

    clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
    clf.fit(X_, y_)

    assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
    # Parameter values must be non-empty sequences, not scalars or 2-d arrays.
    param_dict = {"C": 1.0}
    clf = SVC()
    assert_raises(ValueError, GridSearchCV, clf, param_dict)

    param_dict = {"C": []}
    clf = SVC()
    assert_raises(ValueError, GridSearchCV, clf, param_dict)

    param_dict = {"C": np.ones(6).reshape(3, 2)}
    clf = SVC()
    assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
    # Test that grid search works with both dense and sparse matrices
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C

    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180].tocoo(), y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C

    # Dense and sparse fits must agree on most predictions and on C.
    assert_true(np.mean(y_pred == y_pred2) >= .9)
    assert_equal(C, C2)
def test_grid_search_sparse_scoring():
    # Dense and sparse input must give identical results for a custom scorer.
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C

    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C

    assert_array_equal(y_pred, y_pred2)
    assert_equal(C, C2)
    # Smoke test the score
    # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
    #                            cv.score(X_[:180], y[:180]))

    # test loss where greater is worse
    def f1_loss(y_true_, y_pred_):
        return -f1_score(y_true_, y_pred_)
    F1Loss = make_scorer(f1_loss, greater_is_better=False)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
    cv.fit(X_[:180], y_[:180])
    y_pred3 = cv.predict(X_[180:])
    C3 = cv.best_estimator_.C

    # A negated loss must select the same model as the original score.
    assert_equal(C, C3)
    assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
    # Test that grid search works when the input features are given in the
    # form of a precomputed kernel matrix
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    # compute the training kernel matrix corresponding to the linear kernel
    K_train = np.dot(X_[:180], X_[:180].T)
    y_train = y_[:180]

    clf = SVC(kernel='precomputed')
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(K_train, y_train)

    assert_true(cv.best_score_ >= 0)

    # compute the test kernel matrix
    K_test = np.dot(X_[180:], X_[:180].T)
    y_test = y_[180:]

    y_pred = cv.predict(K_test)

    assert_true(np.mean(y_pred == y_test) >= 0)

    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
    # Test that grid search returns an error with a non-square precomputed
    # training kernel matrix
    K_train = np.zeros((10, 20))
    y_train = np.ones((10, ))
    clf = SVC(kernel='precomputed')
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
    """Grid search raises ValueError when the estimator uses a kernel function.

    Idiom fix: the original assigned a lambda to a name (PEP 8 E731); a
    ``def`` is equivalent and gives the callable a proper name in tracebacks.
    """
    # Test that grid search returns an error when using a kernel_function
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    def kernel_function(x1, x2):
        # Plain linear kernel expressed as a callable.
        return np.dot(x1, x2.T)

    clf = SVC(kernel=kernel_function)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
    """Broken classifier that cannot be fit twice"""

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y):
        # Deliberately refuse a second fit on the same instance: refitting
        # must happen on a clone, which this stub is designed to verify.
        assert_true(not hasattr(self, 'has_been_fit_'))
        self.has_been_fit_ = True

    def predict(self, X):
        # Constant (all-zero) predictions; the values are irrelevant here.
        return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
    """Refitting must operate on a fresh clone, not the fitted estimator."""
    # Regression test for bug in refitting
    # Simulates re-fitting a broken estimator; this used to break with
    # sparse SVMs.
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
                       scoring="precision", refit=True)
    clf.fit(X, y)
def test_gridsearch_nd():
    """GridSearchCV accepts n-dimensional X (4-d) and y (3-d) arrays."""
    # Pass X as list in GridSearchCV
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    check_X = lambda x: x.shape[1:] == (5, 3, 2)
    check_y = lambda x: x.shape[1:] == (7, 11)
    clf = CheckingClassifier(check_X=check_X, check_y=check_y)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
    # NOTE(review): X and y here are presumably module-level fixtures defined
    # earlier in this file (the local data is X_4d / y_3d) -- confirm.
    grid_search.fit(X_4d, y_3d).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
    """GridSearchCV accepts X given as a plain Python list."""
    # Pass X as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
    cv = KFold(n=len(X), n_folds=3)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
    grid_search.fit(X.tolist(), y).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
    """GridSearchCV accepts y given as a plain Python list."""
    # Pass y as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
    cv = KFold(n=len(X), n_folds=3)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
    grid_search.fit(X, y.tolist()).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
    """Grid search must pass pandas inputs through without coercion."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        # Run the real pandas types too when pandas is installed.
        from pandas import Series, DataFrame
        types.append((DataFrame, Series))
    except ImportError:
        pass

    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    for InputFeatureType, TargetType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)

        grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
        grid_search.fit(X_df, y_ser).score(X_df, y_ser)
        grid_search.predict(X_df)
        assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
    """Grid search works with an unsupervised estimator, with and without y."""
    # test grid-search with unsupervised estimator
    X, y = make_blobs(random_state=0)
    km = KMeans(random_state=0)
    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
                               scoring='adjusted_rand_score')
    grid_search.fit(X, y)
    # ARI can find the right number :)
    assert_equal(grid_search.best_params_["n_clusters"], 3)

    # Now without a score, and without y
    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
    grid_search.fit(X)
    assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
    """Search works for estimators without predict() given a custom scorer."""
    # test grid-search with an estimator without predict.
    # slight duplication of a test from KDE
    def custom_scoring(estimator, X):
        # Reward exactly one bandwidth so the winner is deterministic.
        return 42 if estimator.bandwidth == .1 else 0
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    search = GridSearchCV(KernelDensity(),
                          param_grid=dict(bandwidth=[.01, .1, 1]),
                          scoring=custom_scoring)
    search.fit(X)
    assert_equal(search.best_params_['bandwidth'], .1)
    assert_equal(search.best_score_, 42)
def test_param_sampler():
    """ParameterSampler yields n_iter dicts drawn from lists/distributions."""
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert_equal(len(samples), 10)
    for sample in samples:
        assert_true(sample["kernel"] in ["rbf", "linear"])
        # uniform(0, 1) draws must stay in the [0, 1] interval.
        assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
    """Structure and consistency of RandomizedSearchCV.grid_scores_."""
    # Make a dataset with a lot of noise to get various kind of prediction
    # errors across CV folds and parameter settings
    X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
                               random_state=0)

    # XXX: as of today (scipy 0.12) it's not possible to set the random seed
    # of scipy.stats distributions: the assertions in this test should thus
    # not depend on the randomization
    params = dict(C=expon(scale=10),
                  gamma=expon(scale=0.1))
    n_cv_iter = 3
    n_search_iter = 30
    search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
                                param_distributions=params, iid=False)
    search.fit(X, y)
    assert_equal(len(search.grid_scores_), n_search_iter)

    # Check consistency of the structure of each cv_score item
    for cv_score in search.grid_scores_:
        assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
        # Because we set iid to False, the mean_validation score is the
        # mean of the fold mean scores instead of the aggregate sample-wise
        # mean score
        assert_almost_equal(np.mean(cv_score.cv_validation_scores),
                            cv_score.mean_validation_score)
        assert_equal(list(sorted(cv_score.parameters.keys())),
                     list(sorted(params.keys())))

    # Check the consistency with the best_score_ and best_params_ attributes
    sorted_grid_scores = list(sorted(search.grid_scores_,
                              key=lambda x: x.mean_validation_score))
    best_score = sorted_grid_scores[-1].mean_validation_score
    assert_equal(search.best_score_, best_score)

    # Ties are possible, so best_params_ only needs to be among the tied best.
    tied_best_params = [s.parameters for s in sorted_grid_scores
                        if s.mean_validation_score == best_score]
    assert_true(search.best_params_ in tied_best_params,
                "best_params_={0} is not part of the"
                " tied best models: {1}".format(
                    search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
    """Per-fold grid scores must match scores recomputed by hand."""
    # test that correct scores are used
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    for score in ['f1', 'roc_auc']:
        grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
        grid_search.fit(X, y)
        cv = StratifiedKFold(n_folds=3, y=y)
        for C, scores in zip(Cs, grid_search.grid_scores_):
            clf.set_params(C=C)
            scores = scores[2]  # get the separate runs from grid scores
            i = 0
            for train, test in cv:
                # Refit manually on each fold and recompute the metric.
                clf.fit(X[train], y[train])
                if score == "f1":
                    correct_score = f1_score(y[test], clf.predict(X[test]))
                elif score == "roc_auc":
                    dec = clf.decision_function(X[test])
                    correct_score = roc_auc_score(y[test], dec)
                assert_almost_equal(correct_score, scores[i])
                i += 1
def test_pickle():
    """Fitted GridSearchCV / RandomizedSearchCV objects must be picklable."""
    # Test that a fit search can be pickled
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
    # NOTE(review): X and y are presumably module-level fixtures defined
    # earlier in this file -- confirm.
    grid_search.fit(X, y)
    pickle.dumps(grid_search)  # smoke test

    random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
                                       refit=True, n_iter=3)
    random_search.fit(X, y)
    pickle.dumps(random_search)  # smoke test
def test_grid_search_with_multioutput_data():
    """Grid and randomized search handle multi-output targets correctly."""
    # Test search with multi-output estimator
    X, y = make_multilabel_classification(random_state=0)

    est_parameters = {"max_depth": [1, 2, 3, 4]}
    cv = KFold(y.shape[0], random_state=0)

    estimators = [DecisionTreeRegressor(random_state=0),
                  DecisionTreeClassifier(random_state=0)]

    # Test with grid search cv
    for est in estimators:
        grid_search = GridSearchCV(est, est_parameters, cv=cv)
        grid_search.fit(X, y)
        for parameters, _, cv_validation_scores in grid_search.grid_scores_:
            est.set_params(**parameters)
            for i, (train, test) in enumerate(cv):
                # Recompute each fold score by hand and compare.
                est.fit(X[train], y[train])
                correct_score = est.score(X[test], y[test])
                assert_almost_equal(correct_score,
                                    cv_validation_scores[i])

    # Test with a randomized search
    for est in estimators:
        random_search = RandomizedSearchCV(est, est_parameters,
                                           cv=cv, n_iter=3)
        random_search.fit(X, y)
        for parameters, _, cv_validation_scores in random_search.grid_scores_:
            est.set_params(**parameters)
            for i, (train, test) in enumerate(cv):
                est.fit(X[train], y[train])
                correct_score = est.score(X[test], y[test])
                assert_almost_equal(correct_score,
                                    cv_validation_scores[i])
def test_predict_proba_disabled():
    """Search must not expose predict_proba when the estimator disables it."""
    # Test predict_proba when disabled on estimator.
    X = np.arange(20).reshape(5, -1)
    y = [0, 0, 1, 1, 1]
    clf = SVC(probability=False)
    gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
    assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
    """NaNs in X are fine when an Imputer precedes the classifier."""
    # Test GridSearchCV with Imputer
    X = np.arange(20, dtype=np.float64).reshape(5, -1)
    X[2, :] = np.nan
    y = [0, 0, 1, 1, 1]
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
    """Classifier that raises a ValueError on fit()"""

    # fit() raises whenever `parameter` equals this sentinel value.
    FAILING_PARAMETER = 2

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y=None):
        if self.parameter == FailingClassifier.FAILING_PARAMETER:
            raise ValueError("Failing classifier failed as required")

    def predict(self, X):
        # Constant (all-zero) predictions; the values are irrelevant here.
        return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    """With error_score set, failing fits warn and get the fallback score."""
    # GridSearchCV with on_error != 'raise'
    # Ensures that a warning is raised and score reset where appropriate.

    X, y = make_classification(n_samples=20, n_features=10, random_state=0)

    clf = FailingClassifier()

    # refit=False because we only want to check that errors caused by fits
    # to individual folds will be caught and warnings raised instead. If
    # refit was done, then an exception would be raised on refit and not
    # caught by grid_search (expected behavior), and this would cause an
    # error in this test.
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score=0.0)

    assert_warns(FitFailedWarning, gs.fit, X, y)

    # Ensure that grid scores were set to zero as required for those fits
    # that are expected to fail.
    assert all(np.all(this_point.cv_validation_scores == 0.0)
               for this_point in gs.grid_scores_
               if this_point.parameters['parameter'] ==
               FailingClassifier.FAILING_PARAMETER)

    # Same check with error_score=NaN instead of zero.
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score=float('nan'))
    assert_warns(FitFailedWarning, gs.fit, X, y)
    assert all(np.all(np.isnan(this_point.cv_validation_scores))
               for this_point in gs.grid_scores_
               if this_point.parameters['parameter'] ==
               FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    """With error_score='raise', the underlying fit error propagates."""
    # GridSearchCV with on_error == 'raise' raises the error

    X, y = make_classification(n_samples=20, n_features=10, random_state=0)

    clf = FailingClassifier()

    # refit=False because we want to test the behaviour of the grid search part
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score='raise')

    # FailingClassifier issues a ValueError so this is what we look for.
    assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
    """ParameterSampler sampling-without-replacement edge cases."""
    # raise error if n_iter too large
    params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params, n_iter=7)
    assert_raises(ValueError, list, sampler)

    # degenerates to GridSearchCV if n_iter the same as grid_size
    sampler = ParameterSampler(params, n_iter=6)
    samples = list(sampler)
    assert_equal(len(samples), 6)
    for values in ParameterGrid(params):
        assert_true(values in samples)

    # test sampling without replacement in a large grid
    params = {'a': range(10), 'b': range(10), 'c': range(10)}
    sampler = ParameterSampler(params, n_iter=99, random_state=42)
    samples = list(sampler)
    assert_equal(len(samples), 99)
    hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
                        for p in samples]
    # All 99 samples must be distinct (no replacement on a finite grid).
    assert_equal(len(set(hashable_samples)), 99)

    # doesn't go into infinite loops
    params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params_distribution, n_iter=7)
    samples = list(sampler)
    assert_equal(len(samples), 7)
| bsd-3-clause |
rodrigoj42/AnaliseTSE | workbench/vereadores.py | 1 | 2773 | from prettytable import PrettyTable
# Seat-allocation script for Rio de Janeiro city-council ("vereadores")
# elections, using scraped TSE vote totals and a largest-averages method.
# NOTE(review): Python 2 script (print statements / raw_input); `eval` on
# each line assumes 'dados_rj.txt' is a trusted, pre-scraped dump.
t = open('dados_rj.txt').readlines()
candidatos = []
# Lines 0-1 are headers; the last line carries different trailing junk.
for linha in t[2:-1]:
    candidatos.append(eval(linha[:-3]))
candidatos.append(eval(t[-1][:-4]))
# Aggregate votes per coalition ("coligacao"): the coalition name is the
# text after the first "- " of the 'cc' field, or the whole field if none.
coligacoes = {}
for candidato in candidatos:
    h = candidato['cc'].find('-')
    if h == -1: coligacao = candidato['cc']
    # NOTE(review): redundant double assignment below -- harmless, likely a typo.
    else: coligacao = coligacao = candidato['cc'][h+2:]
    try: coligacoes[coligacao] += int(candidato['v'])
    except: coligacoes[coligacao] = int(candidato['v'])
# Electoral quotient: total valid votes divided by the 51 council seats.
qe = sum(coligacoes.values())/51.0
eleitos = []
tabela = PrettyTable(['Coligacao', 'N Candidatos', 'Votos', 'Quociente Partidario'])
for coligacao in coligacoes:
    # Candidates of this coalition, sorted by vote count (descending).
    c_da_c = filter(lambda x: coligacao == x['cc'] if x['cc'].find('-') == -1 else coligacao == x['cc'][x['cc'].find('-')+2:], candidatos)
    s = sorted(c_da_c, key=lambda x: int(x['v']))[::-1]
    # Party quotient: whole seats won outright by this coalition.
    qp = int(coligacoes[coligacao]/qe)
    tabela.add_row([coligacao, len(s), coligacoes[coligacao], qp])
    coligacoes[coligacao] = {'cadeiras':qp, 'votos':coligacoes[coligacao], 'candidatos':s}
    for eleito in s[:qp]: eleitos.append(eleito)
print tabela
print 'Coeficiente Eleitoral: %s \n' % str(int(qe))
# Distribute remaining seats by largest averages (votes / (seats + 1)),
# skipping coalitions below the quotient or blacklisted in earlier rounds.
blacklist = []
while len(eleitos) < 51:
    temp = {}
    for coligacao in coligacoes:
        try:
            if coligacao not in blacklist and coligacoes[coligacao]['votos'] > qe:
                temp[coligacoes[coligacao]['votos']/float(coligacoes[coligacao]['cadeiras']+1)] = coligacao
            else:
                pass
        except: pass
    coligacao = temp[max(temp.keys())]
    eleito = coligacoes[coligacao]['candidatos'][coligacoes[coligacao]['cadeiras']]
    # A candidate needs more than 10% of the quotient to take the seat;
    # otherwise the whole coalition is excluded from further rounds.
    if int(eleito['v']) > int(0.1*qe):
        coligacoes[coligacao]['cadeiras'] += 1
        eleitos.append(eleito)
    else:
        blacklist.append(coligacao)
# Interactive report: top 25 candidates of a party chosen by the user
# (matched by prefix of the 'cc' field).
target = raw_input('Nome do partido: ')
c_do_p = []
for candidato in candidatos:
    if candidato['cc'][:len(target)] == target:
        c_do_p.append(candidato)
c_do_p = sorted(c_do_p, key=lambda x: int(x['v']))[::-1]
tabela = PrettyTable(['Posicao', 'Nome', 'Votos'])
for candidato in range(len(c_do_p[:25])):
    tabela.add_row([candidato+1, c_do_p[candidato]['nm'], c_do_p[candidato]['v']])
print tabela
raw_input('Aperte ENTER para mostrar os candidatos eleitos')
# Final report: every elected candidate, then per-party seat counts.
a = 0
partidos = {}
tabela = PrettyTable(['Posicao', 'Nome', 'Coligacao', 'Votos'])
for eleito in eleitos:
    a += 1
    tabela.add_row([a, eleito['nm'], eleito['cc'], int(eleito['v'])])
    try: partidos[eleito['cc']] += 1
    except: partidos[eleito['cc']] = 1
print tabela
raw_input('Aperte ENTER para mostrar o numero de candidatos eleitos em cada partido')
tabela = PrettyTable(['Partido', 'N'])
for partido in partidos: tabela.add_row([partido, partidos[partido]])
tabela.sortby = 'N'
print tabela
| mit |
tyler6389/count_kernel_grand | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Stream packed little-endian u32 values from stdin and print them as
# space-separated "index=value" pairs (the sysfs adsl_config format).
# NOTE(review): written for Python 2 -- on Python 3, sys.stdin.read()
# returns str, so struct.unpack would need sys.stdin.buffer instead.
i = 0
while True:
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: done.
        break
    elif len(buf) != 4:
        # Truncated trailing record: report and fail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
| gpl-2.0 |
shashank971/edx-platform | cms/djangoapps/contentstore/tests/test_import_pure_xblock.py | 162 | 3002 | """
Integration tests for importing courses containing pure XBlocks.
"""
from xblock.core import XBlock
from xblock.fields import String
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.mongo.draft import as_draft
from django.conf import settings
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
class StubXBlock(XBlock):
    """
    Stub XBlock to use in tests.
    The default XBlock implementation will load this XBlock
    from XML, using the lowercase version of the class
    as an element name ("stubxblock") and the field names
    as attributes of that element.
    Example:
        <stubxblock test_field="this is only a test" />
    """
    # Populated from the XML attribute of the same name; falls back to
    # "default" when the attribute is absent.
    test_field = String(default="default")
class XBlockImportTest(ModuleStoreTestCase):
    """Import courses containing pure XBlocks and check the loaded fields."""

    @XBlock.register_temp_plugin(StubXBlock)
    def test_import_public(self):
        self._assert_import(
            'pure_xblock_public',
            'set by xml'
        )

    @XBlock.register_temp_plugin(StubXBlock)
    def test_import_draft(self):
        self._assert_import(
            'pure_xblock_draft',
            'set by xml',
            has_draft=True
        )

    def _assert_import(self, course_dir, expected_field_val, has_draft=False):
        """
        Import a course from XML, then verify that the XBlock was loaded
        with the correct field value.
        Args:
            course_dir (str): The name of the course directory (relative to the test data directory)
            expected_field_val (str): The expected value of the XBlock's test field.
        Kwargs:
            has_draft (bool): If true, check that a draft of the XBlock exists with
                the expected field value set.
        """
        # It is necessary to use the "old mongo" modulestore because split doesn't work
        # with the "has_draft" logic below.
        store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)  # pylint: disable=protected-access
        courses = import_course_from_xml(
            store, self.user.id, TEST_DATA_DIR, [course_dir], create_if_not_present=True
        )

        xblock_location = courses[0].id.make_usage_key('stubxblock', 'xblock_test')

        if has_draft:
            xblock_location = as_draft(xblock_location)

        xblock = store.get_item(xblock_location)
        self.assertTrue(isinstance(xblock, StubXBlock))
        self.assertEqual(xblock.test_field, expected_field_val)

        if has_draft:
            draft_xblock = store.get_item(xblock_location)
            self.assertTrue(getattr(draft_xblock, 'is_draft', False))
            self.assertTrue(isinstance(draft_xblock, StubXBlock))
            self.assertEqual(draft_xblock.test_field, expected_field_val)
| agpl-3.0 |
Carreau/python-prompt-toolkit | prompt_toolkit/layout/utils.py | 2 | 1073 | from __future__ import unicode_literals
__all__ = (
'TokenList',
)
class TokenList(object):
    """
    Sequence of (Token, text) fragments.

    Length, indexing and slicing operate on individual *characters*, not
    on the underlying fragments: ``len(tl)`` counts every character and
    ``tl[i]`` yields a single ``(token, char)`` pair.  Iteration, on the
    other hand, walks the raw fragments.
    """
    def __init__(self, iterator=None):
        self._list = [] if iterator is None else list(iterator)

    def __len__(self):
        # Total character count across all fragments.
        total = 0
        for _, text in self._list:
            total += len(text)
        return total

    def __getitem__(self, val):
        # Expand fragments into one (token, char) pair per character, then
        # index/slice that flat sequence.
        expanded = [(token, char)
                    for token, text in self._list
                    for char in text]
        if isinstance(val, slice):
            return TokenList(expanded[val])
        return expanded[val]

    def __iter__(self):
        # Iterate over raw (token, text) fragments, not characters.
        return iter(self._list)

    def append(self, val):
        self._list.append(val)

    def __add__(self, other):
        return TokenList(self._list + list(other))

    @property
    def text(self):
        # Concatenated text of all fragments.
        return ''.join(text for _, text in self._list)

    def __repr__(self):
        return 'TokenList(%r)' % self._list
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow/source/google/protobuf/internal/file_options_test_pb2.py | 4 | 3021 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/file_options_test.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/file_options_test.proto',
package='google.protobuf.python.internal',
syntax='proto2',
serialized_pb=_b('\n0google/protobuf/internal/file_options_test.proto\x12\x1fgoogle.protobuf.python.internal\x1a google/protobuf/descriptor.proto\"\x1e\n\nFooOptions\x12\x10\n\x08\x66oo_name\x18\x01 \x01(\t:a\n\x0b\x66oo_options\x12\x1c.google.protobuf.FileOptions\x18\xac\xec\xb6\x39 \x01(\x0b\x32+.google.protobuf.python.internal.FooOptions')
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
FOO_OPTIONS_FIELD_NUMBER = 120436268
foo_options = _descriptor.FieldDescriptor(
name='foo_options', full_name='google.protobuf.python.internal.foo_options', index=0,
number=120436268, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_FOOOPTIONS = _descriptor.Descriptor(
name='FooOptions',
full_name='google.protobuf.python.internal.FooOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='foo_name', full_name='google.protobuf.python.internal.FooOptions.foo_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=119,
serialized_end=149,
)
DESCRIPTOR.message_types_by_name['FooOptions'] = _FOOOPTIONS
DESCRIPTOR.extensions_by_name['foo_options'] = foo_options
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FooOptions = _reflection.GeneratedProtocolMessageType('FooOptions', (_message.Message,), dict(
DESCRIPTOR = _FOOOPTIONS,
__module__ = 'google.protobuf.internal.file_options_test_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.python.internal.FooOptions)
))
_sym_db.RegisterMessage(FooOptions)
foo_options.message_type = _FOOOPTIONS
google_dot_protobuf_dot_descriptor__pb2.FileOptions.RegisterExtension(foo_options)
# @@protoc_insertion_point(module_scope)
| mit |
pigletto/django-lfs | lfs/net_price/__init__.py | 2 | 1258 | # lfs imports
from lfs.plugins import PriceCalculator
class NetPriceCalculator(PriceCalculator):
    """
    Price calculator for shops that store net prices.

    The value of ``product.price`` persisted in the database excludes tax,
    i.e. the stored price is the product's net price; gross figures are
    derived by multiplying with the customer tax rate.  See
    ``lfs.plugins.PriceCalculator`` for the full interface.
    """
    def get_price_net(self, with_properties=True, amount=1):
        # Stored prices are already net.
        return self.get_price(with_properties, amount)

    def get_price_gross(self, with_properties=True, amount=1):
        net = self.get_price_net(with_properties, amount)
        return net * self._calc_customer_tax_rate()

    def get_standard_price_net(self, with_properties=True, amount=1):
        return self.get_standard_price(with_properties, amount)

    def get_standard_price_gross(self, with_properties=True, amount=1):
        net = self.get_standard_price_net(with_properties, amount)
        return net * self._calc_customer_tax_rate()

    def get_for_sale_price_net(self, with_properties=True, amount=1):
        return self.get_for_sale_price(with_properties, amount)

    def get_for_sale_price_gross(self, with_properties=True, amount=1):
        net = self.get_for_sale_price_net(with_properties, amount)
        return net * self._calc_customer_tax_rate()

    def price_includes_tax(self):
        # Database prices exclude tax by definition of this calculator.
        return False
| bsd-3-clause |
thaim/ansible | lib/ansible/plugins/action/fail.py | 122 | 1477 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    ''' Fail with custom message '''

    TRANSFERS_FILES = False
    _VALID_ARGS = frozenset(('msg',))

    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        # Let the base class populate the shared result skeleton first.
        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # Use the task-supplied message when one was given, else a default.
        args = self._task.args
        if args and 'msg' in args:
            msg = args.get('msg')
        else:
            msg = 'Failed as requested from task'

        result['failed'] = True
        result['msg'] = msg
        return result
| mit |
mlhenderson/ui-common | functional-site/js/angular-1.2.14/docs/components/bootstrap-3.1.1/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
# Sentinel file dropped by a failed download so a later `upload` run knows
# the cache needs refreshing.
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
# NOTE(review): the value is bytes per *MiB* (binary megabyte), matching the
# "MiB" labels printed elsewhere, despite the "MB" in the name.
BYTES_PER_MB = 1024 * 1024

try:
    BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
    raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
    """Return the tarball's size for *directory* as a human-readable string.

    The value is whole mebibytes, matching the "MiB" label.  The local
    variable was previously misnamed ``kib`` even though the division is
    by BYTES_PER_MB (bytes per MiB).
    """
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
    """gzip-compress *directory* into ``<basename>.tar.gz`` via the tar CLI."""
    print("Creating tarball of {}...".format(directory))
    # -C makes archive paths relative to the parent, so extraction restores
    # the directory in place.
    run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
    """Unpack *directory*'s cached tarball back into its parent directory."""
    print("Extracting tarball of {}...".format(directory))
    run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
    """Fetch the cached tarball for *directory* from S3 and unpack it.

    On failure, drops NEED_TO_UPLOAD_MARKER so a later ``upload`` run knows
    the cache must be refreshed, then aborts via SystemExit.  Relies on the
    module-level ``key`` and ``friendly_name`` globals set in ``__main__``.
    """
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
    try:
        print("Downloading {} tarball from S3...".format(friendly_name))
        key.get_contents_to_filename(_tarball_filename_for(directory))
    except S3ResponseError as err:
        # Leave the marker behind so the upload phase repopulates the cache.
        open(NEED_TO_UPLOAD_MARKER, 'a').close()
        print(err)
        raise SystemExit("Cached {} download failed!".format(friendly_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
    """Tar up *directory* and push it to S3 as the new cache entry.

    Clears NEED_TO_UPLOAD_MARKER on success.  Relies on the module-level
    ``key`` and ``friendly_name`` globals set in ``__main__``.
    """
    _create_tarball(directory)
    print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
    key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(friendly_name))
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
    # Uses environment variables:
    #   AWS_ACCESS_KEY_ID     -- AWS Access Key ID
    #   AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
    argv.pop(0)
    if len(argv) != 4:
        raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
    mode, friendly_name, dependencies_file, directory = argv

    conn = S3Connection()
    bucket = conn.lookup(BUCKET_NAME, validate=False)
    if bucket is None:
        raise SystemExit("Could not access bucket!")

    # The cache key is the hash of the dependency manifest, so any change
    # to the dependencies invalidates the cached tarball.
    dependencies_file_hash = _sha256_of_file(dependencies_file)

    key = Key(bucket, dependencies_file_hash)
    key.storage_class = 'REDUCED_REDUNDANCY'

    if mode == 'download':
        download(directory)
    elif mode == 'upload':
        # Only upload when a previous download left the marker behind.
        if isfile(NEED_TO_UPLOAD_MARKER):  # FIXME
            upload(directory)
        else:
            print("No need to upload anything.")
    else:
        raise SystemExit("Unrecognized mode {!r}".format(mode))
| mit |
GauravSahu/odoo | addons/hr_payroll/hr_payroll.py | 144 | 49776 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import date
from datetime import datetime
from datetime import timedelta
from dateutil import relativedelta
from openerp import api, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.tools.safe_eval import safe_eval as eval
class hr_payroll_structure(osv.osv):
    """
    Salary structure used to defined
    - Basic
    - Allowances
    - Deductions
    """
    _name = 'hr.payroll.structure'
    _description = 'Salary Structure'
    _columns = {
        'name':fields.char('Name', required=True),
        'code':fields.char('Reference', size=64, required=True),
        'company_id':fields.many2one('res.company', 'Company', required=True, copy=False),
        'note': fields.text('Description'),
        'parent_id':fields.many2one('hr.payroll.structure', 'Parent'),
        'children_ids':fields.one2many('hr.payroll.structure', 'parent_id', 'Children', copy=True),
        'rule_ids':fields.many2many('hr.salary.rule', 'hr_structure_salary_rule_rel', 'struct_id', 'rule_id', 'Salary Rules'),
    }

    def _get_parent(self, cr, uid, context=None):
        # Default parent: the base structure shipped as XML data
        # (ir.model.data record named 'structure_base'), when installed.
        obj_model = self.pool.get('ir.model.data')
        res = False
        data_id = obj_model.search(cr, uid, [('model', '=', 'hr.payroll.structure'), ('name', '=', 'structure_base')])
        if data_id:
            res = obj_model.browse(cr, uid, data_id[0], context=context).res_id
        return res

    _defaults = {
        'company_id': lambda self, cr, uid, context: \
            self.pool.get('res.users').browse(cr, uid, uid,
                context=context).company_id.id,
        'parent_id': _get_parent,
    }

    _constraints = [
        (osv.osv._check_recursion, 'Error ! You cannot create a recursive Salary Structure.', ['parent_id'])
    ]

    def copy(self, cr, uid, id, default=None, context=None):
        # Suffix the code with "(copy)" so the duplicate stays distinguishable.
        default = dict(default or {},
            code=_("%s (copy)") % (self.browse(cr, uid, id, context=context).code))
        return super(hr_payroll_structure, self).copy(cr, uid, id, default, context=context)

    @api.cr_uid_ids_context
    def get_all_rules(self, cr, uid, structure_ids, context=None):
        """
        @param structure_ids: list of structure
        @return: returns a list of tuple (id, sequence) of rules that are maybe to apply
        """
        all_rules = []
        for struct in self.browse(cr, uid, structure_ids, context=context):
            # Rules may reference child rules; the salary-rule model expands
            # them recursively.
            all_rules += self.pool.get('hr.salary.rule')._recursive_search_of_rules(cr, uid, struct.rule_ids, context=context)
        return all_rules

    @api.cr_uid_ids_context
    def _get_parent_structure(self, cr, uid, struct_ids, context=None):
        # Walk up the parent chain recursively; ancestors come first in the
        # returned list, followed by the requested structures themselves.
        if not struct_ids:
            return []
        parent = []
        for struct in self.browse(cr, uid, struct_ids, context=context):
            if struct.parent_id:
                parent.append(struct.parent_id.id)
        if parent:
            parent = self._get_parent_structure(cr, uid, parent, context)
        return parent + struct_ids
class hr_contract(osv.osv):
    """
    Payroll extension of the employee contract: links the contract to a
    salary structure and a pay schedule (based on the visa / work permits,
    it allows configuring different salary structures).
    """
    _inherit = 'hr.contract'
    _description = 'Employee Contract'
    _columns = {
        'struct_id': fields.many2one('hr.payroll.structure', 'Salary Structure'),
        'schedule_pay': fields.selection([
            ('monthly', 'Monthly'),
            ('quarterly', 'Quarterly'),
            ('semi-annually', 'Semi-annually'),
            ('annually', 'Annually'),
            ('weekly', 'Weekly'),
            ('bi-weekly', 'Bi-weekly'),
            ('bi-monthly', 'Bi-monthly'),
        ], 'Scheduled Pay', select=True),
    }
    _defaults = {
        'schedule_pay': 'monthly',
    }
    @api.cr_uid_ids_context
    def get_all_structures(self, cr, uid, contract_ids, context=None):
        """
        @param contract_ids: list of contracts
        @return: the structures linked to the given contracts, ordered by hierarchy (parent=False first, then first level children and so on) and without duplicates
        """
        structure_ids = [contract.struct_id.id for contract in self.browse(cr, uid, contract_ids, context=context) if contract.struct_id]
        if not structure_ids:
            return []
        return list(set(self.pool.get('hr.payroll.structure')._get_parent_structure(cr, uid, structure_ids, context=context)))
class contrib_register(osv.osv):
    '''
    Contribution Register: a third party involved in the salary payment
    (salary rules reference it through their register_id field, which is
    the inverse of register_line_ids below).
    '''
    _name = 'hr.contribution.register'
    _description = 'Contribution Register'
    _columns = {
        'company_id':fields.many2one('res.company', 'Company'),
        'partner_id':fields.many2one('res.partner', 'Partner'),
        'name':fields.char('Name', required=True, readonly=False),
        # Payslip lines whose salary rule routes to this register.
        'register_line_ids':fields.one2many('hr.payslip.line', 'register_id', 'Register Line', readonly=True),
        'note': fields.text('Description'),
    }
    _defaults = {
        # Default company: the current user's company.
        'company_id': lambda self, cr, uid, context: \
                self.pool.get('res.users').browse(cr, uid, uid,
                    context=context).company_id.id,
    }
class hr_salary_rule_category(osv.osv):
    """
    HR Salary Rule Category: groups salary rules for reporting purposes.
    Categories form a tree (parent_id/children_ids); amounts computed on a
    category are also summed on its ancestors (see _sum_salary_rule_category
    in hr.payslip).
    """
    _name = 'hr.salary.rule.category'
    _description = 'Salary Rule Category'
    _columns = {
        'name':fields.char('Name', required=True, readonly=False),
        'code':fields.char('Code', size=64, required=True, readonly=False),
        'parent_id':fields.many2one('hr.salary.rule.category', 'Parent', help="Linking a salary category to its parent is used only for the reporting purpose."),
        'children_ids': fields.one2many('hr.salary.rule.category', 'parent_id', 'Children'),
        'note': fields.text('Description'),
        'company_id':fields.many2one('res.company', 'Company', required=False),
    }
    _defaults = {
        # Default company: the current user's company.
        'company_id': lambda self, cr, uid, context: \
                self.pool.get('res.users').browse(cr, uid, uid,
                    context=context).company_id.id,
    }
class one2many_mod2(fields.one2many):
    """One2many variant whose getter only returns the related payslip lines
    flagged with ``appears_on_payslip``, honouring the field's ``_limit``."""
    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        if context is None:
            context = {}
        if not values:
            values = {}
        relation = obj.pool[self._obj]
        # Start every requested id with an empty list so callers always get a key.
        result = dict((record_id, []) for record_id in ids)
        visible_ids = relation.search(cr, user, [(self._fields_id, 'in', ids), ('appears_on_payslip', '=', True)], limit=self._limit)
        rows = relation.read(cr, user, visible_ids, [self._fields_id], context=context, load='_classic_write')
        for row in rows:
            parent = row[self._fields_id]
            if isinstance(parent, tuple):
                # read() may return a (id, display_name) pair for a many2one;
                # we only need the database id.
                parent = parent[0]
            result[parent].append(row['id'])
        return result
class hr_payslip_run(osv.osv):
    """Payslip batch: a set of payslips generated together for one period."""
    _name = 'hr.payslip.run'
    _description = 'Payslip Batches'
    _columns = {
        'name': fields.char('Name', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'slip_ids': fields.one2many('hr.payslip', 'payslip_run_id', 'Payslips', required=False, readonly=True, states={'draft': [('readonly', False)]}),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('close', 'Close'),
        ], 'Status', select=True, readonly=True, copy=False),
        'date_start': fields.date('Date From', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'date_end': fields.date('Date To', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'credit_note': fields.boolean('Credit Note', readonly=True, states={'draft': [('readonly', False)]}, help="If its checked, indicates that all payslips generated from here are refund payslips."),
    }
    _defaults = {
        'state': 'draft',
        # Default period: the current month (first to last day).
        'date_start': lambda *a: time.strftime('%Y-%m-01'),
        'date_end': lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10],
    }
    def draft_payslip_run(self, cr, uid, ids, context=None):
        """Workflow action: set the batches back to draft."""
        return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
    def close_payslip_run(self, cr, uid, ids, context=None):
        """Workflow action: close the batches."""
        return self.write(cr, uid, ids, {'state': 'close'}, context=context)
class hr_payslip(osv.osv):
    '''
    Pay Slip: computed salary for one employee over one period.  Lines are
    produced by applying the salary rules of the structures attached to the
    employee's contract(s) (see compute_sheet / get_payslip_lines).
    '''
    _name = 'hr.payslip'
    _description = 'Pay Slip'
    def _get_lines_salary_rule_category(self, cr, uid, ids, field_names, arg=None, context=None):
        """Functional getter: {payslip_id: [line_id, ...]} ordered by the line
        sequence.  Backs 'details_by_salary_rule_category' below."""
        result = {}
        if not ids: return result
        for id in ids:
            result.setdefault(id, [])
        cr.execute('''SELECT pl.slip_id, pl.id FROM hr_payslip_line AS pl \
                    LEFT JOIN hr_salary_rule_category AS sh on (pl.category_id = sh.id) \
                    WHERE pl.slip_id in %s \
                    GROUP BY pl.slip_id, pl.sequence, pl.id ORDER BY pl.sequence''',(tuple(ids),))
        res = cr.fetchall()
        for r in res:
            result[r[0]].append(r[1])
        return result
    def _count_detail_payslip(self, cr, uid, ids, field_name, arg, context=None):
        """Functional getter: number of computed payslip lines per payslip."""
        res = {}
        for details in self.browse(cr, uid, ids, context=context):
            res[details.id] = len(details.line_ids)
        return res
    _columns = {
        'struct_id': fields.many2one('hr.payroll.structure', 'Structure', readonly=True, states={'draft': [('readonly', False)]}, help='Defines the rules that have to be applied to this payslip, accordingly to the contract chosen. If you let empty the field contract, this field isn\'t mandatory anymore and thus the rules applied will be all the rules set on the structure of all contracts of the employee valid for the chosen period'),
        'name': fields.char('Payslip Name', required=False, readonly=True, states={'draft': [('readonly', False)]}),
        'number': fields.char('Reference', required=False, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
        'employee_id': fields.many2one('hr.employee', 'Employee', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'date_from': fields.date('Date From', readonly=True, states={'draft': [('readonly', False)]}, required=True),
        'date_to': fields.date('Date To', readonly=True, states={'draft': [('readonly', False)]}, required=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('verify', 'Waiting'),
            ('done', 'Done'),
            ('cancel', 'Rejected'),
        ], 'Status', select=True, readonly=True, copy=False,
            help='* When the payslip is created the status is \'Draft\'.\
            \n* If the payslip is under verification, the status is \'Waiting\'. \
            \n* If the payslip is confirmed then status is set to \'Done\'.\
            \n* When user cancel payslip the status is \'Rejected\'.'),
        # one2many_mod2: only lines with appears_on_payslip=True are returned.
        'line_ids': one2many_mod2('hr.payslip.line', 'slip_id', 'Payslip Lines', readonly=True, states={'draft':[('readonly',False)]}),
        'company_id': fields.many2one('res.company', 'Company', required=False, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
        'worked_days_line_ids': fields.one2many('hr.payslip.worked_days', 'payslip_id', 'Payslip Worked Days', copy=True, required=False, readonly=True, states={'draft': [('readonly', False)]}),
        'input_line_ids': fields.one2many('hr.payslip.input', 'payslip_id', 'Payslip Inputs', required=False, readonly=True, states={'draft': [('readonly', False)]}),
        'paid': fields.boolean('Made Payment Order ? ', required=False, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
        'note': fields.text('Internal Note', readonly=True, states={'draft':[('readonly',False)]}),
        'contract_id': fields.many2one('hr.contract', 'Contract', required=False, readonly=True, states={'draft': [('readonly', False)]}),
        'details_by_salary_rule_category': fields.function(_get_lines_salary_rule_category, method=True, type='one2many', relation='hr.payslip.line', string='Details by Salary Rule Category'),
        'credit_note': fields.boolean('Credit Note', help="Indicates this payslip has a refund of another", readonly=True, states={'draft': [('readonly', False)]}),
        'payslip_run_id': fields.many2one('hr.payslip.run', 'Payslip Batches', readonly=True, states={'draft': [('readonly', False)]}, copy=False),
        'payslip_count': fields.function(_count_detail_payslip, type='integer', string="Payslip Computation Details"),
    }
    _defaults = {
        # Default period: the current month (first to last day).
        'date_from': lambda *a: time.strftime('%Y-%m-01'),
        'date_to': lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10],
        'state': 'draft',
        'credit_note': False,
        'company_id': lambda self, cr, uid, context: \
                self.pool.get('res.users').browse(cr, uid, uid,
                    context=context).company_id.id,
    }
    def _check_dates(self, cr, uid, ids, context=None):
        # Constraint: the covered period must be chronologically ordered.
        for payslip in self.browse(cr, uid, ids, context=context):
            if payslip.date_from > payslip.date_to:
                return False
        return True
    _constraints = [(_check_dates, "Payslip 'Date From' must be before 'Date To'.", ['date_from', 'date_to'])]
    def cancel_sheet(self, cr, uid, ids, context=None):
        """Workflow action: reject the payslips."""
        return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
    def process_sheet(self, cr, uid, ids, context=None):
        """Workflow action: mark the payslips as paid and done."""
        return self.write(cr, uid, ids, {'paid': True, 'state': 'done'}, context=context)
    def hr_verify_sheet(self, cr, uid, ids, context=None):
        """Workflow action: (re)compute the lines, then move to 'verify'."""
        self.compute_sheet(cr, uid, ids, context)
        return self.write(cr, uid, ids, {'state': 'verify'}, context=context)
    def refund_sheet(self, cr, uid, ids, context=None):
        """Duplicate each payslip as a confirmed credit note and return an
        action opening the refund.

        NOTE(review): the action's domain references only ``id_copy`` from the
        last loop iteration, so with several ids only the last refund is
        shown — confirm this is intended.
        """
        mod_obj = self.pool.get('ir.model.data')
        for payslip in self.browse(cr, uid, ids, context=context):
            id_copy = self.copy(cr, uid, payslip.id, {'credit_note': True, 'name': _('Refund: ')+payslip.name}, context=context)
            # Push the refund straight through verification and payment.
            self.signal_workflow(cr, uid, [id_copy], 'hr_verify_sheet')
            self.signal_workflow(cr, uid, [id_copy], 'process_sheet')
        form_id = mod_obj.get_object_reference(cr, uid, 'hr_payroll', 'view_hr_payslip_form')
        form_res = form_id and form_id[1] or False
        tree_id = mod_obj.get_object_reference(cr, uid, 'hr_payroll', 'view_hr_payslip_tree')
        tree_res = tree_id and tree_id[1] or False
        return {
            'name':_("Refund Payslip"),
            'view_mode': 'tree, form',
            'view_id': False,
            'view_type': 'form',
            'res_model': 'hr.payslip',
            'type': 'ir.actions.act_window',
            'nodestroy': True,
            'target': 'current',
            'domain': "[('id', 'in', %s)]" % [id_copy],
            'views': [(tree_res, 'tree'), (form_res, 'form')],
            'context': {}
        }
    def check_done(self, cr, uid, ids, context=None):
        # Workflow hook; always allows the transition.
        return True
    def unlink(self, cr, uid, ids, context=None):
        """Forbid deletion of payslips that are neither draft nor cancelled."""
        for payslip in self.browse(cr, uid, ids, context=context):
            if payslip.state not in ['draft','cancel']:
                raise osv.except_osv(_('Warning!'),_('You cannot delete a payslip which is not draft or cancelled!'))
        return super(hr_payslip, self).unlink(cr, uid, ids, context)
    #TODO move this function into hr_contract module, on hr.employee object
    def get_contract(self, cr, uid, employee, date_from, date_to, context=None):
        """
        @param employee: browse record of employee
        @param date_from: date field
        @param date_to: date field
        @return: returns the ids of all the contracts for the given employee that need to be considered for the given dates
        """
        contract_obj = self.pool.get('hr.contract')
        clause = []
        #a contract is valid if it ends between the given dates
        clause_1 = ['&',('date_end', '<=', date_to),('date_end','>=', date_from)]
        #OR if it starts between the given dates
        clause_2 = ['&',('date_start', '<=', date_to),('date_start','>=', date_from)]
        #OR if it starts before the date_from and finishes after the date_to (or never finishes)
        clause_3 = ['&',('date_start','<=', date_from),'|',('date_end', '=', False),('date_end','>=', date_to)]
        clause_final = [('employee_id', '=', employee.id),'|','|'] + clause_1 + clause_2 + clause_3
        contract_ids = contract_obj.search(cr, uid, clause_final, context=context)
        return contract_ids
    def compute_sheet(self, cr, uid, ids, context=None):
        """Recompute the payslip lines: delete existing lines, then rebuild
        them from the applicable contracts (set a reference number if none)."""
        slip_line_pool = self.pool.get('hr.payslip.line')
        sequence_obj = self.pool.get('ir.sequence')
        for payslip in self.browse(cr, uid, ids, context=context):
            number = payslip.number or sequence_obj.get(cr, uid, 'salary.slip')
            #delete old payslip lines
            old_slipline_ids = slip_line_pool.search(cr, uid, [('slip_id', '=', payslip.id)], context=context)
            if old_slipline_ids:
                slip_line_pool.unlink(cr, uid, old_slipline_ids, context=context)
            if payslip.contract_id:
                #set the list of contract for which the rules have to be applied
                contract_ids = [payslip.contract_id.id]
            else:
                #if we don't give the contract, then the rules to apply should be for all current contracts of the employee
                contract_ids = self.get_contract(cr, uid, payslip.employee_id, payslip.date_from, payslip.date_to, context=context)
            lines = [(0,0,line) for line in self.pool.get('hr.payslip').get_payslip_lines(cr, uid, contract_ids, payslip.id, context=context)]
            self.write(cr, uid, [payslip.id], {'line_ids': lines, 'number': number,}, context=context)
        return True
    def get_worked_day_lines(self, cr, uid, contract_ids, date_from, date_to, context=None):
        """
        @param contract_ids: list of contract id
        @return: returns a list of dict containing the input that should be applied for the given contract between date_from and date_to
        """
        def was_on_leave(employee_id, datetime_day, context=None):
            # Return the name of the leave type covering that day for the
            # employee (first validated leave found), or False.
            res = False
            day = datetime_day.strftime("%Y-%m-%d")
            holiday_ids = self.pool.get('hr.holidays').search(cr, uid, [('state','=','validate'),('employee_id','=',employee_id),('type','=','remove'),('date_from','<=',day),('date_to','>=',day)])
            if holiday_ids:
                res = self.pool.get('hr.holidays').browse(cr, uid, holiday_ids, context=context)[0].holiday_status_id.name
            return res
        res = []
        for contract in self.pool.get('hr.contract').browse(cr, uid, contract_ids, context=context):
            if not contract.working_hours:
                #fill only if the contract has a working schedule linked
                continue
            # Accumulator for regular attendance days/hours on this contract.
            attendances = {
                 'name': _("Normal Working Days paid at 100%"),
                 'sequence': 1,
                 'code': 'WORK100',
                 'number_of_days': 0.0,
                 'number_of_hours': 0.0,
                 'contract_id': contract.id,
            }
            # One accumulator per leave type name, keyed by that name.
            leaves = {}
            day_from = datetime.strptime(date_from,"%Y-%m-%d")
            day_to = datetime.strptime(date_to,"%Y-%m-%d")
            nb_of_days = (day_to - day_from).days + 1
            for day in range(0, nb_of_days):
                working_hours_on_day = self.pool.get('resource.calendar').working_hours_on_day(cr, uid, contract.working_hours, day_from + timedelta(days=day), context)
                if working_hours_on_day:
                    #the employee had to work
                    leave_type = was_on_leave(contract.employee_id.id, day_from + timedelta(days=day), context=context)
                    if leave_type:
                        #if he was on leave, fill the leaves dict
                        if leave_type in leaves:
                            leaves[leave_type]['number_of_days'] += 1.0
                            leaves[leave_type]['number_of_hours'] += working_hours_on_day
                        else:
                            leaves[leave_type] = {
                                'name': leave_type,
                                'sequence': 5,
                                'code': leave_type,
                                'number_of_days': 1.0,
                                'number_of_hours': working_hours_on_day,
                                'contract_id': contract.id,
                            }
                    else:
                        #add the input vals to tmp (increment if existing)
                        attendances['number_of_days'] += 1.0
                        attendances['number_of_hours'] += working_hours_on_day
            leaves = [value for key,value in leaves.items()]
            res += [attendances] + leaves
        return res
    def get_inputs(self, cr, uid, contract_ids, date_from, date_to, context=None):
        """Return the default payslip input dicts: one entry per (contract,
        rule input) pair for every rule applicable to the given contracts.
        The 'amount' is not set here; it defaults on hr.payslip.input."""
        res = []
        contract_obj = self.pool.get('hr.contract')
        rule_obj = self.pool.get('hr.salary.rule')
        structure_ids = contract_obj.get_all_structures(cr, uid, contract_ids, context=context)
        rule_ids = self.pool.get('hr.payroll.structure').get_all_rules(cr, uid, structure_ids, context=context)
        # Rules ordered by their sequence number.
        sorted_rule_ids = [id for id, sequence in sorted(rule_ids, key=lambda x:x[1])]
        for contract in contract_obj.browse(cr, uid, contract_ids, context=context):
            for rule in rule_obj.browse(cr, uid, sorted_rule_ids, context=context):
                if rule.input_ids:
                    for input in rule.input_ids:
                        inputs = {
                             'name': input.name,
                             'code': input.code,
                             'contract_id': contract.id,
                        }
                        res += [inputs]
        return res
    def get_payslip_lines(self, cr, uid, contract_ids, payslip_id, context):
        """Core engine: evaluate every applicable salary rule for each
        contract and return the list of payslip line value dicts.

        Rules are evaluated in sequence order inside a sandboxed local dict
        (via safe_eval, imported as ``eval`` at the top of the file); a rule
        whose condition fails blacklists itself and its children.
        """
        def _sum_salary_rule_category(localdict, category, amount):
            # Add `amount` to the category total and, recursively, to all of
            # its ancestor categories.
            if category.parent_id:
                localdict = _sum_salary_rule_category(localdict, category.parent_id, amount)
            localdict['categories'].dict[category.code] = category.code in localdict['categories'].dict and localdict['categories'].dict[category.code] + amount or amount
            return localdict
        class BrowsableObject(object):
            """Wraps a plain dict so rule code can use attribute access;
            missing keys read as 0.0."""
            def __init__(self, pool, cr, uid, employee_id, dict):
                self.pool = pool
                self.cr = cr
                self.uid = uid
                self.employee_id = employee_id
                self.dict = dict
            def __getattr__(self, attr):
                return attr in self.dict and self.dict.__getitem__(attr) or 0.0
        class InputLine(BrowsableObject):
            """a class that will be used into the python code, mainly for usability purposes"""
            def sum(self, code, from_date, to_date=None):
                # Sum of the input amounts with this code over the employee's
                # 'done' payslips in the period (to_date defaults to today).
                if to_date is None:
                    to_date = datetime.now().strftime('%Y-%m-%d')
                result = 0.0
                self.cr.execute("SELECT sum(amount) as sum\
                            FROM hr_payslip as hp, hr_payslip_input as pi \
                            WHERE hp.employee_id = %s AND hp.state = 'done' \
                            AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s",
                           (self.employee_id, from_date, to_date, code))
                res = self.cr.fetchone()[0]
                return res or 0.0
        class WorkedDays(BrowsableObject):
            """a class that will be used into the python code, mainly for usability purposes"""
            def _sum(self, code, from_date, to_date=None):
                # (days, hours) totals for this worked-days code over the
                # employee's 'done' payslips in the period.
                if to_date is None:
                    to_date = datetime.now().strftime('%Y-%m-%d')
                result = 0.0
                self.cr.execute("SELECT sum(number_of_days) as number_of_days, sum(number_of_hours) as number_of_hours\
                            FROM hr_payslip as hp, hr_payslip_worked_days as pi \
                            WHERE hp.employee_id = %s AND hp.state = 'done'\
                            AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pi.payslip_id AND pi.code = %s",
                           (self.employee_id, from_date, to_date, code))
                return self.cr.fetchone()
            def sum(self, code, from_date, to_date=None):
                res = self._sum(code, from_date, to_date)
                return res and res[0] or 0.0
            def sum_hours(self, code, from_date, to_date=None):
                res = self._sum(code, from_date, to_date)
                return res and res[1] or 0.0
        class Payslips(BrowsableObject):
            """a class that will be used into the python code, mainly for usability purposes"""
            def sum(self, code, from_date, to_date=None):
                # Sum of line totals with this code over 'done' payslips;
                # credit notes count negatively.
                if to_date is None:
                    to_date = datetime.now().strftime('%Y-%m-%d')
                self.cr.execute("SELECT sum(case when hp.credit_note = False then (pl.total) else (-pl.total) end)\
                            FROM hr_payslip as hp, hr_payslip_line as pl \
                            WHERE hp.employee_id = %s AND hp.state = 'done' \
                            AND hp.date_from >= %s AND hp.date_to <= %s AND hp.id = pl.slip_id AND pl.code = %s",
                            (self.employee_id, from_date, to_date, code))
                res = self.cr.fetchone()
                return res and res[0] or 0.0
        #we keep a dict with the result because a value can be overwritten by another rule with the same code
        result_dict = {}
        rules = {}
        categories_dict = {}
        blacklist = []
        payslip_obj = self.pool.get('hr.payslip')
        inputs_obj = self.pool.get('hr.payslip.worked_days')
        obj_rule = self.pool.get('hr.salary.rule')
        payslip = payslip_obj.browse(cr, uid, payslip_id, context=context)
        worked_days = {}
        for worked_days_line in payslip.worked_days_line_ids:
            worked_days[worked_days_line.code] = worked_days_line
        inputs = {}
        for input_line in payslip.input_line_ids:
            inputs[input_line.code] = input_line
        # Browsable wrappers exposed to the rule code as local variables.
        categories_obj = BrowsableObject(self.pool, cr, uid, payslip.employee_id.id, categories_dict)
        input_obj = InputLine(self.pool, cr, uid, payslip.employee_id.id, inputs)
        worked_days_obj = WorkedDays(self.pool, cr, uid, payslip.employee_id.id, worked_days)
        payslip_obj = Payslips(self.pool, cr, uid, payslip.employee_id.id, payslip)
        rules_obj = BrowsableObject(self.pool, cr, uid, payslip.employee_id.id, rules)
        baselocaldict = {'categories': categories_obj, 'rules': rules_obj, 'payslip': payslip_obj, 'worked_days': worked_days_obj, 'inputs': input_obj}
        #get the ids of the structures on the contracts and their parent id as well
        structure_ids = self.pool.get('hr.contract').get_all_structures(cr, uid, contract_ids, context=context)
        #get the rules of the structure and their children
        rule_ids = self.pool.get('hr.payroll.structure').get_all_rules(cr, uid, structure_ids, context=context)
        #run the rules by sequence
        sorted_rule_ids = [id for id, sequence in sorted(rule_ids, key=lambda x:x[1])]
        for contract in self.pool.get('hr.contract').browse(cr, uid, contract_ids, context=context):
            employee = contract.employee_id
            localdict = dict(baselocaldict, employee=employee, contract=contract)
            for rule in obj_rule.browse(cr, uid, sorted_rule_ids, context=context):
                # A rule may be computed once per contract: key by both.
                key = rule.code + '-' + str(contract.id)
                localdict['result'] = None
                localdict['result_qty'] = 1.0
                localdict['result_rate'] = 100
                #check if the rule can be applied
                if obj_rule.satisfy_condition(cr, uid, rule.id, localdict, context=context) and rule.id not in blacklist:
                    #compute the amount of the rule
                    amount, qty, rate = obj_rule.compute_rule(cr, uid, rule.id, localdict, context=context)
                    #check if there is already a rule computed with that code
                    previous_amount = rule.code in localdict and localdict[rule.code] or 0.0
                    #set/overwrite the amount computed for this rule in the localdict
                    tot_rule = amount * qty * rate / 100.0
                    localdict[rule.code] = tot_rule
                    rules[rule.code] = rule
                    #sum the amount for its salary category
                    localdict = _sum_salary_rule_category(localdict, rule.category_id, tot_rule - previous_amount)
                    #create/overwrite the rule in the temporary results
                    result_dict[key] = {
                        'salary_rule_id': rule.id,
                        'contract_id': contract.id,
                        'name': rule.name,
                        'code': rule.code,
                        'category_id': rule.category_id.id,
                        'sequence': rule.sequence,
                        'appears_on_payslip': rule.appears_on_payslip,
                        'condition_select': rule.condition_select,
                        'condition_python': rule.condition_python,
                        'condition_range': rule.condition_range,
                        'condition_range_min': rule.condition_range_min,
                        'condition_range_max': rule.condition_range_max,
                        'amount_select': rule.amount_select,
                        'amount_fix': rule.amount_fix,
                        'amount_python_compute': rule.amount_python_compute,
                        'amount_percentage': rule.amount_percentage,
                        'amount_percentage_base': rule.amount_percentage_base,
                        'register_id': rule.register_id.id,
                        'amount': amount,
                        'employee_id': contract.employee_id.id,
                        'quantity': qty,
                        'rate': rate,
                    }
                else:
                    #blacklist this rule and its children
                    blacklist += [id for id, seq in self.pool.get('hr.salary.rule')._recursive_search_of_rules(cr, uid, [rule], context=context)]
        result = [value for code, value in result_dict.items()]
        return result
    def onchange_employee_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
        """Onchange: refresh name, company, contract, structure, worked days
        and inputs when the employee or the period changes.

        NOTE: 'empolyee_obj' is a historical misspelling of employee_obj,
        kept unchanged here (local name only).
        """
        empolyee_obj = self.pool.get('hr.employee')
        contract_obj = self.pool.get('hr.contract')
        worked_days_obj = self.pool.get('hr.payslip.worked_days')
        input_obj = self.pool.get('hr.payslip.input')
        if context is None:
            context = {}
        #delete old worked days lines
        old_worked_days_ids = ids and worked_days_obj.search(cr, uid, [('payslip_id', '=', ids[0])], context=context) or False
        if old_worked_days_ids:
            worked_days_obj.unlink(cr, uid, old_worked_days_ids, context=context)
        #delete old input lines
        old_input_ids = ids and input_obj.search(cr, uid, [('payslip_id', '=', ids[0])], context=context) or False
        if old_input_ids:
            input_obj.unlink(cr, uid, old_input_ids, context=context)
        #defaults
        res = {'value':{
                      'line_ids':[],
                      'input_line_ids': [],
                      'worked_days_line_ids': [],
                      #'details_by_salary_head':[], TODO put me back
                      'name':'',
                      'contract_id': False,
                      'struct_id': False,
                      }
            }
        if (not employee_id) or (not date_from) or (not date_to):
            return res
        ttyme = datetime.fromtimestamp(time.mktime(time.strptime(date_from, "%Y-%m-%d")))
        employee_id = empolyee_obj.browse(cr, uid, employee_id, context=context)
        res['value'].update({
                    'name': _('Salary Slip of %s for %s') % (employee_id.name, tools.ustr(ttyme.strftime('%B-%Y'))),
                    'company_id': employee_id.company_id.id
        })
        if not context.get('contract', False):
            #fill with the first contract of the employee
            contract_ids = self.get_contract(cr, uid, employee_id, date_from, date_to, context=context)
        else:
            if contract_id:
                #set the list of contract for which the input have to be filled
                contract_ids = [contract_id]
            else:
                #if we don't give the contract, then the input to fill should be for all current contracts of the employee
                contract_ids = self.get_contract(cr, uid, employee_id, date_from, date_to, context=context)
        if not contract_ids:
            return res
        contract_record = contract_obj.browse(cr, uid, contract_ids[0], context=context)
        res['value'].update({
                    'contract_id': contract_record and contract_record.id or False
        })
        struct_record = contract_record and contract_record.struct_id or False
        if not struct_record:
            return res
        res['value'].update({
                    'struct_id': struct_record.id,
        })
        #computation of the salary input
        worked_days_line_ids = self.get_worked_day_lines(cr, uid, contract_ids, date_from, date_to, context=context)
        input_line_ids = self.get_inputs(cr, uid, contract_ids, date_from, date_to, context=context)
        res['value'].update({
                    'worked_days_line_ids': worked_days_line_ids,
                    'input_line_ids': input_line_ids,
        })
        return res
    def onchange_contract_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
        #TODO it seems to be the mess in the onchanges, we should have onchange_employee => onchange_contract => doing all the things
        # Delegates to onchange_employee_id with contract=True in the context
        # so the chosen contract (if any) is honoured.
        res = {'value':{
                 'line_ids': [],
                 'name': '',
                 }
          }
        context = dict(context or {}, contract=True)
        if not contract_id:
            res['value'].update({'struct_id': False})
        return self.onchange_employee_id(cr, uid, ids, date_from=date_from, date_to=date_to, employee_id=employee_id, contract_id=contract_id, context=context)
class hr_payslip_worked_days(osv.osv):
    '''
    Payslip Worked Days: one line per kind of worked time (attendance or a
    leave type) on a payslip, as produced by hr_payslip.get_worked_day_lines.
    '''
    _name = 'hr.payslip.worked_days'
    _description = 'Payslip Worked Days'
    _columns = {
        'name': fields.char('Description', required=True),
        'payslip_id': fields.many2one('hr.payslip', 'Pay Slip', required=True, ondelete='cascade', select=True),
        'sequence': fields.integer('Sequence', required=True, select=True),
        'code': fields.char('Code', size=52, required=True, help="The code that can be used in the salary rules"),
        'number_of_days': fields.float('Number of Days'),
        'number_of_hours': fields.float('Number of Hours'),
        'contract_id': fields.many2one('hr.contract', 'Contract', required=True, help="The contract for which applied this input"),
    }
    _order = 'payslip_id, sequence'
    _defaults = {
        'sequence': 10,
    }
class hr_payslip_input(osv.osv):
    '''
    Payslip Input: a free amount (commission, bonus, ...) attached to a
    payslip and referenced from salary rule code via its code
    (e.g. inputs.SALEURO.amount); defaults are built by hr_payslip.get_inputs.
    '''
    _name = 'hr.payslip.input'
    _description = 'Payslip Input'
    _columns = {
        'name': fields.char('Description', required=True),
        'payslip_id': fields.many2one('hr.payslip', 'Pay Slip', required=True, ondelete='cascade', select=True),
        'sequence': fields.integer('Sequence', required=True, select=True),
        'code': fields.char('Code', size=52, required=True, help="The code that can be used in the salary rules"),
        'amount': fields.float('Amount', help="It is used in computation. For e.g. A rule for sales having 1% commission of basic salary for per product can defined in expression like result = inputs.SALEURO.amount * contract.wage*0.01."),
        'contract_id': fields.many2one('hr.contract', 'Contract', required=True, help="The contract for which applied this input"),
    }
    _order = 'payslip_id, sequence'
    _defaults = {
        'sequence': 10,
        'amount': 0.0,
    }
class hr_salary_rule(osv.osv):
_name = 'hr.salary.rule'
_columns = {
'name':fields.char('Name', required=True, readonly=False),
'code':fields.char('Code', size=64, required=True, help="The code of salary rules can be used as reference in computation of other rules. In that case, it is case sensitive."),
'sequence': fields.integer('Sequence', required=True, help='Use to arrange calculation sequence', select=True),
'quantity': fields.char('Quantity', help="It is used in computation for percentage and fixed amount.For e.g. A rule for Meal Voucher having fixed amount of 1€ per worked day can have its quantity defined in expression like worked_days.WORK100.number_of_days."),
'category_id':fields.many2one('hr.salary.rule.category', 'Category', required=True),
'active':fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the salary rule without removing it."),
'appears_on_payslip': fields.boolean('Appears on Payslip', help="Used to display the salary rule on payslip."),
'parent_rule_id':fields.many2one('hr.salary.rule', 'Parent Salary Rule', select=True),
'company_id':fields.many2one('res.company', 'Company', required=False),
'condition_select': fields.selection([('none', 'Always True'),('range', 'Range'), ('python', 'Python Expression')], "Condition Based on", required=True),
'condition_range':fields.char('Range Based on', readonly=False, help='This will be used to compute the % fields values; in general it is on basic, but you can also use categories code fields in lowercase as a variable names (hra, ma, lta, etc.) and the variable basic.'),
'condition_python':fields.text('Python Condition', required=True, readonly=False, help='Applied this rule for calculation if condition is true. You can specify condition like basic > 1000.'),
'condition_range_min': fields.float('Minimum Range', required=False, help="The minimum amount, applied for this rule."),
'condition_range_max': fields.float('Maximum Range', required=False, help="The maximum amount, applied for this rule."),
'amount_select':fields.selection([
('percentage','Percentage (%)'),
('fix','Fixed Amount'),
('code','Python Code'),
],'Amount Type', select=True, required=True, help="The computation method for the rule amount."),
'amount_fix': fields.float('Fixed Amount', digits_compute=dp.get_precision('Payroll'),),
'amount_percentage': fields.float('Percentage (%)', digits_compute=dp.get_precision('Payroll Rate'), help='For example, enter 50.0 to apply a percentage of 50%'),
'amount_python_compute':fields.text('Python Code'),
'amount_percentage_base': fields.char('Percentage based on', required=False, readonly=False, help='result will be affected to a variable'),
'child_ids':fields.one2many('hr.salary.rule', 'parent_rule_id', 'Child Salary Rule', copy=True),
'register_id':fields.many2one('hr.contribution.register', 'Contribution Register', help="Eventual third party involved in the salary payment of the employees."),
'input_ids': fields.one2many('hr.rule.input', 'input_id', 'Inputs', copy=True),
'note':fields.text('Description'),
}
_defaults = {
'amount_python_compute': '''
# Available variables:
#----------------------
# payslip: object containing the payslips
# employee: hr.employee object
# contract: hr.contract object
# rules: object containing the rules code (previously computed)
# categories: object containing the computed salary rule categories (sum of amount of all rules belonging to that category).
# worked_days: object containing the computed worked days.
# inputs: object containing the computed inputs.
# Note: returned value have to be set in the variable 'result'
result = contract.wage * 0.10''',
'condition_python':
'''
# Available variables:
#----------------------
# payslip: object containing the payslips
# employee: hr.employee object
# contract: hr.contract object
# rules: object containing the rules code (previously computed)
# categories: object containing the computed salary rule categories (sum of amount of all rules belonging to that category).
# worked_days: object containing the computed worked days
# inputs: object containing the computed inputs
# Note: returned value have to be set in the variable 'result'
result = rules.NET > categories.NET * 0.10''',
'condition_range': 'contract.wage',
'sequence': 5,
'appears_on_payslip': True,
'active': True,
'company_id': lambda self, cr, uid, context: \
self.pool.get('res.users').browse(cr, uid, uid,
context=context).company_id.id,
'condition_select': 'none',
'amount_select': 'fix',
'amount_fix': 0.0,
'amount_percentage': 0.0,
'quantity': '1.0',
}
@api.cr_uid_ids_context
def _recursive_search_of_rules(self, cr, uid, rule_ids, context=None):
    """Flatten a salary-rule tree into (id, sequence) pairs.

    @param rule_ids: list of browse records of hr.salary.rule
    @return: (id, sequence) tuples for the given rules followed by the
        pairs of all their descendants, gathered depth-first.
    """
    descendant_pairs = []
    for rule in rule_ids:
        if rule.child_ids:
            descendant_pairs.extend(
                self._recursive_search_of_rules(cr, uid, rule.child_ids, context=context))
    return [(rule.id, rule.sequence) for rule in rule_ids] + descendant_pairs
#TODO should add some checks on the type of result (should be float)
def compute_rule(self, cr, uid, rule_id, localdict, context=None):
    """
    Compute the amount, quantity and rate of one salary rule.

    :param rule_id: id of rule to compute
    :param localdict: dictionary containing the environement in which to compute the rule
    :return: returns a tuple build as the base/amount computed, the quantity and the rate
    :rtype: (float, float, float)
    """
    # NOTE(review): `eval` here is presumably OpenERP's sandboxed safe_eval
    # aliased at the top of the file (it accepts mode='exec'/nocopy, which
    # the builtin eval does not) -- confirm the import.
    rule = self.browse(cr, uid, rule_id, context=context)
    if rule.amount_select == 'fix':
        try:
            return rule.amount_fix, float(eval(rule.quantity, localdict)), 100.0
        # narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate
        except Exception:
            raise osv.except_osv(_('Error!'), _('Wrong quantity defined for salary rule %s (%s).') % (rule.name, rule.code))
    elif rule.amount_select == 'percentage':
        try:
            return (float(eval(rule.amount_percentage_base, localdict)),
                    float(eval(rule.quantity, localdict)),
                    rule.amount_percentage)
        except Exception:
            raise osv.except_osv(_('Error!'), _('Wrong percentage base or quantity defined for salary rule %s (%s).') % (rule.name, rule.code))
    else:
        try:
            # python-code rules must set `result`, and may set result_qty / result_rate
            eval(rule.amount_python_compute, localdict, mode='exec', nocopy=True)
            return float(localdict['result']), 'result_qty' in localdict and localdict['result_qty'] or 1.0, 'result_rate' in localdict and localdict['result_rate'] or 100.0
        except Exception:
            raise osv.except_osv(_('Error!'), _('Wrong python code defined for salary rule %s (%s).') % (rule.name, rule.code))
def satisfy_condition(self, cr, uid, rule_id, localdict, context=None):
    """
    Check whether a salary rule applies in the given evaluation context.

    @param rule_id: id of hr.salary.rule to be tested
    @return: returns True if the given rule match the condition for the given contract. Return False otherwise.
    """
    rule = self.browse(cr, uid, rule_id, context=context)
    if rule.condition_select == 'none':
        return True
    elif rule.condition_select == 'range':
        try:
            result = eval(rule.condition_range, localdict)
            return rule.condition_range_min <= result and result <= rule.condition_range_max or False
        # narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate
        except Exception:
            raise osv.except_osv(_('Error!'), _('Wrong range condition defined for salary rule %s (%s).') % (rule.name, rule.code))
    else:  # python code
        try:
            # python-code conditions must set `result` in localdict
            eval(rule.condition_python, localdict, mode='exec', nocopy=True)
            return 'result' in localdict and localdict['result'] or False
        except Exception:
            raise osv.except_osv(_('Error!'), _('Wrong python condition defined for salary rule %s (%s).') % (rule.name, rule.code))
class hr_rule_input(osv.osv):
    '''
    Salary Rule Input

    One manually-entered input line (code + description) that a salary
    rule can reference from its amount/quantity expressions.
    '''
    _name = 'hr.rule.input'
    _description = 'Salary Rule Input'
    _columns = {
        'name': fields.char('Description', required=True),
        # the code is what rule expressions reference, hence required
        'code': fields.char('Code', size=52, required=True, help="The code that can be used in the salary rules"),
        # rule this input belongs to
        'input_id': fields.many2one('hr.salary.rule', 'Salary Rule Input', required=True)
    }
class hr_payslip_line(osv.osv):
    '''
    Payslip Line

    One computed line of a payslip. Inherits every rule-definition field
    from hr.salary.rule and stores the evaluated amount/quantity/rate.
    '''
    _name = 'hr.payslip.line'
    _inherit = 'hr.salary.rule'
    _description = 'Payslip Line'
    _order = 'contract_id, sequence'

    def _calculate_total(self, cr, uid, ids, name, args, context):
        # Functional-field getter: total = quantity * amount * rate%.
        if not ids: return {}
        res = {}
        for line in self.browse(cr, uid, ids, context=context):
            res[line.id] = float(line.quantity) * line.amount * line.rate / 100
        return res

    _columns = {
        'slip_id': fields.many2one('hr.payslip', 'Pay Slip', required=True, ondelete='cascade'),
        'salary_rule_id': fields.many2one('hr.salary.rule', 'Rule', required=True),
        'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
        'contract_id': fields.many2one('hr.contract', 'Contract', required=True, select=True),
        'rate': fields.float('Rate (%)', digits_compute=dp.get_precision('Payroll Rate')),
        'amount': fields.float('Amount', digits_compute=dp.get_precision('Payroll')),
        'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Payroll')),
        # stored computed column, recomputed through _calculate_total
        'total': fields.function(_calculate_total, method=True, type='float', string='Total', digits_compute=dp.get_precision('Payroll'), store=True),
    }
    _defaults = {
        'quantity': 1.0,
        'rate': 100.0,
    }
class hr_employee(osv.osv):
    '''
    Employee

    Extends hr.employee with payroll-related fields: the list of payslips,
    the total basic wage of currently-running contracts and a payslip counter.
    '''
    _inherit = 'hr.employee'
    _description = 'Employee'

    def _calculate_total_wage(self, cr, uid, ids, name, args, context):
        # Functional-field getter: sum of wages over contracts active today
        # (date_start <= today and date_end unset or in the future).
        if not ids: return {}
        res = {}
        current_date = datetime.now().strftime('%Y-%m-%d')
        for employee in self.browse(cr, uid, ids, context=context):
            if not employee.contract_ids:
                res[employee.id] = {'basic': 0.0}
                continue
            cr.execute( 'SELECT SUM(wage) '\
                        'FROM hr_contract '\
                        'WHERE employee_id = %s '\
                        'AND date_start <= %s '\
                        'AND (date_end > %s OR date_end is NULL)',
                       (employee.id, current_date, current_date))
            # NOTE(review): SUM() is NULL when no contract row matches the date
            # filter, so result['sum'] may be None here -- confirm downstream
            # handling of a None 'basic' value.
            result = dict(cr.dictfetchone())
            res[employee.id] = {'basic': result['sum']}
        return res

    def _payslip_count(self, cr, uid, ids, field_name, arg, context=None):
        # Functional-field getter: number of payslips per employee.
        Payslip = self.pool['hr.payslip']
        return {
            employee_id: Payslip.search_count(cr, uid, [('employee_id', '=', employee_id)], context=context)
            for employee_id in ids
        }

    _columns = {
        'slip_ids': fields.one2many('hr.payslip', 'employee_id', 'Payslips', required=False, readonly=True),
        'total_wage': fields.function(_calculate_total_wage, method=True, type='float', string='Total Basic Salary', digits_compute=dp.get_precision('Payroll'), help="Sum of all current contract's wage of employee."),
        'payslip_count': fields.function(_payslip_count, type='integer', string='Payslips'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
charbeljc/OCB | addons/hr_recruitment/res_config.py | 352 | 3627 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-Today OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_applicant_settings(osv.TransientModel):
    """Recruitment options on the HR configuration wizard.

    Adds resume-indexation and job mail-alias settings on top of the
    generic HR and fetchmail configuration screens.
    """
    _name = 'hr.config.settings'
    _inherit = ['hr.config.settings', 'fetchmail.config.settings']

    _columns = {
        'module_document': fields.boolean('Allow the automatic indexation of resumes',
            help='Manage your CV\'s and motivation letter related to all applicants.\n'
                 '-This installs the module document_ftp. This will install the knowledge management module in order to allow you to search using specific keywords through the content of all documents (PDF, .DOCx...)'),
        'alias_prefix': fields.char('Default Alias Name for Jobs'),
        'alias_domain': fields.char('Alias Domain'),
    }
    _defaults = {
        # domain part of incoming mail aliases, read from the mail.alias pool
        'alias_domain': lambda self, cr, uid, context: self.pool['mail.alias']._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
    }

    def _find_default_job_alias_id(self, cr, uid, context=None):
        # Prefer the alias shipped in this module's XML data; fall back to any
        # generic hr.applicant alias attached to hr.job with empty defaults.
        alias_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'hr_recruitment.mail_alias_jobs')
        if not alias_id:
            alias_ids = self.pool['mail.alias'].search(
                cr, uid, [
                    ('alias_model_id.model', '=', 'hr.applicant'),
                    ('alias_force_thread_id', '=', False),
                    ('alias_parent_model_id.model', '=', 'hr.job'),
                    ('alias_parent_thread_id', '=', False),
                    ('alias_defaults', '=', '{}')
                ], context=context)
            alias_id = alias_ids and alias_ids[0] or False
        return alias_id

    def get_default_alias_prefix(self, cr, uid, ids, context=None):
        # Wizard default getter: local part of the default job alias, if any.
        alias_name = False
        alias_id = self._find_default_job_alias_id(cr, uid, context=context)
        if alias_id:
            alias_name = self.pool['mail.alias'].browse(cr, uid, alias_id, context=context).alias_name
        return {'alias_prefix': alias_name}

    def set_default_alias_prefix(self, cr, uid, ids, context=None):
        # Wizard setter: create the default job alias if missing, else rename it.
        mail_alias = self.pool.get('mail.alias')
        for record in self.browse(cr, uid, ids, context=context):
            alias_id = self._find_default_job_alias_id(cr, uid, context=context)
            if not alias_id:
                create_ctx = dict(context, alias_model_name='hr.applicant', alias_parent_model_name='hr.job')
                alias_id = self.pool['mail.alias'].create(cr, uid, {'alias_name': record.alias_prefix}, context=create_ctx)
            else:
                mail_alias.write(cr, uid, alias_id, {'alias_name': record.alias_prefix}, context=context)
        return True
| agpl-3.0 |
polimediaupv/edx-platform | cms/djangoapps/contentstore/utils.py | 10 | 11758 | """
Common utility functions useful throughout the contentstore
"""
# pylint: disable=no-member
import logging
from opaque_keys import InvalidKeyError
import re
from datetime import datetime
from pytz import UTC
from django.conf import settings
from django.core.urlresolvers import reverse
from django_comment_common.models import assign_default_role
from django_comment_common.utils import seed_permissions_roles
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from opaque_keys.edx.keys import UsageKey, CourseKey
from student.roles import CourseInstructorRole, CourseStaffRole
from student.models import CourseEnrollment
from student import auth
log = logging.getLogger(__name__)
def add_instructor(course_key, requesting_user, new_instructor):
    """
    Adds given user as instructor and staff to the given course,
    after verifying that the requesting_user has permission to do so.

    :param course_key: CourseKey of the target course
    :param requesting_user: user performing the change (must hold staff rights)
    :param new_instructor: user to promote
    """
    # can't use auth.add_users here b/c it requires user to already have Instructor perms in this course
    CourseInstructorRole(course_key).add_users(new_instructor)
    auth.add_users(requesting_user, CourseStaffRole(course_key), new_instructor)


def initialize_permissions(course_key, user_who_created_course):
    """
    Initializes a new course by enrolling the course creator as a student,
    and initializing Forum by seeding its permissions and assigning default roles.

    :param course_key: CourseKey of the newly created course
    :param user_who_created_course: the creating user
    """
    # seed the forums
    seed_permissions_roles(course_key)

    # auto-enroll the course creator in the course so that "View Live" will work.
    CourseEnrollment.enroll(user_who_created_course, course_key)

    # set default forum roles (assign 'Student' role)
    assign_default_role(course_key, user_who_created_course)
def remove_all_instructors(course_key):
    """
    Removes all instructor and staff users from the given course.
    """
    # Staff first, then instructors -- same removal order as before.
    for role in (CourseStaffRole(course_key), CourseInstructorRole(course_key)):
        role.remove_users(*role.users_with_role())
def delete_course_and_groups(course_key, user_id):
    """
    This deletes the courseware associated with a course_key as well as cleaning update_item
    the various user table stuff (groups, permissions, etc.)

    :param course_key: CourseKey of the course to delete
    :param user_id: id of the user requesting the deletion (recorded by the modulestore)
    """
    module_store = modulestore()
    with module_store.bulk_operations(course_key):
        module_store.delete_course(course_key, user_id)

        # use the module logger rather than printing to stdout
        log.info('removing User permissions from course %s ....', course_key)
        # in the django layer, we need to remove all the user permissions groups associated with this course
        try:
            remove_all_instructors(course_key)
        except Exception as err:
            # lazy %-style args: message is only formatted if actually emitted
            log.error("Error in deleting course groups for %s: %s", course_key, err)
def get_lms_link_for_item(location, preview=False):
    """
    Returns an LMS link to the course with a jump_to to the provided location.

    :param location: the location to jump to
    :param preview: True if the preview version of LMS should be returned. Default value is false.
    """
    assert isinstance(location, UsageKey)

    # no LMS configured -> no link to build
    if settings.LMS_BASE is None:
        return None

    if preview:
        lms_base = settings.FEATURES.get('PREVIEW_LMS_BASE')
    else:
        lms_base = settings.LMS_BASE

    # protocol-relative URL so it works under both http and https
    return u"//{lms_base}/courses/{course_key}/jump_to/{location}".format(
        lms_base=lms_base,
        course_key=location.course_key.to_deprecated_string(),
        location=location.to_deprecated_string(),
    )


def get_lms_link_for_about_page(course_key):
    """
    Returns the url to the course about page from the location tuple.

    Prefers the marketing-site URL when ENABLE_MKTG_SITE is on; otherwise
    falls back to the LMS-hosted about page, or None if neither is configured.
    """
    assert isinstance(course_key, CourseKey)

    if settings.FEATURES.get('ENABLE_MKTG_SITE', False):
        if not hasattr(settings, 'MKTG_URLS'):
            log.exception("ENABLE_MKTG_SITE is True, but MKTG_URLS is not defined.")
            return None

        marketing_urls = settings.MKTG_URLS

        # Root will be "https://www.edx.org". The complete URL will still not be exactly correct,
        # but redirects exist from www.edx.org to get to the Drupal course about page URL.
        about_base = marketing_urls.get('ROOT', None)

        if about_base is None:
            log.exception('There is no ROOT defined in MKTG_URLS')
            return None

        # Strip off https:// (or http://) to be consistent with the formatting of LMS_BASE.
        about_base = re.sub(r"^https?://", "", about_base)

    elif settings.LMS_BASE is not None:
        about_base = settings.LMS_BASE
    else:
        return None

    return u"//{about_base_url}/courses/{course_key}/about".format(
        about_base_url=about_base,
        course_key=course_key.to_deprecated_string()
    )


# pylint: disable=invalid-name
def get_lms_link_for_certificate_web_view(user_id, course_key, mode):
    """
    Returns the url to the certificate web view.

    :param user_id: id of the certificate owner
    :param course_key: CourseKey of the certified course
    :param mode: certificate mode, passed through as the `preview` query arg
    """
    assert isinstance(course_key, CourseKey)

    if settings.LMS_BASE is None:
        return None

    return u"//{certificate_web_base}/certificates/user/{user_id}/course/{course_id}?preview={mode}".format(
        certificate_web_base=settings.LMS_BASE,
        user_id=user_id,
        course_id=unicode(course_key),
        mode=mode
    )


def course_image_url(course):
    """Returns the image url for the course."""
    try:
        loc = StaticContent.compute_location(course.location.course_key, course.course_image)
    except InvalidKeyError:
        # course_image is not a valid asset name -> no usable URL
        return ''
    path = StaticContent.serialize_asset_key_with_slash(loc)
    return path
# pylint: disable=invalid-name
def is_currently_visible_to_students(xblock):
"""
Returns true if there is a published version of the xblock that is currently visible to students.
This means that it has a release date in the past, and the xblock has not been set to staff only.
"""
try:
published = modulestore().get_item(xblock.location, revision=ModuleStoreEnum.RevisionOption.published_only)
# If there's no published version then the xblock is clearly not visible
except ItemNotFoundError:
return False
# If visible_to_staff_only is True, this xblock is not visible to students regardless of start date.
if published.visible_to_staff_only:
return False
# Check start date
if 'detached' not in published._class_tags and published.start is not None:
return datetime.now(UTC) > published.start
# No start date, so it's always visible
return True
def has_children_visible_to_specific_content_groups(xblock):
    """
    Returns True if this xblock has children that are limited to specific content groups.
    Note that this method is not recursive (it does not check grandchildren).
    """
    if not xblock.has_children:
        return False
    # direct children only -- grandchildren are deliberately not inspected
    return any(is_visible_to_specific_content_groups(child)
               for child in xblock.get_children())
def is_visible_to_specific_content_groups(xblock):
    """
    Returns True if this xblock has visibility limited to specific content groups.
    """
    if not xblock.group_access:
        return False
    # group_access maps partition -> list of group IDs; an empty list or None
    # means "visible to everyone in that partition", so any truthy value
    # means the xblock is restricted in some way.
    return any(value for _partition, value in xblock.group_access.iteritems())
def find_release_date_source(xblock):
    """
    Finds the ancestor of xblock that set its release date.
    """
    # Stop searching at the section level
    if xblock.category == 'chapter':
        return xblock

    parent_location = modulestore().get_parent_location(xblock.location,
                                                        revision=ModuleStoreEnum.RevisionOption.draft_preferred)
    # Orphaned xblocks set their own release date
    if not parent_location:
        return xblock

    parent = modulestore().get_item(parent_location)
    if parent.start != xblock.start:
        # dates differ, so this xblock's own date is authoritative
        return xblock
    else:
        # same date as the parent: keep walking up to find the origin
        return find_release_date_source(parent)


def find_staff_lock_source(xblock):
    """
    Returns the xblock responsible for setting this xblock's staff lock, or None if the xblock is not staff locked.
    If this xblock is explicitly locked, return it, otherwise find the ancestor which sets this xblock's staff lock.
    """
    # Stop searching if this xblock has explicitly set its own staff lock
    if xblock.fields['visible_to_staff_only'].is_set_on(xblock):
        return xblock

    # Stop searching at the section level
    if xblock.category == 'chapter':
        return None

    parent_location = modulestore().get_parent_location(xblock.location,
                                                        revision=ModuleStoreEnum.RevisionOption.draft_preferred)
    # Orphaned xblocks set their own staff lock
    if not parent_location:
        return None

    parent = modulestore().get_item(parent_location)
    return find_staff_lock_source(parent)


def ancestor_has_staff_lock(xblock, parent_xblock=None):
    """
    Returns True iff one of xblock's ancestors has staff lock.
    Can avoid mongo query by passing in parent_xblock.
    """
    if parent_xblock is None:
        parent_location = modulestore().get_parent_location(xblock.location,
                                                            revision=ModuleStoreEnum.RevisionOption.draft_preferred)
        if not parent_location:
            return False
        parent_xblock = modulestore().get_item(parent_location)
    # NOTE(review): only the direct parent is checked here; this presumably
    # relies on visible_to_staff_only being inherited down the tree -- confirm.
    return parent_xblock.visible_to_staff_only
def reverse_url(handler_name, key_name=None, key_value=None, kwargs=None):
    """
    Creates the URL for the given handler.
    The optional key_name and key_value are passed in as kwargs to the handler.

    :param handler_name: view name, resolved under 'contentstore.views.'
    :param key_name: optional name of the key kwarg (e.g. 'course_key_string')
    :param key_value: value for key_name, stringified before reversing
    :param kwargs: additional kwargs merged into the reverse() call
    """
    # Start from an empty dict (not None) so that passing kwargs without a
    # key_name no longer raises AttributeError on .update().
    kwargs_for_reverse = {key_name: unicode(key_value)} if key_name else {}
    if kwargs:
        kwargs_for_reverse.update(kwargs)
    return reverse('contentstore.views.' + handler_name, kwargs=kwargs_for_reverse)
def reverse_course_url(handler_name, course_key, kwargs=None):
    """
    Creates the URL for handlers that use course_keys as URL parameters.

    Thin wrapper over reverse_url binding the 'course_key_string' kwarg.
    """
    return reverse_url(handler_name, 'course_key_string', course_key, kwargs)


def reverse_library_url(handler_name, library_key, kwargs=None):
    """
    Creates the URL for handlers that use library_keys as URL parameters.

    Thin wrapper over reverse_url binding the 'library_key_string' kwarg.
    """
    return reverse_url(handler_name, 'library_key_string', library_key, kwargs)


def reverse_usage_url(handler_name, usage_key, kwargs=None):
    """
    Creates the URL for handlers that use usage_keys as URL parameters.

    Thin wrapper over reverse_url binding the 'usage_key_string' kwarg.
    """
    return reverse_url(handler_name, 'usage_key_string', usage_key, kwargs)
def has_active_web_certificate(course):
    """
    Returns True if given course has active web certificate configuration.
    If given course has no active web certificate configuration returns False.
    Returns None If `CERTIFICATES_HTML_VIEW` is not enabled of course has not enabled
    `cert_html_view_enabled` settings.
    """
    if not (settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False) and course.cert_html_view_enabled):
        return None
    certificates = getattr(course, 'certificates', {})
    # active as soon as any configuration is flagged is_active
    return any(config.get('is_active') for config in certificates.get('certificates', []))
| agpl-3.0 |
leonhong/thrift | lib/py/setup.py | 4 | 1434 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from distutils.core import setup, Extension
# C extension implementing the fast binary (de)serialization protocol.
fastbinarymod = Extension('thrift.protocol.fastbinary',
                          sources=['src/protocol/fastbinary.c'],
                          )

# NOTE(review): distutils documents `author`/`author_email` as strings;
# the list values below are passed through as-is -- confirm intended.
setup(name='Thrift',
      version='1.0',
      description='Thrift Python Libraries',
      author=['Mark Slee'],
      author_email=['mcslee@facebook.com'],
      url='http://code.facebook.com/thrift',
      packages=[
          'thrift',
          'thrift.protocol',
          'thrift.transport',
          'thrift.server',
      ],
      # sources live under src/, installed as the `thrift` package
      package_dir={'thrift': 'src'},
      ext_modules=[fastbinarymod],
      )
| apache-2.0 |
suutari-ai/shoop | shuup/core/migrations/0010_update_managers.py | 4 | 1315 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-22 23:00
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    """Reset the default manager on every service-provider model."""

    dependencies = [
        ('shuup', '0009_update_tax_name_max_length'),
    ]

    # The same manager update applies to all five models, in this order.
    operations = [
        migrations.AlterModelManagers(
            name=model_name,
            managers=[
                ('_default_manager', django.db.models.manager.Manager()),
            ],
        )
        for model_name in (
            'carrier',
            'customcarrier',
            'custompaymentprocessor',
            'paymentprocessor',
            'serviceprovider',
        )
    ]
| agpl-3.0 |
kow3ns/kubernetes | hack/update_owners.py | 15 | 9035 | #!/usr/bin/env python3
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import csv
import json
import pathlib
import random
import re
import subprocess
import sys
import time
import urllib.request
import zlib
# Absolute path of this script: <repo>/hack/update_owners.py.
BASE_DIR = pathlib.Path(__file__).resolve()
# NOTE(review): this points at the repo's `test` directory (no .csv suffix),
# so the '.csv' -> '.json' replace below looks like a no-op -- confirm intent.
OWNERS_PATH = str(BASE_DIR.parents[1] / 'test')
OWNERS_JSON_PATH = OWNERS_PATH.replace('.csv', '.json')
# Public GCS bucket holding daily test-history summaries.
GCS_URL_BASE = 'https://storage.googleapis.com/kubernetes-test-history/'
# Accounts never used for random test assignment.
SKIP_MAINTAINERS = {
    'a-robinson', 'aronchick', 'bgrant0607-nocc', 'david-mcmahon',
    'goltermann', 'sarahnovotny'}
def normalize(name):
    """Canonicalize a test name: drop [bracketed]/{braced} tags, collapse whitespace."""
    without_tags = re.sub(r'\[.*?\]|\{.*?\}', '', name)
    return re.sub(r'\s+', ' ', without_tags).strip()
def get_test_history(days_ago):
    """Download the test-history summary JSON for the UTC day `days_ago` days back."""
    url = time.strftime(GCS_URL_BASE + 'logs/%Y-%m-%d.json',
                        time.gmtime(time.time() - days_ago * 24 * 60 * 60))
    resp = urllib.request.urlopen(url)
    content = resp.read()
    if resp.headers.get('content-encoding') == 'gzip':
        # wbits = 15|16 tells zlib to expect a gzip (not raw deflate) stream
        content = zlib.decompress(content, 15 | 16)
    return json.loads(content)
def get_test_names_from_test_history():
    """Union of normalized test names seen in the last four days of history."""
    test_names = set()
    for days_ago in range(4):
        test_history = get_test_history(days_ago)
        test_names.update(normalize(name) for name in test_history['test_names'])
    return test_names


def get_test_names_from_local_files():
    """Normalized test names as reported by the repo's `test/list` Go tool."""
    tests_json = subprocess.check_output(['go', 'run', '../test/list/main.go', '-json'])
    tests = json.loads(tests_json)
    # Names not under k8s.io/ get their TestName appended before normalizing.
    return {normalize(t['Name'] + (' ' + t['TestName'] if 'k8s.io/' not in t['Name'] else ''))
            for t in tests}
def load_owners(fname):
    """Parse the owners CSV into {normalized name: (owner, auto_assigned, sig)}."""
    owners = {}
    with pathlib.Path(fname).open() as f:
        for n, cols in enumerate(csv.reader(f)):
            if n == 0:
                continue  # header
            if len(cols) == 3:
                # migrate from previous version without sig
                (name, owner, random_assignment), sig = cols, ""
            else:
                (name, owner, random_assignment, sig) = cols
            owners[normalize(name)] = (owner, int(random_assignment), sig)
    return owners
def write_owners(fname, owners):
    """Write the owners mapping back out as a sorted CSV with a header row."""
    with pathlib.Path(fname).open(mode='w') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(['name', 'owner', 'auto-assigned', 'sig'])
        for name, (owner, random_assignment, sig) in sorted(owners.items()):
            writer.writerow([name, owner, int(random_assignment), sig])
def get_maintainers():
    """Return the sorted hard-coded maintainer list, minus SKIP_MAINTAINERS."""
    # GitHub doesn't seem to support team membership listing without a key with
    # org admin privileges. Instead, we do it manually:
    # Open https://github.com/orgs/kubernetes/teams/kubernetes-maintainers
    # Run this in the js console:
    # [].slice.call(document.querySelectorAll('.team-member-username a')).map(
    #     e => e.textContent.trim())
    ret = {"alex-mohr", "apelisse", "aronchick", "bgrant0607", "bgrant0607-nocc",
           "bprashanth", "brendandburns", "caesarxuchao", "childsb", "cjcullen",
           "david-mcmahon", "davidopp", "dchen1107", "deads2k", "derekwaynecarr",
           "eparis", "erictune", "fabioy", "fejta", "fgrzadkowski", "freehan",
           "gmarek", "grodrigues3", "ingvagabund", "ixdy", "janetkuo", "jbeda",
           "jessfraz", "jingxu97", "jlowdermilk", "jsafrane", "jszczepkowski",
           "justinsb", "Kashomon", "kevin-wangzefeng", "krousey",
           "lavalamp", "liggitt", "luxas", "madhusudancs", "maisem", "matchstick",
           "mbohlool", "mikedanese", "mml", "mtaufen", "mwielgus", "ncdc",
           "nikhiljindal", "piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole",
           "Random-Liu", "rmmh", "roberthbailey", "saad-ali", "smarterclayton",
           "soltysh", "spxtr", "sttts", "thelinuxfoundation", "thockin",
           "timothysc", "tallclair", "vishh", "wojtek-t", "xiang90", "yifan-gu",
           "yujuhong", "zmerlynn"}
    return sorted(ret - SKIP_MAINTAINERS)
def detect_github_username():
    """Infer the GitHub username from git's origin remote, or raise ValueError."""
    # check_output returns bytes on Python 3; decode before applying a str regex,
    # otherwise re.search raises TypeError.
    origin_url = subprocess.check_output(
        ['git', 'config', 'remote.origin.url']).decode('utf-8')
    m = re.search(r'github.com[:/](.*)/', origin_url)
    if m and m.group(1) != 'kubernetes':
        return m.group(1)
    raise ValueError('unable to determine GitHub user from '
                     '`git config remote.origin.url` output, run with --user instead')
def sig_prefixes(owners):
    """Compute the shortest test-name prefixes that map unambiguously to one SIG.

    :param owners: mapping of test name -> (owner, auto_assigned, sig)
    :return: JSON string mapping sig -> sorted list of name prefixes
    """
    # TODO(rmmh): make sig prefixes the only thing in test_owners!
    # Precise test names aren't very interesting.
    owns = []
    # .items(): the py2-only .iteritems() raised AttributeError on Python 3.
    for test, (owner, random_assignment, sig) in owners.items():
        if 'k8s.io/' in test or not sig:
            continue
        owns.append([test, sig])

    while True:
        owns.sort()
        for name, sig in owns:
            # try removing the last word in the name, use it if all tests beginning
            # with this shorter name share the same sig.
            maybe_prefix = ' '.join(name.split()[:-1])
            matches = [other_sig == sig for other_name, other_sig in owns if other_name.startswith(maybe_prefix)]
            if matches and all(matches):
                owns = [[n, s] for n, s in owns if not n.startswith(maybe_prefix)]
                owns.append([maybe_prefix, sig])
                break
        else:  # iterated completely through owns without any changes
            break
    sigs = {}
    for name, sig in owns:
        sigs.setdefault(sig, []).append(name)
    return json.dumps(sigs, sort_keys=True, indent=True)
def main():
    """Regenerate the owners CSV and sig-prefix JSON, assigning owners to new tests."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--history', action='store_true', help='Generate test list from result history.')
    parser.add_argument('--user', help='User to assign new tests to (or RANDOM, default: current GitHub user).')
    parser.add_argument('--addonly', action='store_true', help='Only add missing tests, do not change existing.')
    parser.add_argument('--check', action='store_true', help='Exit with a nonzero status if the test list has changed.')
    parser.add_argument('--print_sig_prefixes', action='store_true', help='Emit SIG prefixes for matching.')
    options = parser.parse_args()

    if options.history:
        test_names = get_test_names_from_test_history()
    else:
        test_names = get_test_names_from_local_files()
    test_names = sorted(test_names)
    owners = load_owners(OWNERS_PATH)

    # Always refresh the sig-prefix JSON alongside the CSV.
    prefixes = sig_prefixes(owners)
    with pathlib.Path(OWNERS_JSON_PATH).open(mode='w') as f:
        f.write(prefixes + '\n')

    if options.print_sig_prefixes:
        print(prefixes)

    outdated_tests = sorted(set(owners) - set(test_names))
    new_tests = sorted(set(test_names) - set(owners))
    maintainers = get_maintainers()

    print('# OUTDATED TESTS (%d):' % len(outdated_tests))
    print('\n'.join('%s -- %s%s' %
                    (t, owners[t][0], ['', ' (random)'][owners[t][1]])
                    for t in outdated_tests))
    print('# NEW TESTS (%d):' % len(new_tests))
    print('\n'.join(new_tests))

    if options.check:
        if new_tests or outdated_tests:
            print('ERROR: The test list has changed')
            sys.exit(1)
        sys.exit(0)

    if not options.user:
        options.user = detect_github_username()

    for name in outdated_tests:
        owners.pop(name)

    if not options.addonly:
        print('# UNEXPECTED MAINTAINERS ')
        print('(randomly assigned, but not in kubernetes-maintainers)')
        # .items(): the py2-only .iteritems() raised AttributeError on Python 3.
        # sorted() materializes the pairs, so popping inside the loop is safe.
        for name, (owner, random_assignment, _) in sorted(owners.items()):
            if random_assignment and owner not in maintainers:
                print('%-16s %s' % (owner, name))
                owners.pop(name)

    # `rand` instead of `random` to avoid shadowing the random module.
    owner_counts = collections.Counter(
        owner for name, (owner, rand, sig) in owners.items()
        if owner in maintainers)

    for test_name in set(test_names) - set(owners):
        random_assignment = True
        if options.user.lower() == 'random':
            # Prefer the least-loaded maintainers when assigning randomly.
            new_owner, _count = random.choice(owner_counts.most_common()[-4:])
        else:
            new_owner = options.user
            random_assignment = False
        owner_counts[new_owner] += 1
        owners[test_name] = (new_owner, random_assignment, "")

    if options.user.lower() == 'random':
        print('# Tests per maintainer:')
        for owner, count in owner_counts.most_common():
            print('%-20s %3d' % (owner, count))

    write_owners(OWNERS_PATH, owners)


if __name__ == '__main__':
    main()
| apache-2.0 |
poiesisconsulting/openerp-restaurant | base_action_rule/tests/base_action_rule_test.py | 395 | 7455 | from openerp import SUPERUSER_ID
from openerp.tests import common
from .. import test_models
class base_action_rule_test(common.TransactionCase):
def setUp(self):
    """*****setUp*****

    Cache the records used by every test: demo/admin user ids, the
    lead test model and the base.action.rule pool.
    """
    super(base_action_rule_test, self).setUp()
    cr, uid = self.cr, self.uid
    self.demo = self.registry('ir.model.data').get_object(cr, uid, 'base', 'user_demo').id
    self.admin = SUPERUSER_ID
    self.model = self.registry('base.action.rule.lead.test')
    self.base_action_rule = self.registry('base.action.rule')
def create_filter_done(self, cr, uid, context=None):
    # Helper: create an ir.filters matching leads in the 'done' state.
    filter_pool = self.registry('ir.filters')
    return filter_pool.create(cr, uid, {
        'name': "Lead is in done state",
        'is_default': False,
        'model_id': 'base.action.rule.lead.test',
        'domain': "[('state','=','done')]",
    }, context=context)

def create_filter_draft(self, cr, uid, context=None):
    # Helper: create an ir.filters matching leads in the 'draft' state.
    filter_pool = self.registry('ir.filters')
    return filter_pool.create(cr, uid, {
        'name': "Lead is in draft state",
        'is_default': False,
        'model_id': "base.action.rule.lead.test",
        'domain': "[('state','=','draft')]",
    }, context=context)

def create_lead_test_1(self, cr, uid, context=None):
    """
    Create a new lead_test
    """
    # Owned by admin so the rules under test can reassign it to demo.
    return self.model.create(cr, uid, {
        'name': "Lead Test 1",
        'user_id': self.admin,
    }, context=context)
def create_rule(self, cr, uid, kind, filter_id=False, filter_pre_id=False, context=None):
    """
    The "Rule 1" says that when a lead goes to the 'draft' state, the responsible for that lead changes to user "demo"

    :param kind: trigger kind (e.g. 'on_create', 'on_write')
    :param filter_id: optional post-condition ir.filters id
    :param filter_pre_id: optional pre-condition ir.filters id
    """
    return self.base_action_rule.create(cr, uid, {
        'name': "Rule 1",
        'model_id': self.registry('ir.model').search(cr, uid, [('model', '=', 'base.action.rule.lead.test')], context=context)[0],
        'kind': kind,
        'filter_pre_id': filter_pre_id,
        'filter_id': filter_id,
        'act_user_id': self.demo,
    }, context=context)

def delete_rules(self, cr, uid, context=None):
    """ delete all the rules on model 'base.action.rule.lead.test' """
    action_ids = self.base_action_rule.search(cr, uid, [('model', '=', self.model._name)], context=context)
    return self.base_action_rule.unlink(cr, uid, action_ids, context=context)
def test_00_check_to_state_draft_pre(self):
"""
Check that a new record (with state = draft) doesn't change its responsible when there is a precondition filter which check that the state is draft.
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft)
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.admin)
self.delete_rules(cr, uid)
def test_01_check_to_state_draft_post(self):
"""
Check that a new record changes its responsible when there is a postcondition filter which check that the state is draft.
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
self.create_rule(cr, uid, 'on_create')
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.demo)
self.delete_rules(cr, uid)
def test_02_check_from_draft_to_done_with_steps(self):
"""
A new record will be created and will goes from draft to done state via the other states (open, pending and cancel)
We will create a rule that says in precondition that the record must be in the "draft" state while a postcondition filter says
that the record will be done. If the state goes from 'draft' to 'done' the responsible will change. If those two conditions aren't
verified, the responsible will stay the same
The responsible in that test will never change
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
filter_done = self.create_filter_done(cr, uid)
self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to open and check that responsible doen't change"""
new_lead.write({'state': 'open'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'open')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to pending and check that responsible doen't change"""
new_lead.write({'state': 'pending'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'pending')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to cancel and check that responsible doen't change"""
new_lead.write({'state': 'cancel'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'cancel')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to done and check that responsible doen't change """
new_lead.write({'state': 'done'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'done')
self.assertEquals(new_lead.user_id.id, self.admin)
self.delete_rules(cr, uid)
def test_02_check_from_draft_to_done_without_steps(self):
"""
A new record will be created and will goes from draft to done in one operation
We will create a rule that says in precondition that the record must be in the "draft" state while a postcondition filter says
that the record will be done. If the state goes from 'draft' to 'done' the responsible will change. If those two conditions aren't
verified, the responsible will stay the same
The responsible in that test will change to user "demo"
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
filter_done = self.create_filter_done(cr, uid)
self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to done and check that responsible change to Demo_user"""
new_lead.write({'state': 'done'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'done')
self.assertEquals(new_lead.user_id.id, self.demo)
self.delete_rules(cr, uid)
| agpl-3.0 |
alexallah/django | django/contrib/flatpages/forms.py | 60 | 2017 | from django import forms
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import gettext, gettext_lazy as _
class FlatpageForm(forms.ModelForm):
    """Admin form for FlatPage objects.

    Validates the URL character set and slash conventions, and rejects a URL
    that already exists on any of the selected sites.
    """

    # Restrict URLs to word characters, dashes, dots, slashes and tildes.
    url = forms.RegexField(
        label=_("URL"),
        max_length=100,
        regex=r'^[-\w/\.~]+$',
        help_text=_("Example: '/about/contact/'. Make sure to have leading and trailing slashes."),
        error_messages={
            "invalid": _(
                "This value must contain only letters, numbers, dots, "
                "underscores, dashes, slashes or tildes."
            ),
        },
    )

    class Meta:
        model = FlatPage
        fields = '__all__'

    def clean_url(self):
        """Enforce a leading slash always, and a trailing slash when
        APPEND_SLASH + CommonMiddleware would otherwise redirect the URL."""
        url = self.cleaned_data['url']
        if not url.startswith('/'):
            raise forms.ValidationError(
                gettext("URL is missing a leading slash."),
                code='missing_leading_slash',
            )
        if (settings.APPEND_SLASH and
                'django.middleware.common.CommonMiddleware' in settings.MIDDLEWARE and
                not url.endswith('/')):
            raise forms.ValidationError(
                gettext("URL is missing a trailing slash."),
                code='missing_trailing_slash',
            )
        return url

    def clean(self):
        """Reject the form when another FlatPage with the same URL exists on
        one of the selected sites (excluding the page being edited)."""
        url = self.cleaned_data.get('url')
        sites = self.cleaned_data.get('sites')

        same_url = FlatPage.objects.filter(url=url)
        if self.instance.pk:
            # editing an existing page: it may of course keep its own URL
            same_url = same_url.exclude(pk=self.instance.pk)

        if sites and same_url.filter(sites__in=sites).exists():
            # find the specific conflicting site to report it in the error
            for site in sites:
                if same_url.filter(sites=site).exists():
                    raise forms.ValidationError(
                        _('Flatpage with url %(url)s already exists for site %(site)s'),
                        code='duplicate_url',
                        params={'url': url, 'site': site},
                    )

        return super().clean()
| bsd-3-clause |
benbox69/pyload | module/network/Bucket.py | 41 | 1820 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
"""
from time import time
from threading import Lock
class Bucket:
    """Token-bucket rate limiter used to throttle transfer speed.

    ``rate`` is the allowed speed in bytes/second.  Below ``MIN_RATE`` the
    bucket is considered disabled (its boolean value is False) and
    :meth:`consumed` never asks the caller to sleep.
    """

    # below this rate (10 KiB/s) limiting is disabled; the original code noted
    # that downloads "may become unresponsive otherwise"
    MIN_RATE = 10240

    def __init__(self):
        self.rate = 0            # allowed bytes/second; 0 means "no limit"
        self.tokens = 0          # bytes currently available
        self.timestamp = time()  # last refill time
        self.lock = Lock()

    def __nonzero__(self):
        # the bucket is "active" only when a meaningful rate is configured
        return self.rate >= self.MIN_RATE

    # Python 3 truth testing uses __bool__; alias it for compatibility.
    __bool__ = __nonzero__

    def setRate(self, rate):
        """Thread-safely set the allowed rate in bytes/second."""
        # BUG FIX: use the lock as a context manager so it is released even
        # if an exception occurs between acquire() and release().
        with self.lock:
            self.rate = int(rate)

    def consumed(self, amount):
        """ return time the process have to sleep, after consumed specified amount """
        if self.rate < self.MIN_RATE:
            return 0  # min. 10kb, may become unresponsive otherwise
        with self.lock:
            self.calc_tokens()
            self.tokens -= amount
            # a negative balance means the caller must wait for a refill;
            # renamed from 'time' to avoid shadowing the imported time()
            if self.tokens < 0:
                sleep_time = -self.tokens / float(self.rate)
            else:
                sleep_time = 0
        return sleep_time

    def calc_tokens(self):
        """Refill tokens for the elapsed time, capped at one second's worth."""
        if self.tokens < self.rate:
            now = time()
            delta = self.rate * (now - self.timestamp)
            self.tokens = min(self.rate, self.tokens + delta)
            self.timestamp = now
| gpl-3.0 |
jvrsantacruz/XlsxWriter | xlsxwriter/test/comparison/test_page_breaks05.py | 8 | 1333 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        base_dir = 'xlsxwriter/test/comparison/'
        target = 'page_breaks05.xlsx'

        self.got_filename = base_dir + '_test_' + target
        self.exp_filename = base_dir + 'xlsx_files/' + target

        # Printer settings are environment specific, so exclude those files
        # and the page-setup elements of the worksheet XML from comparison.
        self.ignore_files = [
            'xl/printerSettings/printerSettings1.bin',
            'xl/worksheets/_rels/sheet1.xml.rels',
        ]
        self.ignore_elements = {
            '[Content_Types].xml': ['<Default Extension="bin"'],
            'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup'],
        }

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with page breaks."""
        book = Workbook(self.got_filename)
        sheet = book.add_worksheet()

        # vertical page breaks passed unsorted and including 0
        sheet.set_v_pagebreaks([8, 3, 1, 0])
        sheet.write('A1', 'Foo')

        book.close()

        self.assertExcelEqual()
| bsd-2-clause |
bverburg/CouchPotatoServer | couchpotato/core/notifications/pushalot.py | 81 | 2534 | import traceback
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Pushalot'
class Pushalot(Notification):
    """Notification provider for the Pushalot push service
    (Windows Phone and Windows 8)."""

    urls = {
        'api': 'https://pushalot.com/api/sendmessage'
    }

    def notify(self, message = '', data = None, listener = None):
        """POST *message* to the Pushalot API.

        :param message: text used as the notification body
        :param data: part of the generic notifier interface; unused here
        :param listener: part of the generic notifier interface; unused here
        :return: True when the HTTP request succeeded, False otherwise
        """
        # The payload is built entirely from configuration and *message*.
        # (The previous "if not data: data = {}" normalisation was dead code:
        # the argument was immediately overwritten by this dict.)
        payload = {
            'AuthorizationToken': self.conf('auth_token'),
            'Title': self.default_title,
            'Body': toUnicode(message),
            'IsImportant': self.conf('important'),
            'IsSilent': self.conf('silent'),
            'Image': toUnicode(self.getNotificationImage('medium') + '?1'),
            'Source': toUnicode(self.default_title)
        }

        headers = {
            'Content-type': 'application/x-www-form-urlencoded'
        }

        try:
            self.urlopen(self.urls['api'], headers = headers, data = payload, show_error = False)
            return True
        except Exception:
            # best-effort notifier: log the failure instead of propagating,
            # but no longer swallow SystemExit/KeyboardInterrupt (bare except)
            log.error('PushAlot failed: %s', traceback.format_exc())
            return False
# Settings schema consumed by CouchPotato's settings UI: declares the options
# shown on the "notifications" tab for the Pushalot provider.
config = [{
    'name': 'pushalot',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'pushalot',
            'description': 'for Windows Phone and Windows 8',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'auth_token',
                    'label': 'Auth Token',
                },
                {
                    'name': 'silent',
                    'label': 'Silent',
                    'default': 0,
                    'type': 'bool',
                    'description': 'Don\'t send Toast notifications. Only update Live Tile',
                },
                {
                    'name': 'important',
                    'label': 'High Priority',
                    'default': 0,
                    'type': 'bool',
                    'description': 'Send message with High priority.',
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
| gpl-3.0 |
MrLoick/python-for-android | python-build/python-libs/gdata/src/gdata/Crypto/PublicKey/ElGamal.py | 228 | 3947 | #
# ElGamal.py : ElGamal encryption/decryption and signatures
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: ElGamal.py,v 1.9 2003/04/04 19:44:26 akuchling Exp $"
from Crypto.PublicKey.pubkey import *
from Crypto.Util import number
# Module-specific exception raised for key construction/usage errors.
class error (Exception):
    pass
# Generate an ElGamal key with N bits
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)

    Generate an ElGamal key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.
    """
    obj=ElGamalobj()
    # Generate prime p
    if progress_func:
        progress_func('p\n')
    obj.p=bignum(getPrime(bits, randfunc))
    # Generate random number g
    # NOTE(review): g is chosen as a random prime < p; nothing here verifies
    # that g actually generates the multiplicative group mod p -- confirm
    # against the toolkit's intent before relying on this.
    if progress_func:
        progress_func('g\n')
    size=bits-1-(ord(randfunc(1)) & 63)  # g will be from 1--64 bits smaller than p
    if size<1:
        size=bits-1
    while (1):
        obj.g=bignum(getPrime(size, randfunc))
        if obj.g < obj.p:
            break
        # candidate was too large: retry with a slightly different size
        size=(size+1) % bits
        if size==0:
            size=4
    # Generate random number x (the private exponent)
    if progress_func:
        progress_func('x\n')
    while (1):
        size=bits-1-ord(randfunc(1))  # x will be from 1 to 256 bits smaller than p
        if size>2:
            break
    while (1):
        obj.x=bignum(getPrime(size, randfunc))
        if obj.x < obj.p:
            break
        size = (size+1) % bits
        if size==0:
            size=4
    # Public value y = g^x mod p
    if progress_func:
        progress_func('y\n')
    obj.y = pow(obj.g, obj.x, obj.p)
    return obj
def construct(tuple):
    """construct(tuple:(long,long,long)|(long,long,long,long)) : ElGamalobj

    Construct an ElGamal key from a 3-tuple (p, g, y) -- public key only --
    or a 4-tuple (p, g, y, x) including the private exponent.
    """
    # NOTE: the parameter name shadows the builtin 'tuple'; kept for
    # backward compatibility with positional callers.
    obj=ElGamalobj()
    if len(tuple) not in [3,4]:
        raise error, 'argument for construct() wrong length'
    for i in range(len(tuple)):
        # assign elements to the attribute names listed in keydata, in order
        field = obj.keydata[i]
        setattr(obj, field, tuple[i])
    return obj
class ElGamalobj(pubkey):
    """ElGamal key object holding the public (p, g, y) and optionally the
    private (x) components."""

    # order matters: construct() assigns tuple elements to these names in order
    keydata=['p', 'g', 'y', 'x']

    def _encrypt(self, M, K):
        # ElGamal encryption: ciphertext is the pair (g^K mod p, M*y^K mod p)
        a=pow(self.g, K, self.p)
        b=( M*pow(self.y, K, self.p) ) % self.p
        return ( a,b )

    def _decrypt(self, M):
        if (not hasattr(self, 'x')):
            raise error, 'Private key not available in this object'
        # recover M = b / a^x mod p via the modular inverse of a^x
        ax=pow(M[0], self.x, self.p)
        plaintext=(M[1] * inverse(ax, self.p ) ) % self.p
        return plaintext

    def _sign(self, M, K):
        if (not hasattr(self, 'x')):
            raise error, 'Private key not available in this object'
        p1=self.p-1
        # K must be invertible modulo p-1 for the signature equation to work
        if (GCD(K, p1)!=1):
            raise error, 'Bad K value: GCD(K,p-1)!=1'
        a=pow(self.g, K, self.p)
        t=(M-self.x*a) % p1
        while t<0: t=t+p1
        b=(t*inverse(K, p1)) % p1
        return (a, b)

    def _verify(self, M, sig):
        # accept iff y^a * a^b == g^M (mod p)
        v1=pow(self.y, sig[0], self.p)
        v1=(v1*pow(sig[0], sig[1], self.p)) % self.p
        v2=pow(self.g, M, self.p)
        if v1==v2:
            return 1
        return 0

    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return number.size(self.p) - 1

    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        if hasattr(self, 'x'):
            return 1
        else:
            return 0

    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.p, self.g, self.y))
object=ElGamalobj
| apache-2.0 |
m-takeuchi/ilislife_wxp | make_rsa_kay.py | 2 | 2959 | #!/usr/bin/env python3
# coding=utf-8
from Crypto.PublicKey import RSA
from Crypto import Random
import os,sys
random_func = Random.new().read
# NOTE(review): a 2048-bit RSA key pair is generated as an import side effect;
# this is slow and runs even when the key files already exist -- consider
# moving the generation into make_id_rsa().
rsa = RSA.generate(2048, random_func)
def get_id_rsa(id_rsa_file, passphrase=None):
    """Load and return the private RSA key stored in *id_rsa_file*.

    BUG FIX: *passphrase* was accepted but never used, so passphrase-protected
    keys could not be loaded; it is now forwarded to RSA.importKey().
    """
    with open(id_rsa_file, 'rb') as f:
        id_rsa = RSA.importKey(f.read(), passphrase=passphrase)
    return id_rsa
def get_id_rsa_pub(id_rsa_pub_file, passphrase=None):
    """Load and return the public RSA key stored in *id_rsa_pub_file*.

    NOTE(review): *passphrase* is accepted but unused here (public keys are
    not encrypted in this workflow).
    """
    with open(id_rsa_pub_file, 'rb') as f:
        id_rsa_pub = RSA.importKey(f.read())
        # id_rsa_pub = RSA.pudkey(f.read())
    return id_rsa_pub
def make_id_rsa(id_rsa_file, passphrase=None):
    """Export the module-level key as PEM into *id_rsa_file*, optionally
    encrypting it with *passphrase* (None writes an unencrypted key)."""
    # private_key = rsa.exportKey(format='PEM', passphrase='hogehoge')
    private_key = rsa.exportKey(format='PEM', passphrase=passphrase)
    with open(id_rsa_file, 'wb') as f:
        f.write(private_key)
def make_id_rsa_pub(id_rsa_pub_file, id_rsa_file, pasphrase=None):
    """Derive the public key from the private key in *id_rsa_file* and write
    it as PEM to *id_rsa_pub_file*.

    NOTE(review): the third parameter is misspelled ('pasphrase') and unused;
    get_id_rsa() is also called without a passphrase here, so this fails for
    passphrase-protected private keys -- confirm intended behaviour.
    """
    id_rsa = get_id_rsa(id_rsa_file)
    # print(id_rsa)
    public_pem = id_rsa.publickey().exportKey()
    # print(public_pem)
    with open(id_rsa_pub_file, 'wb') as f:
        f.write(public_pem)
def encrypt_file(id_rsa_pub_file, plain_text_file, encrypted_text_file):
    """Encrypt the contents of *plain_text_file* with the public key and
    write the raw ciphertext to *encrypted_text_file*."""
    # Encrypt with the public key.
    id_rsa_pub = get_id_rsa_pub(id_rsa_pub_file)
    print(id_rsa_pub)
    with open(plain_text_file, 'r') as f:
        plain_text = f.read()
        with open(encrypted_text_file, 'wb') as f2:
            # NOTE(review): raw RSA via PublicKey.encrypt is deprecated and
            # insecure in PyCrypto (PKCS1_OAEP is the recommended API), and
            # *plain_text* is a str here while encrypt expects bytes on
            # Python 3 -- confirm this path actually works.
            f2.write(id_rsa_pub.encrypt(plain_text, random_func)[0])
def encrypt_str(id_rsa_pub_file, string, encrypted_text_file):
    """Encrypt the bytes *string* with the public key and write the raw
    ciphertext to *encrypted_text_file*."""
    # Encrypt with the public key.
    id_rsa_pub = get_id_rsa_pub(id_rsa_pub_file)
    print(id_rsa_pub)
    with open(encrypted_text_file, 'wb') as f:
        # NOTE(review): raw RSA encryption (no padding) -- see encrypt_file()
        f.write(id_rsa_pub.encrypt(string, random_func)[0])
#
# # 秘密鍵による復号化
# with open('cipher.txt', 'r') as f:
# with open('plain_decoded.txt', 'w') as f2:
# f2.write(RSA.importKey(private_pem, 'hogehoge').decrypt(f.read()))
#
# # 秘密鍵による電子署名の作成
# with open('file.txt', 'r') as f:
# with open('signature.bin', 'w') as f2:
# f2.write(str(RSA.importKey(private_pem, 'hogehoge').sign(f.read(), random_func)[0]))
#
# # 公開鍵による電子署名の検証
# with open('signature.bin', 'r') as f:
# with open('file.txt', 'r') as f2:
# rsa.verify(f2.read(), (long(f.read()),))
if __name__ == '__main__':
    # Ensure ~/.ssh exists, create the key pair if missing, then encrypt a
    # password entered on stdin into pass.rsa.
    HOME = os.path.expanduser('~/')
    SSH = os.path.expanduser(HOME+'.ssh/')
    RSA_FILES = {'PRV': SSH+'id_rsa', 'PUB': SSH+'id_rsa.pub'}
    if not os.path.isdir(SSH):
        print('No such '+SSH+ 'directory. '+'Making ' + SSH)
        os.mkdir(SSH)
    if not os.path.isfile(RSA_FILES['PRV']):
        # NOTE(review): 600 is passed as make_id_rsa's *passphrase* argument;
        # it looks like an intended file permission mode (0o600) -- confirm.
        make_id_rsa(RSA_FILES['PRV'], 600)
    if not os.path.isfile(RSA_FILES['PUB']):
        make_id_rsa_pub(RSA_FILES['PUB'], RSA_FILES['PRV'])
    # NOTE(review): input() echoes the password; getpass.getpass() would not.
    input_gmail_password = input('Please input gmail password>>> ')
    # print(input_gmail_password)
    encrypt_str(RSA_FILES['PUB'], bytes(input_gmail_password, 'utf-8'), 'pass.rsa')
| mit |
n3ocort3x/msm_htc_helper | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested autodicts on access."""
    return defaultdict(autodict)

# event_name -> field_name -> {'delim': str, 'values': {int: name}}
flag_fields = autodict()
# event_name -> field_name -> {'values': {int: name}}
symbolic_fields = autodict()

def define_flag_field(event_name, field_name, delim):
    """Register a flag-type field and the delimiter used when rendering it."""
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    """Register the symbolic name of one bit value of a flag field."""
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the symbolic name of one value of a symbolic field."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render the bit-mask *value* of a flag field as its symbolic
    'A <delim> B' string, using the table built by define_flag_*()."""
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        # Python 2 idiom: dict.keys() returns a list that can be sorted in place
        keys = flag_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            # a zero value maps directly to the name registered for bit 0
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # clear the consumed bit so leftovers don't re-match
                value &= ~idx

    return string
def symbol_str(event_name, field_name, value):
    """Render *value* of a symbolic field as its registered name, or ''
    when no name was registered for it."""
    string = ""

    if symbolic_fields[event_name][field_name]:
        # Python 2 idiom: dict.keys() returns a list that can be sorted in place
        keys = symbolic_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string
# Kernel trace-flag bits and their symbolic names.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Render *value* as a " | "-separated list of trace-flag names."""
    names = []
    for bit in trace_flags.keys():
        # zero maps directly to "NONE"
        if not value and not bit:
            names.append("NONE")
            break
        if bit and (value & bit) == bit:
            names.append(trace_flags[bit])
            value &= ~bit
    return " | ".join(names)
def taskState(state):
    """Translate a numeric scheduler task state into its letter code,
    or "Unknown" for unrecognised values."""
    state_names = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return state_names.get(state, "Unknown")
class EventHeaders:
    """Common header fields shared by every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full timestamp in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Timestamp rendered as '<seconds>.<microseconds>'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
Dingmatt/AMSA | Plug-ins/Amsa.bundle/Contents/Libraries/Shared/unidecode/x0fc.py | 253 | 3595 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
'', # 0xff
)
| gpl-3.0 |
BernardFW/bernard | src/bernard/platforms/test/platform.py | 1 | 4333 | # coding: utf-8
from typing import (
List,
Text,
Tuple,
Type,
)
from bernard.engine.fsm import (
FSM,
)
from bernard.engine.platform import (
Platform,
)
from bernard.engine.request import (
BaseMessage,
Conversation,
Request,
User,
)
from bernard.engine.responder import (
Responder,
)
from bernard.engine.state import (
BaseState,
)
from bernard.layers import (
BaseLayer,
Stack,
)
from bernard.media.base import (
BaseMedia,
)
from bernard.storage.register import (
Register,
)
from bernard.utils import (
run,
)
class TestUser(User):
    """
    Mock user with fixed identity values for tests.
    """

    _FRIENDLY = 'Test'
    _FORMAL = 'Formal Test'
    _FULL = 'Full Test'

    def get_friendly_name(self) -> Text:
        return self._FRIENDLY

    def get_formal_name(self) -> Text:
        return self._FORMAL

    def get_full_name(self) -> Text:
        return self._FULL

    async def get_timezone(self):
        # the mock user carries no timezone information
        return None

    async def get_locale(self):
        # the mock user carries no locale information
        return None
class TestConversation(Conversation):
    """
    Mock conversation object.
    """
class TestMessage(BaseMessage):
    """
    In-memory message whose content is the stack supplied at construction.
    """

    # both the mock user and the mock conversation share this static id
    _TEST_ID = 'test:test-id'

    def __init__(self, stack: Stack):
        self.stack = stack

    def get_platform(self):
        """Messages of this class always originate from the 'test' platform."""
        return 'test'

    def get_user(self):
        """Return the static test user."""
        return TestUser(self._TEST_ID)

    def get_conversation(self):
        """Return the static test conversation."""
        return TestConversation(self._TEST_ID)

    def get_layers(self):
        """Layers are exactly those of the stack given at init time."""
        return self.stack.layers
class TestResponder(Responder):
    """
    It's just a proxy to the platform's `send()` method.
    """
class TestPlatform(Platform):
    """
    This is a platform especially design for unit testing. You can create it
    using the `make_test_fsm()` method below.

    The usage is pretty simple:

    >>> from bernard import layers as l
    >>> from bernard.i18n import translate as t
    >>> from tests.issue_0001.states import Hello
    >>> _, platform = make_test_fsm()
    >>> platform.handle(l.Text('Hello!'))
    >>> platform.assert_state(Hello)
    >>> platform.assert_sent(l.stack(l.Text(t.HELLO)))
    """

    NAME = 'test'

    # messages are handled synchronously (see handle() below)
    fsm_creates_task = False

    def __init__(self):
        super(TestPlatform, self).__init__()
        # stacks captured by send(), inspected by assert_sent()
        self.sent = []  # type: List[Stack]

    async def send(self, request: Request, stack: Stack):
        """
        Store the message to be sent for later analysis
        """
        self.sent.append(stack)

    def accept(self, stack: Stack):
        """
        So far we accept anything, it's up to the test to test that things sent
        are the right ones.
        """
        return True

    def handle(self, *layers: BaseLayer):
        """
        Call this method to send a test message. Call it OUTSIDE the async
        loop. It will return when the message is fully handled.
        """
        # reset the capture buffer so assertions only see this message's output
        self.sent = []

        stack = Stack(list(layers))
        message = TestMessage(stack)
        responder = TestResponder(self)

        run(self._notify(message, responder))

    def assert_sent(self, *stacks: Stack):
        """
        Assert that the sent stacks are identical to the ones provided as
        argument here.
        """
        assert len(stacks) == len(self.sent)

        for s1, s2 in zip(stacks, self.sent):
            assert s1 == s2

    def assert_state(self, state_class: Type[BaseState]):
        """
        Assert that the state returned in the register is the one passed as
        argument.
        """
        assert self._register
        assert Register.STATE in self._register
        assert self._register[Register.STATE] == state_class.name()

    def ensure_usable_media(self, media: BaseMedia) -> BaseMedia:
        # no transformation needed on the test platform
        return media
def make_test_fsm() -> Tuple[FSM, TestPlatform]:
    """
    Generate both a FSM and a test platform for unit testing purposes.

    They will use the current configuration to load stories and transitions.
    """
    fsm = FSM()
    run(fsm.async_init())

    platform = TestPlatform()
    # noinspection PyTypeChecker
    platform.on_message(fsm.handle_message)

    return fsm, platform
| agpl-3.0 |
jonathan-beard/edx-platform | openedx/core/lib/api/permissions.py | 74 | 3045 | from django.conf import settings
from rest_framework import permissions
from django.http import Http404
from student.roles import CourseStaffRole
class ApiKeyHeaderPermission(permissions.BasePermission):
    """
    Grant access when the request carries the configured API key header.
    """

    def has_permission(self, request, view):
        """
        Check for permissions by matching the configured API key and header

        If settings.DEBUG is True and settings.EDX_API_KEY is not set or None,
        then allow the request. Otherwise, allow the request if and only if
        settings.EDX_API_KEY is set and the X-Edx-Api-Key HTTP header is
        present in the request and matches the setting.
        """
        api_key = getattr(settings, "EDX_API_KEY", None)
        if api_key is None:
            # no key configured: only permitted in DEBUG mode
            return settings.DEBUG
        return request.META.get("HTTP_X_EDX_API_KEY") == api_key
class ApiKeyHeaderPermissionIsAuthenticated(ApiKeyHeaderPermission, permissions.IsAuthenticated):
    """
    Allow someone to access the view if they have the API key OR they are authenticated.

    See ApiKeyHeaderPermission for more information how the API key portion is implemented.
    """

    def has_permission(self, request, view):
        #TODO We can optimize this later on when we know which of these methods is used more often.
        api_permissions = ApiKeyHeaderPermission.has_permission(self, request, view)
        is_authenticated_permissions = permissions.IsAuthenticated.has_permission(self, request, view)
        return api_permissions or is_authenticated_permissions
class IsUserInUrl(permissions.BasePermission):
    """
    Permission that checks to see if the request user matches the user in the URL.
    """

    def has_permission(self, request, view):
        """
        Returns true if the current request is by the user themselves.

        Note: a 404 is returned for non-staff instead of a 403. This is to prevent
        users from being able to detect the existence of accounts.
        """
        url_username = request.parser_context.get('kwargs', {}).get('username', '')
        if request.user.username.lower() != url_username.lower():
            if request.user.is_staff:
                return False  # staff gets 403
            raise Http404()
        return True
class IsUserInUrlOrStaff(IsUserInUrl):
    """
    Permission granting access to staff users, or to the user named in the URL.
    """

    def has_permission(self, request, view):
        """Staff always pass; everyone else falls back to the URL check."""
        if not request.user.is_staff:
            return super(IsUserInUrlOrStaff, self).has_permission(request, view)
        return True
class IsStaffOrReadOnly(permissions.BasePermission):
    """Permission that checks to see if the user is global or course
    staff, permitting only read-only access if they are not.
    """

    def has_object_permission(self, request, view, obj):
        # read-only methods are always allowed
        if request.method in permissions.SAFE_METHODS:
            return True
        # writes require global staff or course staff for this object's course
        if request.user.is_staff:
            return True
        return CourseStaffRole(obj.course_id).has_user(request.user)
| agpl-3.0 |
FelixZYY/gyp | test/subdirectory/gyptest-top-all.py | 261 | 1373 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
There is a difference here in the default behavior of the underlying
build tools. Specifically, when building the entire "solution", Xcode
puts the output of each project relative to the .xcodeproj directory,
while Visual Studio (and our implementation of Make) put it
in a build directory relative to the "solution"--that is, the entry-point
from which you built the entire tree.
"""
import TestGyp
test = TestGyp.TestGyp()

test.run_gyp('prog1.gyp', chdir='src')

test.relocate('src', 'relocate/src')

test.build('prog1.gyp', test.ALL, chdir='relocate/src')

test.run_built_executable('prog1',
                          stdout="Hello from prog1.c\n",
                          chdir='relocate/src')

# Xcode puts each project's output next to its .xcodeproj (see module
# docstring), so prog2 is found under the subdirectory there; other
# generators build into the top-level directory.
if test.format == 'xcode':
  chdir = 'relocate/src/subdir'
else:
  chdir = 'relocate/src'
test.run_built_executable('prog2',
                          chdir=chdir,
                          stdout="Hello from prog2.c\n")

test.pass_test()
| bsd-3-clause |
etherkit/OpenBeacon2 | client/macos/venv/lib/python3.8/site-packages/PyInstaller/depend/bindepend.py | 3 | 37312 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Find external dependencies of binary libraries.
"""
import ctypes.util
import os
import re
import sys
from glob import glob
# Required for extracting eggs.
import zipfile
import collections
from .. import compat
from ..compat import (is_win, is_win_10, is_unix,
is_aix, is_solar, is_cygwin, is_hpux,
is_darwin, is_freebsd, is_openbsd, is_venv, is_conda,
base_prefix, PYDYLIB_NAMES)
from . import dylib, utils
from .. import log as logging
from ..utils.win32 import winutils
logger = logging.getLogger(__name__)
seen = set()
# Import windows specific stuff.
if is_win:
from ..utils.win32.winmanifest import RT_MANIFEST
from ..utils.win32.winmanifest import GetManifestResources
from ..utils.win32.winmanifest import Manifest
from ..utils.win32 import winresource
import pefile
# Do not load all the directories information from the PE file
pefile.fast_load = True
def getfullnameof(mod, xtrapath=None):
    """
    Return the full path name of MOD, or '' if it cannot be found.

    MOD is the basename of a dll or pyd.
    XTRAPATH is a single path or a list of paths to search first.

    Searches sys.path, numpy's DLL directories, the Windows system
    search path and PATH, in that order.
    """
    # TODO: Allow in import-hooks to specify additional paths where the PyInstaller
    # should look for other libraries.
    # Or allow to automatically look for dlls in directories where are .pyd files.
    # SciPy/Numpy Windows builds from http://www.lfd.uci.edu/~gohlke/pythonlibs
    # Contain some dlls in directory like C:\Python27\Lib\site-packages\numpy\core\
    from distutils.sysconfig import get_python_lib
    numpy_core_paths = [os.path.join(get_python_lib(), 'numpy', 'core')]
    # In virtualenv numpy might be installed directly in real prefix path.
    # Then include this path too.
    if is_venv:
        numpy_core_paths.append(
            os.path.join(base_prefix, 'Lib', 'site-packages', 'numpy', 'core')
        )
    # TODO check if this 'numpy' workaround is still necessary!
    # Search sys.path first!
    epath = (sys.path + numpy_core_paths + winutils.get_system_path() +
             compat.getenv('PATH', '').split(os.pathsep))
    if xtrapath is not None:
        # accept either a single directory string or a list of directories;
        # isinstance replaces the fragile "type(xtrapath) == type('')" check
        if isinstance(xtrapath, str):
            epath.insert(0, xtrapath)
        else:
            epath = xtrapath + epath
    for p in epath:
        npth = os.path.join(p, mod)
        # the DLL must both exist and match the architecture of this process
        if os.path.exists(npth) and matchDLLArch(npth):
            return npth
    return ''
def _getImports_pe(pth):
    """
    Return the set of DLL names the PE binary at *pth* depends on.

    Walks the PE header with the pefile library; handles both 32 and
    64 bit Windows binaries.
    """
    # pefile parses every PE data directory by default; restrict parsing
    # to imports and exports to keep this fast.
    # https://code.google.com/p/pefile/wiki/UsageExamples
    pe = pefile.PE(pth, fast_load=True)
    pe.parse_data_directories(
        directories=[
            pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
            pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'],
        ],
        forwarded_exports_only=True,
        import_dllnames_only=True,
    )
    # Some libraries (e.g. C:\windows\system32\kernel32.dll on Wine) have
    # no binary dependencies at all; pefile then omits the attribute, so
    # getattr with an empty default covers that case.
    dlls = {
        winutils.convert_dll_name_to_str(entry.dll)
        for entry in getattr(pe, 'DIRECTORY_ENTRY_IMPORT', [])
    }
    # The exports table must also be read to find forwarded symbols:
    # http://blogs.msdn.com/b/oldnewthing/archive/2006/07/19/671238.aspx
    export_table = getattr(pe, 'DIRECTORY_ENTRY_EXPORT', None)
    if export_table:
        for symbol in export_table.symbols:
            if symbol.forwarder is None:
                continue
            # symbol.forwarder is a bytes object such as
            # b'KERNEL32.EnterCriticalSection'; convert and split it.
            forwarder = winutils.convert_dll_name_to_str(symbol.forwarder)
            target_dll, _ = forwarder.split('.')
            dlls.add(target_dll + ".dll")
    pe.close()
    return dlls
def _extract_from_egg(toc):
"""
Ensure all binary modules in zipped eggs get extracted and
included with the frozen executable.
return modified table of content
"""
new_toc = []
for item in toc:
# Item is a tupple
# (mod_name, path, type)
modname, pth, typ = item
if not os.path.isfile(pth):
pth = check_extract_from_egg(pth)[0][0]
# Add value to new data structure.
new_toc.append((modname, pth, typ))
return new_toc
# Record describing one assembly binding redirect discovered in a policy
# file: the assembly identity plus the old -> new version mapping.
BindingRedirect = collections.namedtuple('BindingRedirect',
                                         'name language arch oldVersion newVersion publicKeyToken')
def match_binding_redirect(manifest, redirect):
    """
    Return True if *manifest* identifies the assembly that *redirect*
    remaps.

    :param manifest: manifest/assembly object exposing name, version,
        language, processorArchitecture and publicKeyToken attributes.
    :param redirect: a BindingRedirect; its ``oldVersion``/``arch`` fields
        are compared against the manifest's version/architecture.
    :return: True when name, old version, language, architecture and
        public key token all match.
    """
    # Direct boolean chain with short-circuiting; the original built a
    # throwaway list just to feed all().
    return (manifest.name == redirect.name
            and manifest.version == redirect.oldVersion
            and manifest.language == redirect.language
            and manifest.processorArchitecture == redirect.arch
            and manifest.publicKeyToken == redirect.publicKeyToken)
_exe_machine_type = None
def matchDLLArch(filename):
    """
    Return True if the DLL given by filename matches the CPU
    type/architecture of the Python process running PyInstaller.

    Always returns True on non-Windows platforms.

    :param filename: path of the DLL to inspect.
    :return: True when the DLL's PE machine type equals the running
        executable's (cached in module global ``_exe_machine_type``).
    :raises SystemExit: when either file cannot be parsed as PE.
    """
    # TODO: check machine type on other platforms?
    if not is_win:
        return True
    global _exe_machine_type
    try:
        if _exe_machine_type is None:
            # First call: determine and cache the interpreter's own
            # machine type so later calls only parse `filename`.
            pefilename = sys.executable  # for exception handling
            exe_pe = pefile.PE(sys.executable, fast_load=True)
            _exe_machine_type = exe_pe.FILE_HEADER.Machine
            exe_pe.close()
        # `pefilename` always names the file currently being parsed so the
        # error message below reports the right one.
        pefilename = filename  # for exception handling
        pe = pefile.PE(filename, fast_load=True)
        match_arch = pe.FILE_HEADER.Machine == _exe_machine_type
        pe.close()
    except pefile.PEFormatError as exc:
        raise SystemExit('Can not get architecture from file: %s\n'
                         ' Reason: %s' % (pefilename, exc))
    return match_arch
def Dependencies(lTOC, xtrapath=None, manifest=None, redirects=None):
    """
    Expand LTOC to include all the closure of binary dependencies.

    `LTOC` is a logical table of contents, ie, a seq of tuples (name, path).
    Return LTOC expanded by all the binary dependencies of the entries
    in LTOC, except those listed in the module global EXCLUDES

    `manifest` may be a winmanifest.Manifest instance for a program manifest, so
    that all dependent assemblies of python.exe can be added to the built exe.

    `redirects` may be a list. Any assembly redirects found via policy files will
    be added to the list as BindingRedirect objects so they can later be used
    to modify any manifests that reference the redirected assembly.
    """
    # Extract all necessary binary modules from Python eggs to be included
    # directly with PyInstaller.
    lTOC = _extract_from_egg(lTOC)
    # NOTE: deliberate worklist pattern -- entries appended to lTOC inside
    # this loop are picked up by the same for-loop, so the transitive
    # closure of dependencies is walked without recursion. The module-level
    # `seen` set prevents re-analyzing a binary.
    for nm, pth, typ in lTOC:
        if nm.upper() in seen:
            continue
        logger.debug("Analyzing %s", pth)
        seen.add(nm.upper())
        if is_win:
            # Also pull in the Side-by-Side assembly files this binary needs.
            for ftocnm, fn in getAssemblyFiles(pth, manifest, redirects):
                lTOC.append((ftocnm, fn, 'BINARY'))
        for lib, npth in selectImports(pth, xtrapath):
            if lib.upper() in seen or npth.upper() in seen:
                continue
            seen.add(npth.upper())
            lTOC.append((lib, npth, 'BINARY'))
    return lTOC
def pkg_resources_get_default_cache():
    """
    Determine the default egg cache location.

    Returns the ``PYTHON_EGG_CACHE`` environment variable when set.
    Otherwise, on Windows, returns a 'Python-Eggs' subdirectory of the
    'Application Data' directory; on all other systems '~/.python-eggs'.

    :raises RuntimeError: on Windows when no usable home-directory
        environment variables are set.
    """
    # This function borrowed from setuptools/pkg_resources.
    egg_cache = compat.getenv('PYTHON_EGG_CACHE')
    if egg_cache is not None:
        return egg_cache
    if os.name != 'nt':
        return os.path.expanduser('~/.python-eggs')
    app_data = 'Application Data'  # XXX this may be locale-specific!
    # Candidate (environment variables, subdirectory) pairs, best first.
    app_homes = [
        (('APPDATA',), None),  # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE', 'HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data),  # 95/98/ME
    ]
    for keys, subdir in app_homes:
        # Every variable of the combination must be present for the
        # candidate to be usable.
        if not all(key in os.environ for key in keys):
            continue
        dirname = os.path.join('', *(compat.getenv(key) for key in keys))
        if subdir:
            dirname = os.path.join(dirname, subdir)
        return os.path.join(dirname, 'Python-Eggs')
    raise RuntimeError(
        "Please set the PYTHON_EGG_CACHE environment variable"
    )
def check_extract_from_egg(pth, todir=None):
    r"""
    Check if path points to a file inside a python egg file, extract the
    file from the egg to a cache directory (following pkg_resources
    convention) and return [(extracted path, egg file path, relative path
    inside egg file)].

    Otherwise, just return [(original path, None, None)].

    If path points to an egg file directly, return a list with all files
    from the egg formatted like above.

    :param pth: path to examine.
    :param todir: extraction directory; defaults to the pkg_resources
        cache directory so previously-extracted contents are reused.
    :raises SystemExit: when the egg is not a valid zip archive.

    Example:
    >>> check_extract_from_egg(r'C:\Python26\Lib\site-packages\my.egg\mymodule\my.pyd')
    [(r'C:\Users\UserName\AppData\Roaming\Python-Eggs\my.egg-tmp\mymodule\my.pyd',
    r'C:\Python26\Lib\site-packages\my.egg', r'mymodule/my.pyd')]
    """
    rv = []
    # Normalize separators so the component split below sees one style.
    if os.path.altsep:
        pth = pth.replace(os.path.altsep, os.path.sep)
    components = pth.split(os.path.sep)
    for i, name in enumerate(components):
        if not name.lower().endswith(".egg"):
            continue
        eggpth = os.path.sep.join(components[:i + 1])
        # Eggs can also be directories! Only zipped eggs need extraction.
        if not os.path.isfile(eggpth):
            continue
        try:
            egg = zipfile.ZipFile(eggpth)
        except zipfile.BadZipfile as e:
            raise SystemExit("Error: %s %s" % (eggpth, e))
        # Close the archive deterministically (the original leaked the
        # file handle until garbage collection).
        with egg:
            if todir is None:
                # Use the same directory as setuptools/pkg_resources. So,
                # if the specific egg was accessed before (not necessarily
                # by pyinstaller), the extracted contents already exist
                # (pkg_resources puts them there) and can be used.
                todir = os.path.join(pkg_resources_get_default_cache(),
                                     name + "-tmp")
            if components[i + 1:]:
                # A specific member inside the egg was requested.
                members = ["/".join(components[i + 1:])]
            else:
                # The egg itself was requested; extract every member.
                members = egg.namelist()
            for member in members:
                pth = os.path.join(todir, member)
                if not os.path.isfile(pth):
                    dirname = os.path.dirname(pth)
                    if not os.path.isdir(dirname):
                        os.makedirs(dirname)
                    with open(pth, "wb") as f:
                        f.write(egg.read(member))
                rv.append((pth, eggpth, member))
        return rv
    return [(pth, None, None)]
def getAssemblies(pth):
    """
    On Windows return the dependent Side-by-Side (SxS) assemblies of a binary as a
    list of Manifest objects.

    Dependent assemblies are required only by binaries compiled with MSVC 9.0.
    Python 2.7 and 3.2 is compiled with MSVC 9.0 and thus depends on Microsoft
    Redistributable runtime libraries 9.0.

    Python 3.3+ is compiled with version 10.0 and does not use SxS assemblies.

    FIXME: Can this be removed since we now only support Python 3.5+?
    FIXME: IS there some test-case covering this?
    """
    # Manifest files themselves have no dependent assemblies.
    if pth.lower().endswith(".manifest"):
        return []
    # check for manifest file
    manifestnm = pth + ".manifest"
    if os.path.isfile(manifestnm):
        # An external .manifest file next to the binary takes precedence.
        with open(manifestnm, "rb") as fd:
            res = {RT_MANIFEST: {1: {0: fd.read()}}}
    else:
        # check the binary for embedded manifest
        try:
            res = GetManifestResources(pth)
        except winresource.pywintypes.error as exc:
            if exc.args[0] == winresource.ERROR_BAD_EXE_FORMAT:
                logger.info('Cannot get manifest resource from non-PE '
                            'file %s', pth)
                return []
            raise
    rv = []
    if RT_MANIFEST in res and len(res[RT_MANIFEST]):
        # Resources are keyed by name, then by language.
        for name in res[RT_MANIFEST]:
            for language in res[RT_MANIFEST][name]:
                # check the manifest for dependent assemblies
                try:
                    manifest = Manifest()
                    manifest.filename = ":".join([pth, str(RT_MANIFEST),
                                                  str(name), str(language)])
                    manifest.parse_string(res[RT_MANIFEST][name][language],
                                          False)
                except Exception as exc:
                    logger.error("Can not parse manifest resource %s, %s"
                                 " from %s", name, language, pth, exc_info=1)
                else:
                    if manifest.dependentAssemblies:
                        logger.debug("Dependent assemblies of %s:", pth)
                        logger.debug(", ".join([assembly.getid()
                                               for assembly in
                                               manifest.dependentAssemblies]))
                    rv.extend(manifest.dependentAssemblies)
    return rv
def getAssemblyFiles(pth, manifest=None, redirects=None):
    """
    Find all assemblies that are dependencies of the given binary and return the files
    that make up the assemblies as (name, fullpath) tuples.

    If a WinManifest object is passed as `manifest`, also updates that manifest to
    reference the returned assemblies. This is done only to update the built app's .exe
    with the dependencies of python.exe

    If a list is passed as `redirects`, and binding redirects in policy files are
    applied when searching for assemblies, BindingRedirect objects are appended to this
    list.

    Return a list of pairs (name, fullpath)
    """
    rv = []
    if manifest:
        _depNames = set(dep.name for dep in manifest.dependentAssemblies)
    for assembly in getAssemblies(pth):
        # Module-global `seen` skips assemblies already collected earlier.
        if assembly.getid().upper() in seen:
            continue
        if manifest and assembly.name not in _depNames:
            # Add assembly as dependency to our final output exe's manifest
            logger.info("Adding %s to dependent assemblies "
                        "of final executable\n required by %s",
                        assembly.name, pth)
            manifest.dependentAssemblies.append(assembly)
            _depNames.add(assembly.name)
        if not dylib.include_library(assembly.name):
            logger.debug("Skipping assembly %s", assembly.getid())
            continue
        if assembly.optional:
            logger.debug("Skipping optional assembly %s", assembly.getid())
            continue
        from ..config import CONF
        if CONF.get("win_no_prefer_redirects"):
            files = assembly.find_files()
        else:
            files = []
        if not len(files):
            # If no files were found, it may be the case that the required version
            # of the assembly is not installed, and the policy file is redirecting it
            # to a newer version. So, we collect the newer version instead.
            files = assembly.find_files(ignore_policies=False)
            if len(files) and redirects is not None:
                # New version was found, old version was not. Add a redirect in the
                # app configuration
                old_version = assembly.version
                new_version = assembly.get_policy_redirect()
                logger.info("Adding redirect %s version %s -> %s",
                            assembly.name, old_version, new_version)
                redirects.append(BindingRedirect(
                    name=assembly.name,
                    language=assembly.language,
                    arch=assembly.processorArchitecture,
                    publicKeyToken=assembly.publicKeyToken,
                    oldVersion=old_version,
                    newVersion=new_version,
                ))
        if files:
            seen.add(assembly.getid().upper())
            for fn in files:
                fname, fext = os.path.splitext(fn)
                # Manifest files are stored under the assembly's own name so
                # they can be matched back to the assembly later.
                if fext.lower() == ".manifest":
                    nm = assembly.name + fext
                else:
                    nm = os.path.basename(fn)
                ftocnm = nm
                # Language-specific assemblies are nested under a
                # language-named subdirectory in the TOC.
                if assembly.language not in (None, "", "*", "neutral"):
                    ftocnm = os.path.join(assembly.getlanguage(),
                                          ftocnm)
                nm, ftocnm, fn = [item.encode(sys.getfilesystemencoding())
                                  for item in
                                  (nm,
                                   ftocnm,
                                   fn)]
                if fn.upper() not in seen:
                    logger.debug("Adding %s", ftocnm)
                    seen.add(nm.upper())
                    seen.add(fn.upper())
                    rv.append((ftocnm, fn))
                else:
                    #logger.info("skipping %s part of assembly %s dependency of %s",
                    #            ftocnm, assembly.name, pth)
                    pass
        else:
            logger.error("Assembly %s not found", assembly.getid())
    # Convert items in list from 'bytes' type to 'str' type.
    # NOTE: With Python 3 we somehow get type 'bytes' and it
    # then causes other issues and failures with PyInstaller.
    new_rv = []
    for item in rv:
        a = item[0].decode('ascii')
        b = item[1].decode('ascii')
        new_rv.append((a, b))
    rv = new_rv
    return rv
def selectImports(pth, xtrapath=None):
    """
    Return the dependencies of a binary that should be included.

    :param pth: path of the binary to analyze.
    :param xtrapath: optional list of extra directories searched (after
        the binary's own directory) when resolving Windows DLL names.
    :return: a list of pairs (name, fullpath), excluding libraries already
        seen and libraries filtered out by dylib.include_library().
    """
    rv = []
    if xtrapath is None:
        xtrapath = [os.path.dirname(pth)]
    else:
        assert isinstance(xtrapath, list)
        xtrapath = [os.path.dirname(pth)] + xtrapath  # make a copy
    dlls = getImports(pth)
    for lib in dlls:
        if lib.upper() in seen:
            continue
        if not is_win and not is_cygwin:
            # all other platforms: getImports already returned a full path,
            # so split it into (basename, path).
            npth = lib
            lib = os.path.basename(lib)
        else:
            # plain win case: resolve the bare DLL name via the search path.
            npth = getfullnameof(lib, xtrapath)
        # now npth is a candidate lib if found
        # check again for excludes but with regex FIXME: split the list
        if npth:
            candidatelib = npth
        else:
            candidatelib = lib
        if not dylib.include_library(candidatelib):
            # libpython / Python.framework are always kept even when the
            # exclude rules would drop them.
            if (candidatelib.find('libpython') < 0 and
                candidatelib.find('Python.framework') < 0):
                # skip libs not containing (libpython or Python.framework)
                if npth.upper() not in seen:
                    logger.debug("Skipping %s dependency of %s",
                                 lib, os.path.basename(pth))
                continue
            else:
                pass
        if npth:
            if npth.upper() not in seen:
                logger.debug("Adding %s dependency of %s from %s",
                             lib, os.path.basename(pth), npth)
                rv.append((lib, npth))
        else:
            # Don't spew out false warnings on win 10 and UCRT (see issue
            # #1566).
            if not (is_win_10 and lib.startswith("api-ms-win-crt")):
                logger.warning("lib not found: %s dependency of %s", lib, pth)
    return rv
def _getImports_ldd(pth):
    """
    Find the binary dependencies of PTH.

    This implementation is for ldd platforms (mostly unix).

    :param pth: path of the binary to analyze.
    :return: set of library paths reported by ``ldd`` that exist on disk.
    """
    rslt = set()
    if is_aix:
        # Match libs of the form
        #   'archivelib.a(objectmember.so/.o)'
        # or
        #   'sharedlib.so'
        # Will not match the fake lib '/unix'
        lddPattern = re.compile(r"^\s*(((?P<libarchive>(.*\.a))(?P<objectmember>\(.*\)))|((?P<libshared>(.*\.so))))$")
    elif is_hpux or is_solar:
        # Both platforms print libs of the form
        #   'sharedlib.so => full-path-to-lib'
        # e.g.
        #   'libpython2.7.so.1.0 => /usr/local/lib/libpython2.7.so.1.0'
        # On Solaris this will not match the platform specific libs
        # starting with '/platform'. (The two branches used to duplicate
        # an identical regex; they are merged here.)
        lddPattern = re.compile(r"^\s+(.*)\s+=>\s+(.*)$")
    else:
        lddPattern = re.compile(r"\s*(.*?)\s+=>\s+(.*?)\s+\(.*\)")
    for line in compat.exec_command('ldd', pth).splitlines():
        m = lddPattern.search(line)
        if m:
            if is_aix:
                libarchive = m.group('libarchive')
                if libarchive:
                    # We matched an archive lib with a request for a particular
                    # embedded shared object.
                    #   'archivelib.a(objectmember.so/.o)'
                    lib = libarchive
                    name = os.path.basename(lib) + m.group('objectmember')
                else:
                    # We matched a stand-alone shared library.
                    #   'sharedlib.so'
                    lib = m.group('libshared')
                    name = os.path.basename(lib)
            else:
                # HP-UX, Solaris and generic ldd output all put the name in
                # group 1 and the resolved path in group 2.
                name, lib = m.group(1), m.group(2)
            if name[:10] in ('linux-gate', 'linux-vdso'):
                # linux-gate is a fake library which does not exist and
                # should be ignored. See also:
                # http://www.trilithium.com/johan/2005/08/linux-gate/
                continue
            if os.path.exists(lib):
                # set.add() is a no-op for duplicates; no membership test
                # is needed (the original checked `lib not in rslt` first).
                rslt.add(lib)
            else:
                logger.error('Can not find %s in path %s (needed by %s)',
                             name, lib, pth)
    return rslt
def _getImports_macholib(pth):
    """
    Find the binary dependencies of PTH.

    This implementation is for Mac OS X and uses library macholib.

    :param pth: path of the Mach-O binary to analyze.
    :return: set of absolute paths of libraries the binary links against.
    """
    from macholib.MachO import MachO
    from macholib.mach_o import LC_RPATH
    from macholib.dyld import dyld_find
    rslt = set()
    seen = set()  # Libraries read from binary headers.
    ## Walk through mach binary headers.
    m = MachO(pth)
    for header in m.headers:
        for idx, name, lib in header.walkRelocatables():
            # Sometimes some libraries are present multiple times.
            if lib not in seen:
                seen.add(lib)
    # Walk through mach binary headers and look for LC_RPATH.
    # macholib can't handle @rpath. LC_RPATH has to be read
    # from the MachO header.
    # TODO Do we need to remove LC_RPATH from MachO load commands?
    #      Will it cause any harm to leave them untouched?
    #      Removing LC_RPATH should be implemented when getting
    #      files from the bincache if it is necessary.
    run_paths = set()
    for header in m.headers:
        for command in header.commands:
            # A command is a tupple like:
            #   (<macholib.mach_o.load_command object at 0x>,
            #    <macholib.mach_o.rpath_command object at 0x>,
            #    '../lib\x00\x00')
            cmd_type = command[0].cmd
            if cmd_type == LC_RPATH:
                rpath = command[2].decode('utf-8')
                # Remove trailing '\x00' characters.
                # e.g. '../lib\x00\x00'
                rpath = rpath.rstrip('\x00')
                # Replace the @executable_path and @loader_path keywords
                # with the actual path to the binary.
                executable_path = os.path.dirname(pth)
                rpath = re.sub('^@(executable_path|loader_path|rpath)(/|$)',
                               executable_path + r'\2', rpath)
                # Make rpath absolute. According to Apple doc LC_RPATH
                # is always relative to the binary location.
                rpath = os.path.normpath(os.path.join(executable_path, rpath))
                run_paths.update([rpath])
            else:
                # Frameworks that have this structure Name.framework/Versions/N/Name
                # need to to search at the same level as the framework dir.
                # This is specifically needed so that the QtWebEngine dependencies
                # can be found.
                if '.framework' in pth:
                    run_paths.update(['../../../'])
    # for distributions like Anaconda, all of the dylibs are stored in the lib directory
    # of the Python distribution, not alongside of the .so's in each module's subdirectory.
    run_paths.add(os.path.join(base_prefix, 'lib'))
    ## Try to find files in file system.
    # In cases with @loader_path or @executable_path
    # try to look in the same directory as the checked binary is.
    # This seems to work in most cases.
    exec_path = os.path.abspath(os.path.dirname(pth))
    for lib in seen:
        # Suppose that @rpath is not used for system libraries and
        # using macholib can be avoided.
        # macholib can't handle @rpath.
        if lib.startswith('@rpath'):
            lib = lib.replace('@rpath', '.')  # Make path relative.
            final_lib = None  # Absolute path to existing lib on disk.
            # Try multiple locations.
            for run_path in run_paths:
                # @rpath may contain relative value. Use exec_path as
                # base path.
                if not os.path.isabs(run_path):
                    run_path = os.path.join(exec_path, run_path)
                # Stop looking for lib when found in first location.
                if os.path.exists(os.path.join(run_path, lib)):
                    final_lib = os.path.abspath(os.path.join(run_path, lib))
                    rslt.add(final_lib)
                    break
            # Log error if no existing file found.
            if not final_lib:
                logger.error('Can not find path %s (needed by %s)', lib, pth)
        # Macholib has to be used to get absolute path to libraries.
        else:
            # macholib can't handle @loader_path. It has to be
            # handled the same way as @executable_path.
            # It is also replaced by 'exec_path'.
            if lib.startswith('@loader_path'):
                lib = lib.replace('@loader_path', '@executable_path')
            try:
                lib = dyld_find(lib, executable_path=exec_path)
                rslt.add(lib)
            except ValueError:
                logger.error('Can not find path %s (needed by %s)', lib, pth)
    return rslt
def getImports(pth):
    """
    Dispatch to the platform-specific implementation that lists the
    binary dependencies of the file at *pth*.
    """
    if is_darwin:
        return _getImports_macholib(pth)
    if not (is_win or is_cygwin):
        return _getImports_ldd(pth)
    # Windows / Cygwin. Manifest files are not PE binaries and thus have
    # no imports of their own.
    if pth.lower().endswith(".manifest"):
        return []
    try:
        return _getImports_pe(pth)
    except Exception as exception:
        # Assemblies can pull in files which aren't necessarily PE,
        # but are still needed by the assembly. Any additional binary
        # dependencies should already have been handled by
        # selectAssemblies in that case, so just warn, return an empty
        # list and continue.
        # For less specific errors also log the traceback.
        logger.warning('Can not get binary dependencies for file: %s', pth)
        logger.warning(
            ' Reason: %s', exception,
            exc_info=not isinstance(exception, pefile.PEFormatError))
        return []
def findLibrary(name):
    """
    Look for a library in the system.

    Emulate the algorithm used by dlopen.
    `name` must include the prefix, e.g. ``libpython2.4.so``.

    :return: path of the found library resolved to its soname, or None.
    """
    assert is_unix, ("Current implementation for Unix only (Linux, Solaris, "
                     "AIX, FreeBSD)")
    lib = None
    # Look in the LD_LIBRARY_PATH according to platform.
    if is_aix:
        lp = compat.getenv('LIBPATH', '')
    elif is_darwin:
        lp = compat.getenv('DYLD_LIBRARY_PATH', '')
    else:
        lp = compat.getenv('LD_LIBRARY_PATH', '')
    for path in lp.split(os.pathsep):
        libs = glob(os.path.join(path, name + '*'))
        if libs:
            lib = libs[0]
            break
    # Look in /etc/ld.so.cache
    # Solaris does not have /sbin/ldconfig. Just check if this file exists.
    if lib is None:
        utils.load_ldconfig_cache()
        lib = utils.LDCONFIG_CACHE.get(name)
        if lib:
            assert os.path.isfile(lib)
    # Look in the known safe paths.
    if lib is None:
        # Architecture independent locations.
        paths = ['/lib', '/usr/lib']
        # Architecture dependent locations.
        if compat.architecture == '32bit':
            paths.extend(['/lib32', '/usr/lib32', '/usr/lib/i386-linux-gnu'])
        else:
            paths.extend(['/lib64', '/usr/lib64', '/usr/lib/x86_64-linux-gnu'])
        # On Debian/Ubuntu /usr/bin/python is linked statically with libpython.
        # Newer Debian/Ubuntu with multiarch support puts the libpythonX.Y.so
        # in paths like /usr/lib/i386-linux-gnu/.
        try:
            # Module available only in Python 2.7+
            import sysconfig
            # 'multiarchsubdir' works on Debian/Ubuntu only in Python 2.7 and 3.3+.
            arch_subdir = sysconfig.get_config_var('multiarchsubdir')
            # Ignore if None is returned.
            if arch_subdir:
                arch_subdir = os.path.basename(arch_subdir)
                paths.append(os.path.join('/usr/lib', arch_subdir))
            else:
                logger.debug('Multiarch directory not detected.')
        except ImportError:
            logger.debug('Multiarch directory not detected.')
        # Platform-specific extra locations.
        if is_aix:
            paths.append('/opt/freeware/lib')
        elif is_hpux:
            if compat.architecture == '32bit':
                paths.append('/usr/local/lib/hpux32')
            else:
                paths.append('/usr/local/lib/hpux64')
        elif is_freebsd or is_openbsd:
            paths.append('/usr/local/lib')
        for path in paths:
            libs = glob(os.path.join(path, name + '*'))
            if libs:
                lib = libs[0]
                break
    # give up :(
    if lib is None:
        return None
    # Resolve the file name into the soname
    if is_freebsd or is_aix or is_openbsd:
        # On FreeBSD objdump doesn't show SONAME,
        # and on AIX objdump does not exist,
        # so we just return the lib we've found
        return lib
    else:
        dir = os.path.dirname(lib)
        return os.path.join(dir, _get_so_name(lib))
def _get_so_name(filename):
    """
    Return the soname of a library.

    The soname matters when several symlinks point at one library file.
    """
    # TODO verify that objdump works on other unixes and not Linux only.
    if is_solar:
        cmd = ["elfdump", "-d", filename]
        pattern = r'\s+SONAME\s+[^\s]+\s+([^\s]+)'
    else:
        cmd = ["objdump", "-p", filename]
        pattern = r'\s+SONAME\s+([^\s]+)'
    m = re.search(pattern, compat.exec_command(*cmd))
    return m.group(1)
def get_python_library_path():
    """
    Find dynamic Python library that will be bundled with frozen executable.

    NOTE: This is a fallback option when Python library is probably linked
    statically with the Python executable and we need to search more for it.
    On Debian/Ubuntu this is the case.

    Return full path to Python dynamic library or None when not found.

    We need to know name of the Python dynamic library for the bootloader.
    Bootloader has to know what library to load and not trying to guess.

    Some linux distributions (e.g. debian-based) statically build the
    Python executable to the libpython, so bindepend doesn't include
    it in its output. In this situation let's try to find it.

    Darwin custom builds could possibly also have non-framework style libraries,
    so this method also checks for that variant as well.

    :raises IOError: when no Python shared library can be located.
    """
    def _find_lib_in_libdirs(*libdirs):
        # Return the first existing candidate Python library found in any
        # of the given directories, or None.
        for libdir in libdirs:
            for name in PYDYLIB_NAMES:
                full_path = os.path.join(libdir, name)
                if os.path.exists(full_path):
                    return full_path
        return None
    # Try to get Python library name from the Python executable. It assumes that Python
    # library is not statically linked.
    dlls = getImports(sys.executable)
    for filename in dlls:
        for name in PYDYLIB_NAMES:
            if os.path.basename(filename) == name:
                # On Windows filename is just like 'python27.dll'. Convert it
                # to absolute path.
                if is_win and not os.path.isabs(filename):
                    filename = getfullnameof(filename)
                # Python library found. Return absolute path to it.
                return filename
    # Python library NOT found. Resume searching using alternative methods.
    # Work around for python venv having VERSION.dll rather than pythonXY.dll
    if is_win and 'VERSION.dll' in dlls:
        pydll = 'python%d%d.dll' % sys.version_info[:2]
        return getfullnameof(pydll)
    # Applies only to non Windows platforms and conda.
    if is_conda:
        # Conda needs to be the first here since it overrules the operating
        # system specific paths.
        python_libname = _find_lib_in_libdirs(
            os.path.join(compat.base_prefix, 'lib'))
        if python_libname:
            return python_libname
    elif is_unix:
        for name in PYDYLIB_NAMES:
            python_libname = findLibrary(name)
            if python_libname:
                return python_libname
    elif is_darwin:
        # On MacPython, Analysis.assemble is able to find the libpython with
        # no additional help, asking for sys.executable dependencies.
        # However, this fails on system python, because the shared library
        # is not listed as a dependency of the binary (most probably it's
        # opened at runtime using some dlopen trickery).
        # This happens on Mac OS X when Python is compiled as Framework.
        # Python compiled as Framework contains same values in sys.prefix
        # and exec_prefix. That's why we can use just sys.prefix.
        # In virtualenv PyInstaller is not able to find Python library.
        # We need special care for this case.
        python_libname = _find_lib_in_libdirs(compat.base_prefix)
        if python_libname:
            return python_libname
    # Python library NOT found. Provide helpful feedback.
    msg = """Python library not found: %s
    This would mean your Python installation doesn't come with proper library files.
    This usually happens by missing development package, or unsuitable build parameters of Python installation.
    * On Debian/Ubuntu, you would need to install Python development packages
      * apt-get install python3-dev
      * apt-get install python-dev
    * If you're building Python by yourself, please rebuild your Python with `--enable-shared` (or, `--enable-framework` on Darwin)
    """ % (", ".join(PYDYLIB_NAMES),)
    raise IOError(msg)
def findSystemLibrary(name):
    """
    Given a library name, try to resolve the path to that library.

    If the path is already an absolute path, return it without searching.
    """
    if os.path.isabs(name):
        return name
    if is_unix:
        return findLibrary(name)
    if is_win:
        return getfullnameof(name)
    # This seems to work, and is similar to what we have above..
    return ctypes.util.find_library(name)
| gpl-3.0 |
TribusGNULinux/tribus | tribus/common/charms/bundle.py | 2 | 3818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Tribus Developers
#
# This file is part of Tribus.
#
# Tribus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tribus is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Contains a representation of a bundle charm.
CharmBundle is a representation of a ZIP file containing a Charm.
"""
import hashlib
import tempfile
import os
import stat
from zipfile import ZipFile, BadZipfile
from tribus.common.charms.base import CharmBase, get_revision
from tribus.common.charms.config import ConfigOptions
from tribus.common.charms.metadata import MetaData
from tribus.common.charms.directory import CharmDirectory
from tribus.common.errors import CharmError
from tribus.common.filehash import compute_file_hash
class CharmBundle(CharmBase):
    """ZIP-archive that contains charm directory content."""

    # Discriminator used by callers to distinguish zipped bundles from
    # plain charm directories.
    type = 'bundle'

    def __init__(self, path):
        """Set initial values and parse configuration files from the charm.

        ``path`` may be an open file object (the Python 2 ``file`` type)
        or a filesystem path string.

        Raises CharmError when the archive is not a valid zip, lacks
        metadata.yaml, or has no resolvable revision.
        """
        self.path = isinstance(path, file) and path.name or path
        try:
            zf = ZipFile(path, 'r')
        except BadZipfile, exc:
            raise CharmError(path, 'must be a zip file (%s)' % exc)
        if 'metadata.yaml' not in zf.namelist():
            raise CharmError(path, ('charm does not contain required'
                                    ' file "metadata.yaml"'))
        self.metadata = MetaData()
        self.metadata.parse(zf.read('metadata.yaml'))
        try:
            revision_content = zf.read('revision')
        except KeyError:
            # ZipFile.read raises KeyError for a missing member; fall back
            # to deriving the revision from metadata below.
            revision_content = None
        self._revision = get_revision(revision_content, self.metadata,
                                      self.path)
        if self._revision is None:
            raise CharmError(self.path, 'has no revision')
        self.config = ConfigOptions()
        if 'config.yaml' in zf.namelist():
            self.config.parse(zf.read('config.yaml'))

    def get_revision(self):
        """Get charm revision from bundle."""
        return self._revision

    def compute_sha256(self):
        """
        Return the SHA256 digest for this charm bundle.

        The digest is extracted out of the final bundle file itself.
        """
        return compute_file_hash(hashlib.sha256, self.path)

    def extract_to(self, directory_path):
        """Extract the bundle to folder and return a CharmDirectory handle."""
        zf = ZipFile(self.path, 'r')
        for info in zf.infolist():
            # The upper 16 bits of external_attr hold the Unix file mode.
            mode = info.external_attr >> 16
            if stat.S_ISLNK(mode):
                # Symlinks are stored as regular members whose content is
                # the link target; recreate them as actual symlinks.
                source = zf.read(info.filename)
                target = os.path.join(directory_path, info.filename)
                if os.path.exists(target):
                    os.remove(target)
                os.symlink(source, target)
                continue
            extract_path = zf.extract(info, directory_path)
            # Restore the original permissions, which extract() discards.
            os.chmod(extract_path, mode)
        return CharmDirectory(directory_path)

    def as_bundle(self):
        """Return the bundle as a CharmBundle instance."""
        return self

    def as_directory(self):
        """Return the bundle as a CharmDirectory using a temporary path."""
        dn = tempfile.mkdtemp(prefix="tmp-charm-")
        return self.extract_to(dn)
| gpl-3.0 |
SaberMod/gdb-saber | gdb/contrib/test_pubnames_and_indexes.py | 46 | 6368 | #! /usr/bin/env python
# Copyright (C) 2011-2015 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This program requires readelf, gdb and objcopy. The default values are gdb
# from the build tree and objcopy and readelf from $PATH. They may be
# overridden by setting environment variables GDB, READELF and OBJCOPY
# respectively. We assume the current directory is either $obj/gdb or
# $obj/gdb/testsuite.
#
# Example usage:
#
# bash$ cd $objdir/gdb/testsuite
# bash$ python test_pubnames_and_indexes.py <binary_name>
"""test_pubnames_and_indexes.py
Test that the gdb_index produced by gold is identical to the gdb_index
produced by gdb itself.
Further check that the pubnames and pubtypes produced by gcc are identical
to those that gdb produces.
Finally, check that all strings are canonicalized identically.
"""
__author__ = 'saugustine@google.com (Sterling Augustine)'
import os
import subprocess
import sys
# Paths of the external tools used by this script.  They start out unset
# and are filled in by find_executables() before any tool is invoked.
OBJCOPY = None
READELF = None
GDB = None
def get_pub_info(filename, readelf_option):
    """Collect the pubnames or pubtypes readelf reports for FILENAME.

    readelf_option is the argument to --debug-dump= ('pubnames' or
    'pubtypes').  Returns the list of names, one per table entry.
    """
    proc = subprocess.Popen([READELF, '--debug-dump=' + readelf_option,
                             filename], stdout=subprocess.PIPE)
    names = []
    collecting = False
    for line in proc.stdout:
        fields = line.split(None, 1)
        # An "Offset  Name" header line starts a new table of entries.
        if (len(fields) == 2 and fields[0] == 'Offset'
                and fields[1].strip() == 'Name'):
            collecting = True
        # A blank line or a new "Length:" field ends the current section.
        elif not fields or fields[0] == 'Length:':
            collecting = False
        elif collecting:
            names.append(fields[1].strip())
    proc.wait()
    return names
def get_gdb_index(filename):
    """Dump FILENAME's .gdb_index section with readelf and return its symbols."""
    proc = subprocess.Popen([READELF, '--debug-dump=gdb_index',
                             filename], stdout=subprocess.PIPE)
    symbols = []
    in_symbol_table = False
    for line in proc.stdout:
        if line == 'Symbol table:\n':
            in_symbol_table = True
        elif in_symbol_table:
            # Readelf prints gdb-index lines formatted like so:
            #   [  4] two::c2<double>::c2: 0
            # Keep the text between the first close bracket and the last colon.
            symbols.append(line[line.find(']') + 2: line.rfind(':')])
    proc.wait()
    return symbols
def CheckSets(list0, list1, name0, name1):
    """Report any setwise differences between the two lists.

    Prints every element that appears in only one of the lists.  Returns
    True if the lists differ, False if they are setwise identical.  If
    either list is empty there is nothing meaningful to compare (the
    caller reports empty inputs separately), so the result is False.
    """
    if not list0 or not list1:
        return False
    # Single-argument print() works under both Python 2 and Python 3;
    # the original "print x," statements were Python-2-only.
    difference0 = set(list0) - set(list1)
    if difference0:
        print("Elements in %s but not %s: ( %d )"
              % (name0, name1, len(difference0)))
        for element in difference0:
            print("  " + element)
    difference1 = set(list1) - set(list0)
    if difference1:
        print("Elements in %s but not %s: ( %d )"
              % (name1, name0, len(difference1)))
        for element in difference1:
            print("  " + element)
    if difference0 or difference1:
        return True
    print(name0 + " and " + name1 + " are identical.")
    return False
def find_executables():
    """Locate the readelf, objcopy and gdb executables to use.

    The chosen paths are published through the READELF, OBJCOPY and GDB
    module globals.  Environment variables of the same names take
    precedence; otherwise gdb is looked for in the build tree, and every
    tool falls back to whatever is on $PATH.  This mirrors the executable
    finding logic of cc-with-index.sh.
    """
    global READELF, OBJCOPY, GDB
    # environ.get with a default matches the original "None -> fallback"
    # behavior exactly: only an *unset* variable triggers the default.
    READELF = os.environ.get('READELF', 'readelf')
    OBJCOPY = os.environ.get('OBJCOPY', 'objcopy')
    GDB = os.environ.get('GDB')
    if GDB is None:
        for candidate in ('./gdb', '../gdb', '../../gdb'):
            if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                GDB = candidate
                break
        else:
            # Punt and use the gdb in the path.
            GDB = 'gdb'
def main(argv):
    """The main subprogram.

    argv[1] names a binary whose .gdb_index was produced by gold.
    Regenerate the index with gdb itself, dump the gcc-produced
    pubnames/pubtypes, and report any differences between the three.
    Exits with status 2 on bad usage and 1 on a mismatch.
    """
    if len(argv) != 2:
        print("Usage: test_pubnames_and_indexes.py <filename>")
        sys.exit(2)
    find_executables()
    # Get the index produced by gold--it should have been built into the binary.
    gold_index = get_gdb_index(argv[1])
    # Collect the pubnames and pubtypes lists.
    pubs_list = get_pub_info(argv[1], "pubnames")
    pubs_list = pubs_list + get_pub_info(argv[1], "pubtypes")
    # Generate a .gdb_index with gdb.
    gdb_index_file = argv[1] + '.gdb-generated-index'
    subprocess.check_call([OBJCOPY, '--remove-section', '.gdb_index',
                           argv[1], gdb_index_file])
    # 'save gdb-index' takes the *directory* to write <file>.gdb-index into;
    # fall back to '.' when argv[1] has no directory component, since
    # os.path.dirname() would otherwise yield an empty (invalid) argument.
    subprocess.check_call([GDB, '-batch', '-nx', gdb_index_file,
                           '-ex',
                           'save gdb-index ' + (os.path.dirname(argv[1]) or '.'),
                           '-ex', 'quit'])
    subprocess.check_call([OBJCOPY, '--add-section',
                           '.gdb_index=' + gdb_index_file + '.gdb-index',
                           gdb_index_file])
    gdb_index = get_gdb_index(gdb_index_file)
    os.remove(gdb_index_file)
    os.remove(gdb_index_file + '.gdb-index')
    failed = False
    gdb_index.sort()
    gold_index.sort()
    pubs_list.sort()
    # Find the differences between the various indices.
    if not gold_index:
        print("Gold index is empty")
        failed = True
    if not gdb_index:
        print("Gdb index is empty")
        failed = True
    if not pubs_list:
        print("Pubs list is empty")
        failed = True
    failed |= CheckSets(gdb_index, gold_index, "gdb index", "gold index")
    failed |= CheckSets(pubs_list, gold_index, "pubs list", "gold index")
    failed |= CheckSets(pubs_list, gdb_index, "pubs list", "gdb index")
    if failed:
        print("Test failed")
        sys.exit(1)
# Script entry point: argv[1] must name the binary to check.
if __name__ == '__main__':
    main(sys.argv)
| gpl-2.0 |
arborh/tensorflow | tensorflow/lite/python/lite.py | 3 | 47752 | # Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite tooling helper functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import warnings
import six
from six import PY3
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.lite.experimental.examples.lstm.rnn import dynamic_rnn # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TFLiteLSTMCell # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TfLiteRNNCell # pylint: disable=unused-import
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op # pylint: disable=unused-import
from tensorflow.lite.experimental.tensorboard.ops_util import get_potentially_supported_ops # pylint: disable=unused-import
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.convert import build_toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert import ConverterError # pylint: disable=unused-import
from tensorflow.lite.python.convert import OpsSet
from tensorflow.lite.python.convert import toco_convert # pylint: disable=unused-import
from tensorflow.lite.python.convert import toco_convert_graph_def as _toco_convert_graph_def
from tensorflow.lite.python.convert import toco_convert_impl as _toco_convert_impl
from tensorflow.lite.python.convert import toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
from tensorflow.lite.python.interpreter import Interpreter # pylint: disable=unused-import
from tensorflow.lite.python.interpreter import load_delegate # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.lite.python.optimize import calibrator as _calibrator
from tensorflow.lite.python.util import build_debug_info_func as _build_debug_info_func
from tensorflow.lite.python.util import convert_debug_info_func as _convert_debug_info_func
from tensorflow.lite.python.util import freeze_graph as _freeze_graph
from tensorflow.lite.python.util import get_debug_info as _get_debug_info
from tensorflow.lite.python.util import get_grappler_config as _get_grappler_config
from tensorflow.lite.python.util import get_tensor_name as _get_tensor_name
from tensorflow.lite.python.util import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
from tensorflow.lite.python.util import is_frozen_graph as _is_frozen_graph
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tensorflow.lite.python.util import set_tensor_shapes as _set_tensor_shapes
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function as _def_function
from tensorflow.python.eager import function as _function
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.keras.saving import saving_utils as _saving_utils
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
from tensorflow.python.saved_model.load import load as _load
from tensorflow.python.util import deprecation as _deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
@_tf_export("lite.Optimize")
class Optimize(enum.Enum):
  """Enum defining the optimizations to apply when generating tflite graphs.

  Some optimizations may come at the cost of accuracy.
  """

  # Default optimization strategy.
  #
  # Converter will do its best to improve size and latency based on the
  # information provided.
  # Enhanced optimizations can be gained by providing a representative_dataset.
  # This is recommended, and is currently equivalent to the modes below.
  # Currently, weights will be quantized and if representative_dataset is
  # provided, activations for quantizable operations will also be quantized.
  DEFAULT = "DEFAULT"

  # Optimize for size.
  #
  # Optimizations that reduce the size of the model.
  # The model size will be reduced.
  # Currently, weights will be quantized and if representative_dataset is
  # provided, activations for quantizable operations will also be quantized.
  OPTIMIZE_FOR_SIZE = "OPTIMIZE_FOR_SIZE"

  # Optimize for latency.
  #
  # Optimizations that reduce the latency of the model.
  # Currently, weights will be quantized and if representative_dataset is
  # provided, activations for quantizable operations will also be quantized.
  OPTIMIZE_FOR_LATENCY = "OPTIMIZE_FOR_LATENCY"

  def __str__(self):
    # Render the enum as its bare string value (e.g. "DEFAULT") so it can be
    # passed straight through to the converter's flag plumbing.
    return self.value
@_tf_export("lite.RepresentativeDataset")
class RepresentativeDataset(object):
  """Representative dataset to evaluate optimizations.

  A representative dataset that can be used to evaluate optimizations by the
  converter. E.g. converter can use these examples to estimate (min, max)
  ranges by calibrating the model on inputs. This can allow converter to
  quantize a converted floating point model.
  """

  def __init__(self, input_gen):
    """Creates a representative dataset.

    Args:
      input_gen: an input generator that can be used to generate input samples
        for the model. This must be a callable object that returns an object
        that supports the `iter()` protocol (e.g. a generator function). The
        elements generated must have same type and shape as inputs to the
        model.
    """
    # Stored as-is; consumed later by Calibrator.calibrate_and_quantize().
    self.input_gen = input_gen
@_tf_export("lite.TargetSpec")
class TargetSpec(object):
  """Specification of target device.

  Describes the device a converted model should be optimized for.

  Attributes:
    supported_ops: Experimental flag, subject to change. Set of OpsSet options
      supported by the device. (default set([OpsSet.TFLITE_BUILTINS]))
    supported_types: List of types for constant values on the target device.
      Supported values are types exported by lite.constants. Frequently, an
      optimization choice is driven by the most compact (i.e. smallest) type
      in this list (default [constants.FLOAT])
  """

  def __init__(self, supported_ops=None, supported_types=None):
    # Default to the builtin TFLite op set when the caller gives no ops.
    self.supported_ops = (
        {OpsSet.TFLITE_BUILTINS} if supported_ops is None else supported_ops)
    # An empty type list means "no constraint on constant types".
    self.supported_types = [] if supported_types is None else supported_types
class TFLiteConverterBase(object):
  """Converter subclass to share functionality between V1 and V2 converters."""

  def __init__(self):
    # When True, unknown ops become custom ops instead of conversion errors.
    self.allow_custom_ops = False
    # Target-device specification (supported ops / constant types).
    self.target_spec = TargetSpec()
    # List of Optimize enum values requested by the user.
    self.optimizations = []
    # Optional RepresentativeDataset used for calibration-based quantization.
    self.representative_dataset = None
    # Experimental: use the MLIR-based converter instead of TOCO.
    self.experimental_new_converter = False
    # Experimental: use the MLIR-based post-training quantizer.
    self.experimental_new_quantizer = False
    # The 'GraphDebugInfo' contains the stack traces of all the original nodes
    # in the `GraphDef` to the converter.
    self._debug_info = None

  def _grappler_config(self):
    """Builds the Grappler config for the pre-conversion optimization pass."""
    is_only_flex_enabled = (
        set([OpsSet.SELECT_TF_OPS]) == set(self.target_spec.supported_ops))
    optimizers = ["constfold"]
    if is_only_flex_enabled:
      # The layout optimizer turns NHCW to NCHW. This provides performance
      # optimizations when Flex mode is enabled. However, this is not
      # compatible with builtin ops.
      optimizers.append("layout")
    return _get_grappler_config(optimizers)

  def _validate_representative_dataset(self):
    """Normalizes representative_dataset and checks it is present if required.

    Raises:
      ValueError: the dataset is missing an input generator, or an int8-only
        target was requested without providing a dataset.
    """
    if self.representative_dataset:
      # Accept a bare generator callable by wrapping it on the user's behalf.
      if not isinstance(self.representative_dataset, RepresentativeDataset):
        self.representative_dataset = RepresentativeDataset(
            self.representative_dataset)
      if self.representative_dataset.input_gen is None:
        raise ValueError(
            "Provide an input generator for representative_dataset")
    elif self._is_int8_target_required():
      raise ValueError("representative_dataset is required when specifying "
                       "TFLITE_BUILTINS_INT8 or INT8 supported types.")

  def _validate_quantization(self):
    """Checks that INT8-only targets are not combined with larger types.

    Raises:
      ValueError: an int8 target was requested but the smallest supported
        type is not INT8.
    """
    if self._is_int8_target_required():
      if self.target_spec.supported_types and (self._smallest_supported_type()
                                               != constants.INT8):
        raise ValueError("TFLITE_BUILTINS_INT8 requires smallest supported "
                         "type to be INT8.")

  def _is_int8_target_required(self):
    # Either the op set is exactly TFLITE_BUILTINS_INT8, or INT8 is the
    # smallest type in target_spec.supported_types.
    return (set([OpsSet.TFLITE_BUILTINS_INT8]) == set(
        self.target_spec.supported_ops) or
            self._smallest_supported_type() == constants.INT8)

  def _smallest_supported_type(self):
    # Returns None when the user did not constrain constant types at all;
    # callers rely on that None to mean "no preference".
    if self.target_spec.supported_types:
      return min(self.target_spec.supported_types, key=lambda x: x.size)
    else:
      return None

  def _any_optimization_enabled(self):
    # True if any of the (currently equivalent) Optimize modes was requested.
    return bool(
        set(self.optimizations).intersection([
            Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE,
            Optimize.DEFAULT
        ]))

  def _is_post_training_optimize(self):
    return self._is_int8_target_required() or self._any_optimization_enabled()

  def _is_int8_weight_only_quantize(self):
    # Weight-only quantization: optimization requested but no calibration
    # data available, so activations stay float.
    return (self._is_post_training_optimize() and
            (self.representative_dataset is None))

  def _is_float16_quantize(self):
    return self._any_optimization_enabled() and (
        self._smallest_supported_type() == constants.FLOAT16)

  def _is_calibration_quantize(self):
    # Full calibration-based quantization needs a dataset and must not be
    # in float16 mode (float16 is handled by the converter itself).
    return (self._is_post_training_optimize() and
            self.representative_dataset and
            self._smallest_supported_type() != constants.FLOAT16)

  def _calibrate_quantize_model(self, result, inference_input_type,
                                inference_output_type, enable_mlir_quantizer):
    """Calibrates a converted model with the representative dataset and
    quantizes it, returning the quantized flatbuffer."""
    # Float fallback is only forbidden when the target is strictly int8.
    allow_float = not self._is_int8_target_required()
    calibrate_quantize = _calibrator.Calibrator(result)
    return calibrate_quantize.calibrate_and_quantize(
        self.representative_dataset.input_gen, inference_input_type,
        inference_output_type, allow_float, enable_mlir_quantizer)

  def _get_base_converter_args(self):
    """Returns the base converter args.

    Returns:
      {key str: val}
    """
    float16_quantize = self._is_float16_quantize()
    args = {
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "allow_custom_ops": self.allow_custom_ops,
        # post_training_quantize covers both weight-only int8 and float16.
        "post_training_quantize": (self._is_int8_weight_only_quantize() or
                                   float16_quantize),
        "quantize_to_float16": float16_quantize,
        "debug_info": self._debug_info,
        "target_ops": self.target_spec.supported_ops,
        "enable_mlir_converter": self.experimental_new_converter,
    }
    return args
@_tf_export("lite.TFLiteConverter", v1=[])
class TFLiteConverterV2(TFLiteConverterBase):
  """Converts a TensorFlow model into TensorFlow Lite model.

  Attributes:
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    target_spec: Experimental flag, subject to change. Specification of target
      device.
    optimizations: Experimental flag, subject to change. A list of
      optimizations to apply when converting the model.
      E.g. `[Optimize.DEFAULT]`
    representative_dataset: A representative dataset that can be used to
      generate input and output samples for the model. The converter can use
      the dataset to evaluate different optimizations.
    experimental_new_converter: Experimental flag, subject to change.
      Enables MLIR-based conversion instead of TOCO conversion.
    experimental_new_quantizer: Experimental flag, subject to change.
      Enables MLIR-based post-training quantization.

  Example usage:

  ```python
  # Converting a SavedModel to a TensorFlow Lite model.
  converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
  tflite_model = converter.convert()

  # Converting a tf.Keras model to a TensorFlow Lite model.
  converter = lite.TFLiteConverter.from_keras_model(model)
  tflite_model = converter.convert()

  # Converting ConcreteFunctions to a TensorFlow Lite model.
  converter = lite.TFLiteConverter.from_concrete_functions([func])
  tflite_model = converter.convert()
  ```
  """

  def __init__(self, funcs, trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do
        not get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is
        not maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteConverterV2, self).__init__()
    self._funcs = funcs
    self._trackable_obj = trackable_obj

  @classmethod
  def from_concrete_functions(cls, funcs):
    """Creates a TFLiteConverter object from ConcreteFunctions.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements. Currently converter can only convert a single
        ConcreteFunction. Converting multiple functions is under development.

    Returns:
      TFLiteConverter object.

    Raises:
      Invalid input type.
    """
    for func in funcs:
      if not isinstance(func, _function.ConcreteFunction):
        message = "This function takes in a list of ConcreteFunction."
        if isinstance(func, _def_function.Function):
          # A tf.function was passed instead of its traced ConcreteFunction;
          # point the user at the fix.
          message += (" To get the ConcreteFunction from a Function,"
                      " call from_concrete_function.")
        raise ValueError(message)
    return cls(funcs)

  @classmethod
  def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
    """Creates a TFLiteConverter object from a SavedModel directory.

    Args:
      saved_model_dir: SavedModel directory to convert.
      signature_keys: List of keys identifying SignatureDef containing inputs
        and outputs. Elements should not be duplicated. By default the
        `signatures` attribute of the MetaGraphdef is used. (default
        saved_model.signatures)
      tags: Set of tags identifying the MetaGraphDef within the SavedModel to
        analyze. All tags in the tag set must be present. (default
        set(SERVING))

    Returns:
      TFLiteConverter object.

    Raises:
      Invalid signature keys.
    """
    # Ensures any graphs created in Eager mode are able to run. This is
    # required in order to create a tf.estimator.Exporter that exports a
    # TFLite model.
    with context.eager_mode():
      saved_model = _load(saved_model_dir, tags)
    if not signature_keys:
      signature_keys = saved_model.signatures

    funcs = []
    for key in signature_keys:
      if key not in saved_model.signatures:
        raise ValueError("Invalid signature key '{}' found. Valid keys are "
                         "'{}'.".format(key, ",".join(saved_model.signatures)))
      funcs.append(saved_model.signatures[key])

    # Pass the loaded SavedModel as trackable_obj to keep its Variables alive.
    return cls(funcs, saved_model)

  @classmethod
  def from_keras_model(cls, model):
    """Creates a TFLiteConverter object from a Keras model.

    Args:
      model: tf.Keras.Model

    Returns:
      TFLiteConverter object.
    """
    input_signature = None
    # If the model's call is not a `tf.function`, then we need to first get
    # its input signature from `model_input_signature` method. We can't
    # directly call `trace_model_call` because otherwise the batch dimension
    # is set to None.
    # Once we have better support for dynamic shapes, we can remove this.
    if not isinstance(model.call, _def_function.Function):
      # Pass `keep_original_batch_size=True` will ensure that we get an input
      # signature including the batch dimension specified by the user.
      input_signature = _saving_utils.model_input_signature(
          model, keep_original_batch_size=True)

    func = _saving_utils.trace_model_call(model, input_signature)
    concrete_func = func.get_concrete_function()
    return cls([concrete_func])

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # TODO(b/130297984): Add support for converting multiple function.
    if len(self._funcs) != 1:
      raise ValueError("This converter can only convert a single "
                       "ConcreteFunction. Converting multiple functions is "
                       "under development.")

    # graph_def is used here to preserve the node bug information
    frozen_func, graph_def = (
        _convert_to_constants.convert_variables_to_constants_v2_as_graph(
            self._funcs[0], lower_control_flow=False))
    # Resource tensors (e.g. variable handles) are not model inputs.
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    # Run a Grappler pass.
    graph_def = _run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=self._grappler_config(),
        graph=frozen_func.graph)

    # Checks dimensions in input tensor.
    for tensor in input_tensors:
      # Note that shape_list might be empty for scalar shapes.
      shape_list = tensor.shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_get_tensor_name(tensor),
                                          shape_list))
      elif shape_list and shape_list[0] is None:
        # Set the batch size to 1 if undefined.
        shape = tensor.shape.as_list()
        shape[0] = 1
        tensor.set_shape(shape)

    self._validate_quantization()
    self._validate_representative_dataset()

    # Build debug info from the function's own graph when no trackable
    # object (e.g. a loaded SavedModel) supplied one.
    if self._trackable_obj is None:
      self._debug_info = _get_debug_info(
          _build_debug_info_func(self._funcs[0].graph), graph_def)
    else:
      self._debug_info = _get_debug_info(
          _convert_debug_info_func(self._trackable_obj.graph_debug_info),
          graph_def)

    converter_kwargs = self._get_base_converter_args()

    # Converts model.
    result = _toco_convert_impl(
        input_data=graph_def,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        **converter_kwargs)

    # Optionally run calibration-based post-training quantization.
    if self._is_calibration_quantize():
      result = self._calibrate_quantize_model(
          result, constants.FLOAT, constants.FLOAT,
          self.experimental_new_quantizer)

    return result
@_tf_export(v1=["lite.TFLiteConverter"])
class TFLiteConverter(TFLiteConverterBase):
"""Convert a TensorFlow model into `output_format`.
This is used to convert from a TensorFlow GraphDef, SavedModel or tf.keras
model into either a TFLite FlatBuffer or graph visualization.
Attributes:
inference_type: Target data type of real-number arrays in the output file.
Must be `{tf.float32, tf.uint8}`. If `optimzations` are provided, this
parameter is ignored. (default tf.float32)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays.
If an integer type is provided and `optimizations` are not used,
`quantized_inputs_stats` must be provided.
If `inference_type` is tf.uint8, signaling conversion to a fully quantized
model from a quantization-aware trained input model, then
`inference_input_type` defaults to tf.uint8.
In all other cases, `inference_input_type` defaults to tf.float32.
Must be `{tf.float32, tf.uint8, tf.int8}`
inference_output_type: Target data type of real-number output arrays. Allows
for a different type for output arrays.
If `inference_type` is tf.uint8, signaling conversion to a fully quantized
model from a quantization-aware trained output model, then
`inference_output_type` defaults to tf.uint8.
In all other cases, `inference_output_type` must be tf.float32, an error
will be thrown otherwise.
Must be `{tf.float32, tf.uint8, tf.int8}`
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: Dict of strings representing input tensor names
mapped to tuple of floats representing the mean and standard deviation
of the training data (e.g., {"foo" : (0., 1.)}). Only need if
`inference_input_type` is `QUANTIZED_UINT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default {})
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
post_training_quantize: Deprecated. Please specify `[Optimize.DEFAULT]` for
`optimizations` instead. Boolean indicating whether to quantize the
weights of the converted float model. Model size will be reduced and
there will be latency improvements (at the cost of accuracy).
(default False)
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
--output_format=GRAPHVIZ_DOT in order to keep the requirements of the
output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
conversion_summary_dir: A string indicating the path to the generated
conversion logs.
target_ops: Deprecated. Please specify `target_spec.supported_ops` instead.
Set of OpsSet options indicating which converter to use.
(default set([OpsSet.TFLITE_BUILTINS]))
target_spec: Experimental flag, subject to change. Specification of target
device.
optimizations: Experimental flag, subject to change. A list of optimizations
to apply when converting the model. E.g. `[Optimize.DEFAULT]`
representative_dataset: A representative dataset that can be used to
generate input and output samples for the model. The converter can use
the dataset to evaluate different optimizations.
experimental_new_converter: Experimental flag, subject to change.
Enables MLIR-based conversion instead of TOCO conversion.
experimental_new_quantizer: Experimental flag, subject to change.
Enables MLIR-based post-training quantization.
Example usage:
```python
# Converting a GraphDef from session.
converter = lite.TFLiteConverter.from_session(sess, in_tensors, out_tensors)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a GraphDef from file.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, input_arrays, output_arrays)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a SavedModel.
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a tf.keras model.
converter = lite.TFLiteConverter.from_keras_model_file(keras_model)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
```
"""
def __init__(self,
graph_def,
input_tensors,
output_tensors,
input_arrays_with_shape=None,
output_arrays=None,
experimental_debug_info_func=None):
"""Constructor for TFLiteConverter.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes
(e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` and `output_tensors` are
None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `input_tensors` and
`output_tensors` are None. (default None)
experimental_debug_info_func: An experimental function to retrieve the
graph debug info for a set of nodes from the `graph_def`.
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteConverter, self).__init__()
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self.inference_type = constants.FLOAT
self.inference_input_type = None
self.inference_output_type = None
self.output_format = constants.TFLITE
self.quantized_input_stats = {}
self.default_ranges_stats = None
self.drop_control_dependency = True
self.reorder_across_fake_quant = False
self.change_concat_input_ranges = False
self._post_training_quantize = False
self.dump_graphviz_dir = None
self.dump_graphviz_video = False
self.conversion_summary_dir = None
self._debug_info_func = experimental_debug_info_func
self._custom_opdefs = None
# Attributes are used by models that cannot be loaded into TensorFlow.
if not self._has_valid_tensors():
if not input_arrays_with_shape or not output_arrays:
raise ValueError(
"If input_tensors and output_tensors are None, both "
"input_arrays_with_shape and output_arrays must be defined.")
self._input_arrays_with_shape = input_arrays_with_shape
self._output_arrays = output_arrays
@classmethod
def from_session(cls, sess, input_tensors, output_tensors):
"""Creates a TFLiteConverter class from a TensorFlow Session.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
Returns:
TFLiteConverter class.
"""
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
return cls(
graph_def,
input_tensors,
output_tensors,
experimental_debug_info_func=_build_debug_info_func(sess.graph))
  @classmethod
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Creates a TFLiteConverter class from a file containing a frozen GraphDef.

    Args:
      graph_def_file: Full filepath of file containing frozen GraphDef.
      input_arrays: List of input tensors to freeze graph with.
      output_arrays: List of output tensors to freeze graph with.
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)

    Returns:
      TFLiteConverter class.

    Raises:
      IOError:
        File not found.
        Unable to parse input file.
      ValueError:
        The graph is not frozen.
        input_arrays or output_arrays contains an invalid tensor name.
        input_shapes is not correctly defined when required
    """
    with _ops.Graph().as_default():
      with _session.Session() as sess:
        # Read GraphDef from file.
        if not _file_io.file_exists(graph_def_file):
          raise IOError("File '{0}' does not exist.".format(graph_def_file))
        with _file_io.FileIO(graph_def_file, "rb") as f:
          file_content = f.read()

        # First attempt binary protobuf parsing; on failure, retry the same
        # content as a text-format GraphDef before giving up.
        try:
          graph_def = _graph_pb2.GraphDef()
          graph_def.ParseFromString(file_content)
        except (_text_format.ParseError, DecodeError):
          try:
            print("Ignore 'tcmalloc: large alloc' warnings.")

            if not isinstance(file_content, str):
              if PY3:
                file_content = six.ensure_text(file_content, "utf-8")
              else:
                file_content = six.ensure_binary(file_content, "utf-8")
            graph_def = _graph_pb2.GraphDef()
            _text_format.Merge(file_content, graph_def)
          except (_text_format.ParseError, DecodeError):
            raise IOError(
                "Unable to parse input file '{}'.".format(graph_def_file))

        # Handles models with custom TFLite ops that cannot be resolved in
        # TensorFlow.
        load_model_in_session = True
        try:
          _import_graph_def(graph_def, name="")
        except _NotFoundError:
          load_model_in_session = False

        if load_model_in_session:
          # Check if graph is frozen.
          if not _is_frozen_graph(sess):
            raise ValueError("Please freeze the graph using freeze_graph.py.")

          # Get input and output tensors.
          input_tensors = _get_tensors_from_tensor_names(
              sess.graph, input_arrays)
          output_tensors = _get_tensors_from_tensor_names(
              sess.graph, output_arrays)
          _set_tensor_shapes(input_tensors, input_shapes)

          return cls(sess.graph_def, input_tensors, output_tensors)
        else:
          # The graph could not be imported; fall back to name/shape pairs,
          # which requires every input shape to be provided explicitly.
          if not input_shapes:
            raise ValueError("input_shapes must be defined for this model.")
          if set(input_arrays) != set(input_shapes.keys()):
            raise ValueError("input_shapes must contain a value for each item "
                             "in input_array.")

          input_arrays_with_shape = [
              (name, input_shapes[name]) for name in input_arrays
          ]
          return cls(
              graph_def,
              input_tensors=None,
              output_tensors=None,
              input_arrays_with_shape=input_arrays_with_shape,
              output_arrays=output_arrays)
  @classmethod
  def from_saved_model(cls,
                       saved_model_dir,
                       input_arrays=None,
                       input_shapes=None,
                       output_arrays=None,
                       tag_set=None,
                       signature_key=None):
    """Creates a TFLiteConverter class from a SavedModel.

    Args:
      saved_model_dir: SavedModel directory to convert.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
        analyze. All tags in the tag set must be present. (default set("serve"))
      signature_key: Key identifying SignatureDef containing inputs and outputs.
        (default DEFAULT_SERVING_SIGNATURE_DEF_KEY)

    Returns:
      TFLiteConverter class.
    """
    # Apply documented defaults before freezing.
    if tag_set is None:
      tag_set = set([_tag_constants.SERVING])
    if signature_key is None:
      signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY

    # result is (graph_def, input_tensors, output_tensors, graph).
    result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
                                 output_arrays, tag_set, signature_key)
    return cls(
        graph_def=result[0],
        input_tensors=result[1],
        output_tensors=result[2],
        experimental_debug_info_func=_build_debug_info_func(result[3]))
  @classmethod
  def from_keras_model_file(cls,
                            model_file,
                            input_arrays=None,
                            input_shapes=None,
                            output_arrays=None,
                            custom_objects=None):
    """Creates a TFLiteConverter class from a tf.keras model file.

    Args:
      model_file: Full filepath of HDF5 file containing the tf.keras model.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      custom_objects: Dict mapping names (strings) to custom classes or
        functions to be considered during model deserialization. (default None)

    Returns:
      TFLiteConverter class.
    """
    # Handles Keras when Eager mode is enabled.
    if context.executing_eagerly():
      if input_arrays or output_arrays:
        raise ValueError("`input_arrays` and `output_arrays` are unsupported "
                         "with Eager mode. If your model requires any of these "
                         "parameters, please use disable_eager_execution().")

      # Trace the model into a ConcreteFunction and freeze its variables.
      _keras.backend.set_learning_phase(False)
      keras_model = _keras.models.load_model(model_file, custom_objects)

      function = _saving_utils.trace_model_call(keras_model)
      concrete_func = function.get_concrete_function()

      frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
          concrete_func, lower_control_flow=False)
      _set_tensor_shapes(frozen_func.inputs, input_shapes)
      return cls(
          frozen_func.graph.as_graph_def(),
          frozen_func.inputs,
          frozen_func.outputs,
          experimental_debug_info_func=_build_debug_info_func(
              frozen_func.graph))

    # Handles Keras when Eager mode is disabled.
    # clear_session before load gives the model a fresh graph to load into.
    _keras.backend.clear_session()
    _keras.backend.set_learning_phase(False)
    keras_model = _keras.models.load_model(model_file, custom_objects)
    sess = _keras.backend.get_session()

    # Get input and output tensors. Explicit arrays override the model's own
    # declared inputs/outputs.
    if input_arrays:
      input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
    else:
      input_tensors = keras_model.inputs

    if output_arrays:
      output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
    else:
      output_tensors = keras_model.outputs
    _set_tensor_shapes(input_tensors, input_shapes)

    graph_def = _freeze_graph(sess, input_tensors, output_tensors)
    return cls(
        graph_def,
        input_tensors,
        output_tensors,
        experimental_debug_info_func=_build_debug_info_func(sess.graph))
  def __setattr__(self, name, value):
    # Intercepts writes to deprecated attribute names and forwards them to
    # their replacement APIs; all other attributes are stored normally via
    # object.__setattr__ (which avoids re-entering this hook).
    if name == "post_training_quantize":
      warnings.warn("Property %s is deprecated, "
                    "please use optimizations=[Optimize.DEFAULT]"
                    " instead." % name)
      if value:
        self.optimizations = [Optimize.DEFAULT]
      else:
        self.optimizations = []
      return
    if name == "target_ops":
      warnings.warn("Property %s is deprecated, please use "
                    "target_spec.supported_ops instead." % name)
      self.target_spec.supported_ops = value
      return
    object.__setattr__(self, name, value)
  def __getattribute__(self, name):
    # Mirror of __setattr__: reads of deprecated names are answered from the
    # replacement attributes. The fallback must go through
    # object.__getattribute__ to avoid infinite recursion.
    if name == "post_training_quantize":
      warnings.warn("Property %s is deprecated, "
                    "please use optimizations=[Optimize.DEFAULT]"
                    " instead." % name)
      return Optimize.DEFAULT in set(self.optimizations)
    if name == "target_ops":
      warnings.warn("Property %s is deprecated, please use "
                    "target_spec.supported_ops instead." % name)
      return self.target_spec.supported_ops
    return object.__getattribute__(self, name)
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    # Checks dimensions in input tensor.
    if self._has_valid_tensors():
      for tensor in self._input_tensors:
        shape = tensor.shape
        if not shape:
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_get_tensor_name(tensor)))
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          # Only the batch (first) dimension may be unknown.
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          # Unknown batch dimension defaults to 1.
          self._set_batch_size(batch_size=1)

    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)

      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None

    self._validate_quantization()
    self._validate_representative_dataset()

    toco_inference_input_type = self.inference_input_type
    inference_input_type = self.inference_input_type
    inference_output_type = self.inference_output_type
    post_training_optimize = self._is_post_training_optimize()
    if post_training_optimize:
      # Post training optimizations require that TOCO outputs a float model.
      if self.inference_type != constants.FLOAT:
        raise ValueError(
            "`optimizations` require that `inference_type` is set to float.")
      toco_inference_input_type = constants.FLOAT
      # Set up default values.
      if inference_input_type is None:
        inference_input_type = constants.FLOAT
      if inference_output_type is None:
        inference_output_type = constants.FLOAT

    weight_only_quantize = self._is_int8_weight_only_quantize()
    if weight_only_quantize:
      # Currently, weight only quantization requires float inputs and outputs.
      if (inference_input_type != constants.FLOAT or
          inference_output_type != constants.FLOAT):
        raise ValueError(
            "Provide an inference_input_type and inference_output_type of type "
            "tf.float32.")

    if not post_training_optimize and self.inference_output_type is not None:
      raise ValueError(
          "inference_output_type is currently not supported if optimizations "
          "are not enabled.")

    # Run Grappler optimizations on the graph when possible; on any failure
    # fall back to the unoptimized GraphDef (best-effort by design).
    optimized_graph = self._graph_def
    if self.inference_type != constants.QUANTIZED_UINT8:
      try:
        optimized_graph = _run_graph_optimizations(
            self._graph_def,
            self._input_tensors,
            self._output_tensors,
            config=self._grappler_config())
      except Exception:
        optimized_graph = self._graph_def

    self._debug_info = _get_debug_info(self._debug_info_func, optimized_graph)

    converter_kwargs = self._get_base_converter_args()
    converter_kwargs.update({
        "inference_type": self.inference_type,
        "inference_input_type": toco_inference_input_type,
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video,
        "conversion_summary_dir": self.conversion_summary_dir,
        "custom_opdefs": self._custom_opdefs,
    })

    # Converts model.
    if self._has_valid_tensors():
      result = _toco_convert_impl(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      # Name/shape-pair path for graphs that could not be loaded into TF.
      result = _toco_convert_graph_def(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)

    if self._is_calibration_quantize():
      result = self._calibrate_quantize_model(
          result, inference_input_type, inference_output_type,
          self.experimental_new_quantizer)

    return result
def get_input_arrays(self):
"""Returns a list of the names of the input tensors.
Returns:
List of strings.
"""
if self._has_valid_tensors():
return [_get_tensor_name(tensor) for tensor in self._input_tensors]
else:
return [name for name, _ in self._input_arrays_with_shape]
  def _has_valid_tensors(self):
    """Checks if the input and output tensors have been initialized.

    Returns:
      Bool.
    """
    # NB: returns a truthy/falsy value rather than a strict bool (the empty
    # list or None short-circuits); all call sites use it in boolean context.
    return self._input_tensors and self._output_tensors
def _set_batch_size(self, batch_size):
"""Sets the first dimension of the input tensor to `batch_size`.
Args:
batch_size: Batch size for the model. Replaces the first dimension of an
input size array if undefined. (default 1)
Raises:
ValueError: input_tensor is not defined.
"""
if not self._has_valid_tensors():
raise ValueError("The batch size cannot be set for this model. Please "
"use input_shapes parameter.")
for tensor in self._input_tensors:
shape = tensor.shape.as_list()
if shape[0] is None:
shape[0] = batch_size
tensor.set_shape(shape)
@_tf_export(v1=["lite.TocoConverter"])
class TocoConverter(object):
  """Convert a TensorFlow model into `output_format` using TOCO.

  This class has been deprecated. Please use `lite.TFLiteConverter` instead.
  """

  # Every classmethod below is a thin deprecated shim that emits a
  # deprecation warning and delegates to the equivalent TFLiteConverter
  # factory with identical arguments.

  @classmethod
  @_deprecation.deprecated(None,
                           "Use `lite.TFLiteConverter.from_session` instead.")
  def from_session(cls, sess, input_tensors, output_tensors):
    """Creates a TocoConverter class from a TensorFlow Session."""
    return TFLiteConverter.from_session(sess, input_tensors, output_tensors)

  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_frozen_graph` instead.")
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Creates a TocoConverter class from a file containing a frozen graph."""
    return TFLiteConverter.from_frozen_graph(graph_def_file, input_arrays,
                                             output_arrays, input_shapes)

  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_saved_model` instead.")
  def from_saved_model(cls,
                       saved_model_dir,
                       input_arrays=None,
                       input_shapes=None,
                       output_arrays=None,
                       tag_set=None,
                       signature_key=None):
    """Creates a TocoConverter class from a SavedModel."""
    return TFLiteConverter.from_saved_model(saved_model_dir, input_arrays,
                                            input_shapes, output_arrays,
                                            tag_set, signature_key)

  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
  def from_keras_model_file(cls,
                            model_file,
                            input_arrays=None,
                            input_shapes=None,
                            output_arrays=None):
    """Creates a TocoConverter class from a tf.keras model file."""
    return TFLiteConverter.from_keras_model_file(model_file, input_arrays,
                                                 input_shapes, output_arrays)
| apache-2.0 |
forumber/temiz_kernel_g2 | toolchain/share/gdb/python/gdb/prompt.py | 124 | 4210 | # Extended prompt utilities.
# Copyright (C) 2011-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Extended prompt library functions."""
import gdb
import os
def _prompt_pwd(ignore):
"The current working directory."
return os.getcwdu()
def _prompt_object_attr(func, what, attr, nattr):
    """Internal worker for fetching GDB attributes."""
    # Fall back to the default attribute name when none was supplied.
    name = nattr if attr is None else attr
    try:
        obj = func()
    except gdb.error:
        return '<no %s>' % what
    if not hasattr(obj, name):
        return '<no attribute %s on current %s>' % (name, what)
    value = getattr(obj, name)
    # Some attributes (e.g. a frame's name) are methods; call for the value.
    if callable(value):
        value = value()
    return value
# Each _prompt_* helper below implements one prompt-escape substitution.
# NB: the one-line docstrings double as the user-visible help text emitted
# by prompt_help(), so they are runtime data and must stay stable.
def _prompt_frame(attr):
    "The selected frame; an argument names a frame parameter."
    return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')

def _prompt_thread(attr):
    "The selected thread; an argument names a thread parameter."
    return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')

def _prompt_version(attr):
    "The version of GDB."
    return gdb.VERSION

def _prompt_esc(attr):
    "The ESC character."
    return '\033'

def _prompt_bs(attr):
    "A backslash."
    return '\\'

def _prompt_n(attr):
    "A newline."
    return '\n'

def _prompt_r(attr):
    "A carriage return."
    return '\r'

def _prompt_param(attr):
    "A parameter's value; the argument names the parameter."
    return gdb.parameter(attr)

def _prompt_noprint_begin(attr):
    "Begins a sequence of non-printing characters."
    # \001/\002 bracket non-printing sequences for readline width accounting.
    return '\001'

def _prompt_noprint_end(attr):
    "Ends a sequence of non-printing characters."
    return '\002'
# Maps a prompt escape character (the X in "\X") to the function that
# produces its substitution text; consumed by prompt_help() and
# substitute_prompt() below.
prompt_substitutions = {
    'e': _prompt_esc,
    '\\': _prompt_bs,
    'n': _prompt_n,
    'r': _prompt_r,
    'v': _prompt_version,
    'w': _prompt_pwd,
    'f': _prompt_frame,
    't': _prompt_thread,
    'p': _prompt_param,
    '[': _prompt_noprint_begin,
    ']': _prompt_noprint_end
}
def prompt_help():
    """Generate help dynamically from the __doc__ strings of attribute
    functions."""
    # Build one help line per escape, in sorted key order, then append the
    # static usage footer.
    lines = ['  \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
             for key in sorted(prompt_substitutions)]
    result = ''.join(lines)
    result += """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
    return result
def substitute_prompt(prompt):
    "Perform substitutions on PROMPT."
    # Single left-to-right scan: a backslash introduces an escape; a known
    # escape may carry an optional "{arg}" suffix; everything else is copied
    # through verbatim.
    result = ''
    plen = len(prompt)
    i = 0
    while i < plen:
        if prompt[i] == '\\':
            i = i + 1
            if i >= plen:
                # Trailing lone backslash: drop it and stop.
                break
            cmdch = prompt[i]

            if cmdch in prompt_substitutions:
                cmd = prompt_substitutions[cmdch]

                if i + 1 < plen and prompt[i + 1] == '{':
                    # Scan for the closing brace of the "{arg}" form.
                    j = i + 1
                    while j < plen and prompt[j] != '}':
                        j = j + 1
                    # Just ignore formatting errors.
                    if j >= plen or prompt[j] != '}':
                        arg = None
                    else:
                        arg = prompt[i + 2 : j]
                    i = j
                else:
                    arg = None
                result += str(cmd(arg))
            else:
                # Unrecognized escapes are turned into the escaped
                # character itself.
                result += prompt[i]
        else:
            result += prompt[i]

        i = i + 1

    return result
| gpl-2.0 |
peiyuwang/pants | src/python/pants/engine/legacy/graph.py | 1 | 14329 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.jvm_app import Bundle, JvmApp
from pants.base.exceptions import TargetDefinitionException
from pants.base.parse_context import ParseContext
from pants.base.specs import SingleAddress
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_graph import BuildGraph
from pants.build_graph.remote_sources import RemoteSources
from pants.engine.addressable import Addresses, Collection
from pants.engine.fs import PathGlobs, Snapshot
from pants.engine.legacy.structs import BundleAdaptor, BundlesField, SourcesField, TargetAdaptor
from pants.engine.nodes import Return
from pants.engine.selectors import Select, SelectDependencies, SelectProjection, SelectTransitive
from pants.source.wrapped_globs import EagerFilesetWithSpec, FilesetRelPathWrapper
from pants.util.dirutil import fast_relpath
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
# A minimal stand-in carrying only the `target_types` tuple that
# RemoteSources reads from its `dest` argument.
class _DestWrapper(datatype('DestWrapper', ['target_types'])):
  """A wrapper for dest field of RemoteSources target.

  This is only used when instantiating RemoteSources target.
  """
class LegacyBuildGraph(BuildGraph):
  """A directed acyclic graph of Targets and dependencies. Not necessarily connected.

  This implementation is backed by a Scheduler that is able to resolve HydratedTargets.
  """

  class InvalidCommandLineSpecError(AddressLookupError):
    """Raised when command line spec is not a valid directory"""

  def __init__(self, scheduler, engine, symbol_table_cls):
    """Construct a graph given a Scheduler, Engine, and a SymbolTable class.

    :param scheduler: A Scheduler that is configured to be able to resolve HydratedTargets.
    :param engine: An Engine subclass to execute calls to `inject`.
    :param symbol_table_cls: A SymbolTable class used to instantiate Target objects. Must match
      the symbol table installed in the scheduler (TODO: see comment in `_instantiate_target`).
    """
    self._scheduler = scheduler
    self._target_types = self._get_target_types(symbol_table_cls)
    self._engine = engine
    super(LegacyBuildGraph, self).__init__()

  def _get_target_types(self, symbol_table_cls):
    # Flatten alias->type mappings, unwrapping macro factories to the single
    # Target type each one produces.
    aliases = symbol_table_cls.aliases()
    target_types = dict(aliases.target_types)
    for alias, factory in aliases.target_macro_factories.items():
      target_type, = factory.target_types
      target_types[alias] = target_type
    return target_types

  def _index(self, roots):
    """Index from the given roots into the storage provided by the base class.

    This is an additive operation: any existing connections involving these nodes are preserved.
    """
    all_addresses = set()
    new_targets = list()

    # Index the ProductGraph.
    for node, state in roots.items():
      # Any non-Return state means resolution failed; surface the trace.
      if type(state) is not Return:
        trace = '\n'.join(self._scheduler.trace())
        raise AddressLookupError(
            'Build graph construction failed for {}:\n{}'.format(node, trace))
      if type(state.value) is not HydratedTargets:
        raise TypeError('Expected roots to hold {}; got: {}'.format(
            HydratedTargets, type(state.value)))

      # We have a successful HydratedTargets value (for a particular input Spec).
      for hydrated_target in state.value.dependencies:
        target_adaptor = hydrated_target.adaptor
        address = target_adaptor.address
        all_addresses.add(address)
        if address not in self._target_by_address:
          new_targets.append(self._index_target(target_adaptor))

    # Once the declared dependencies of all targets are indexed, inject their
    # additional "traversable_(dependency_)?specs".
    deps_to_inject = OrderedSet()
    addresses_to_inject = set()
    def inject(target, dep_spec, is_dependency):
      address = Address.parse(dep_spec, relative_to=target.address.spec_path)
      # Skip specs that are already declared dependencies of the target.
      if not any(address == t.address for t in target.dependencies):
        addresses_to_inject.add(address)
        if is_dependency:
          deps_to_inject.add((target.address, address))

    for target in new_targets:
      for spec in target.traversable_dependency_specs:
        inject(target, spec, is_dependency=True)
      for spec in target.traversable_specs:
        inject(target, spec, is_dependency=False)

    # Inject all addresses, then declare injected dependencies.
    self.inject_addresses_closure(addresses_to_inject)
    for target_address, dep_address in deps_to_inject:
      self.inject_dependency(dependent=target_address, dependency=dep_address)

    return all_addresses

  def _index_target(self, target_adaptor):
    """Instantiate the given TargetAdaptor, index it in the graph, and return a Target."""
    # Instantiate the target.
    address = target_adaptor.address
    target = self._instantiate_target(target_adaptor)
    self._target_by_address[address] = target

    # Link its declared dependencies, which will be indexed independently.
    self._target_dependencies_by_address[address].update(target_adaptor.dependencies)
    for dependency in target_adaptor.dependencies:
      self._target_dependees_by_address[dependency].add(address)
    return target

  def _instantiate_target(self, target_adaptor):
    """Given a TargetAdaptor struct previously parsed from a BUILD file, instantiate a Target.

    TODO: This assumes that the SymbolTable used for parsing matches the SymbolTable passed
    to this graph. Would be good to make that more explicit, but it might be better to nuke
    the Target subclassing pattern instead, and lean further into the "configuration composition"
    model explored in the `exp` package.
    """
    target_cls = self._target_types[target_adaptor.type_alias]
    try:
      # Pop dependencies, which were already consumed during construction.
      kwargs = target_adaptor.kwargs()
      kwargs.pop('dependencies')

      # Instantiate. JvmApp and RemoteSources need kwarg munging first.
      if target_cls is JvmApp:
        return self._instantiate_jvm_app(kwargs)
      elif target_cls is RemoteSources:
        return self._instantiate_remote_sources(kwargs)
      return target_cls(build_graph=self, **kwargs)
    except TargetDefinitionException:
      raise
    except Exception as e:
      # Wrap arbitrary construction failures with the offending address.
      raise TargetDefinitionException(
          target_adaptor.address,
          'Failed to instantiate Target with type {}: {}'.format(target_cls, e))

  def _instantiate_jvm_app(self, kwargs):
    """For JvmApp target, convert BundleAdaptor to BundleProps."""
    parse_context = ParseContext(kwargs['address'].spec_path, dict())
    bundleprops_factory = Bundle(parse_context)
    kwargs['bundles'] = [
      bundleprops_factory.create_bundle_props(bundle)
      for bundle in kwargs['bundles']
    ]
    return JvmApp(build_graph=self, **kwargs)

  def _instantiate_remote_sources(self, kwargs):
    """For RemoteSources target, convert "dest" field to its real target type."""
    kwargs['dest'] = _DestWrapper((self._target_types[kwargs['dest']],))
    return RemoteSources(build_graph=self, **kwargs)

  def inject_synthetic_target(self,
                              address,
                              target_type,
                              dependencies=None,
                              derived_from=None,
                              **kwargs):
    # Build the Target directly (no BUILD file backs it) and mark it synthetic.
    target = target_type(name=address.target_name,
                         address=address,
                         build_graph=self,
                         **kwargs)
    self.inject_target(target,
                       dependencies=dependencies,
                       derived_from=derived_from,
                       synthetic=True)

  def inject_address_closure(self, address):
    self.inject_addresses_closure([address])

  def inject_addresses_closure(self, addresses):
    # Only resolve addresses not already present in the graph.
    addresses = set(addresses) - set(self._target_by_address.keys())
    if not addresses:
      return
    matched = set(self._inject([SingleAddress(a.spec_path, a.target_name) for a in addresses]))
    missing = addresses - matched
    if missing:
      # TODO: When SingleAddress resolution converted from projection of a directory
      # and name to a match for PathGlobs, we lost our useful AddressLookupError formatting.
      raise AddressLookupError('Addresses were not matched: {}'.format(missing))

  def inject_specs_closure(self, specs, fail_fast=None):
    # Request loading of these specs.
    for address in self._inject(specs):
      yield address

  def resolve_address(self, address):
    if not self.contains_address(address):
      self.inject_address_closure(address)
    return self.get_target(address)

  def _inject(self, subjects):
    """Inject Targets into the graph for each of the subjects and yield the resulting addresses."""
    logger.debug('Injecting to %s: %s', self, subjects)
    # Request both products so indexing and address matching use one execution.
    request = self._scheduler.execution_request([HydratedTargets, Addresses], subjects)
    result = self._engine.execute(request)
    if result.error:
      raise result.error

    # Update the base class indexes for this request.
    root_entries = self._scheduler.root_entries(request)
    address_entries = {k: v for k, v in root_entries.items() if k[1].product is Addresses}
    target_entries = {k: v for k, v in root_entries.items() if k[1].product is HydratedTargets}
    self._index(target_entries)

    yielded_addresses = set()
    for (subject, _), state in address_entries.items():
      if not state.value.dependencies:
        raise self.InvalidCommandLineSpecError(
          'Spec {} does not match any targets.'.format(subject))
      for address in state.value.dependencies:
        if address not in yielded_addresses:
          yielded_addresses.add(address)
          yield address
class HydratedTarget(datatype('HydratedTarget', ['address', 'adaptor', 'dependencies'])):
  """A wrapper for a fully hydrated TargetAdaptor object.

  Transitive graph walks collect ordered sets of HydratedTargets which involve a huge amount
  of hashing: we implement eq/hash via direct usage of an Address field to speed that up.
  """

  def __eq__(self, other):
    # Exact type match (deliberately not isinstance) plus address identity.
    if type(self) != type(other):
      return False
    return self.address == other.address

  def __ne__(self, other):
    return not (self == other)

  def __hash__(self):
    # Consistent with __eq__: hash only the address.
    return hash(self.address)

# TODO: Only used (currently) to represent transitive hydrated targets. Consider renaming.
HydratedTargets = Collection.of(HydratedTarget)

class HydratedField(datatype('HydratedField', ['name', 'value'])):
  """A wrapper for a fully constructed replacement kwarg for a HydratedTarget."""
def hydrate_target(target_adaptor, hydrated_fields):
  """Construct a HydratedTarget from a TargetAdaptor and hydrated versions of its adapted fields."""
  # Overlay the hydrated replacement kwargs on the adaptor's original kwargs
  # and rebuild the adaptor from the merged set.
  kwargs = target_adaptor.kwargs()
  kwargs.update((field.name, field.value) for field in hydrated_fields)
  return HydratedTarget(target_adaptor.address,
                        TargetAdaptor(**kwargs),
                        tuple(target_adaptor.dependencies))
def _eager_fileset_with_spec(spec_path, filespec, snapshot):
  """Build an EagerFilesetWithSpec from a Snapshot, relativizing its paths.

  :param spec_path: The BUILD file directory the filespec was declared in;
    snapshot paths are made relative to it.
  :param filespec: A dict with a 'globs' list and an optional 'exclude' list.
  :param snapshot: A Snapshot whose files matched the filespec.
  """
  files = tuple(fast_relpath(fd.path, spec_path) for fd in snapshot.files)

  relpath_adjusted_filespec = FilesetRelPathWrapper.to_filespec(filespec['globs'], spec_path)
  # NB: `dict.has_key()` was removed in Python 3; the `in` operator is the
  # equivalent, portable spelling.
  if 'exclude' in filespec:
    relpath_adjusted_filespec['exclude'] = [FilesetRelPathWrapper.to_filespec(e['globs'], spec_path)
                                            for e in filespec['exclude']]

  # NB: In order to preserve declared ordering, we record a list of matched files
  # independent of the file hash dict.
  return EagerFilesetWithSpec(spec_path,
                              relpath_adjusted_filespec,
                              files=files,
                              files_hash=snapshot.fingerprint)
def hydrate_sources(sources_field, snapshot):
  """Given a SourcesField and a Snapshot for its path_globs, create an EagerFilesetWithSpec."""
  # The field's address anchors the snapshot paths to the declaring directory.
  fileset_with_spec = _eager_fileset_with_spec(
      sources_field.address.spec_path,
      sources_field.filespecs,
      snapshot)
  return HydratedField(sources_field.arg, fileset_with_spec)
def hydrate_bundles(bundles_field, snapshot_list):
  """Given a BundlesField and a Snapshot for each of its filesets create a list of BundleAdaptors."""
  spec_path = bundles_field.address.spec_path
  bundle_adaptors = []
  for bundle, filespecs, snapshot in zip(bundles_field.bundles,
                                         bundles_field.filespecs_list,
                                         snapshot_list):
    kwargs = bundle.kwargs()
    # A bundle may declare its own rel_path; otherwise paths are relative to
    # the declaring BUILD file's directory.
    rel_path = getattr(bundle, 'rel_path', spec_path)
    kwargs['fileset'] = _eager_fileset_with_spec(rel_path, filespecs, snapshot)
    bundle_adaptors.append(BundleAdaptor(**kwargs))
  return HydratedField('bundles', bundle_adaptors)
def create_legacy_graph_tasks(symbol_table_cls):
  """Create tasks to recursively parse the legacy graph."""
  symbol_table_constraint = symbol_table_cls.constraint()
  # Each entry is a rule triple: (product, selectors, task function).
  return [
    # Recursively requests HydratedTargets, which will result in an eager, transitive graph walk.
    (HydratedTargets,
     [SelectTransitive(HydratedTarget,
                       Addresses,
                       field_types=(Address,))],
     HydratedTargets),
    # Hydrates a single target from its adaptor plus its hydrated fields.
    (HydratedTarget,
     [Select(symbol_table_constraint),
      SelectDependencies(HydratedField,
                         symbol_table_constraint,
                         'field_adaptors',
                         field_types=(SourcesField, BundlesField,))],
     hydrate_target),
    # Hydrates a SourcesField via a Snapshot of its path globs.
    (HydratedField,
     [Select(SourcesField),
      SelectProjection(Snapshot, PathGlobs, ('path_globs',), SourcesField)],
     hydrate_sources),
    # Hydrates a BundlesField via one Snapshot per bundle fileset.
    (HydratedField,
     [Select(BundlesField),
      SelectDependencies(Snapshot, BundlesField, 'path_globs_list', field_types=(PathGlobs,))],
     hydrate_bundles),
  ]
| apache-2.0 |
muffinresearch/addons-server | apps/amo/management/commands/extract_loc.py | 22 | 1526 | import os
import re
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.management.base import BaseCommand
from amo.storage_utils import walk_storage
# Matches `loc(...)` markers (optionally escaped as `\loc(...)`), spanning
# lines thanks to re.S.
_loc_re = re.compile(r"""\\?(loc)\(.*?\)""", (re.M | re.S))
# Only files with these extensions are scanned.
_exts = ('.py', '.html')
_root = settings.ROOT
# Only these subtrees under ROOT are scanned (see Command.handle).
_subs = tuple([os.path.join(_root, s) for s in ['mkt']])
class Command(BaseCommand):
    """
    A very simple parser to find string marked with loc in py and html.

    This is rather naive, so don't worry about it being perfect, it's just
    so that we can find all the strings for the marketplace and pass them on
    to UX people. Or you could do a fancy grep.
    """
    # NB: Python 2 module (uses print statements).

    def handle(self, *args, **options):
        count = 0
        for root, folders, files in walk_storage(_root):
            # Only scan the whitelisted subtrees (see _subs above).
            if not root.startswith(_subs):
                continue
            for fname in files:
                fname = os.path.join(root, fname)
                if fname.endswith(_exts):
                    # NOTE(review): the storage file handle is never closed;
                    # consider wrapping in contextlib.closing.
                    data = storage.open(fname).read()
                    found = False
                    for match in _loc_re.finditer(data):
                        # Print the filename header once per file with matches.
                        if not found:
                            found = True
                            print fname
                            print '-' * len(fname)
                        print match.string[match.start():match.end()]
                        count += 1
                    if found:
                        print
        print 'Strings found:', count
| bsd-3-clause |
alex/django-old | django/utils/text.py | 69 | 9831 | import re
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.utils.translation import ugettext_lazy
from htmlentitydefs import name2codepoint
# Capitalizes the first letter of a string.
# Falsy inputs (empty string, None) pass through unchanged because of the
# short-circuiting `x and ...`.
capfirst = lambda x: x and force_unicode(x)[0].upper() + force_unicode(x)[1:]
capfirst = allow_lazy(capfirst, unicode)
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks and most spaces in
    the text. Expects that existing line breaks are posix newlines.
    """
    text = force_unicode(text)
    def _generator():
        it = iter(text.split(' '))
        word = it.next()  # Python 2 iterator protocol.
        yield word
        # `pos` tracks the current column: characters emitted since the
        # last newline in the output.
        pos = len(word) - word.rfind('\n') - 1
        for word in it:
            if "\n" in word:
                lines = word.split('\n')
            else:
                lines = (word,)
            # +1 accounts for the separator (space or newline) we yield.
            pos += len(lines[0]) + 1
            if pos > width:
                # Break the line instead of emitting a space.
                yield '\n'
                pos = len(lines[-1])
            else:
                yield ' '
                if len(lines) > 1:
                    # The word itself contained newlines; restart the column
                    # count from its last segment.
                    pos = len(lines[-1])
            yield word
    return u''.join(_generator())
wrap = allow_lazy(wrap, unicode)
def truncate_words(s, num, end_text='...'):
    """
    Truncate *s* after *num* words, appending *end_text* when truncation
    occurred (unless the last kept word already ends with it).
    Newlines and other whitespace runs collapse to single spaces because
    the result is re-joined from split().
    """
    s = force_unicode(s)
    limit = int(num)
    pieces = s.split()
    if len(pieces) > limit:
        pieces = pieces[:limit]
        if not pieces[-1].endswith(end_text):
            pieces.append(end_text)
    return u' '.join(pieces)
truncate_words = allow_lazy(truncate_words, unicode)
def truncate_html_words(s, num, end_text='...'):
    """Truncates HTML to a certain number of words (not counting tags and
    comments). Closes opened tags if they were correctly closed in the given
    html. Takes an optional argument of what should be used to notify that the
    string has been truncated, defaulting to ellipsis (...).
    Newlines in the HTML are preserved.
    """
    s = force_unicode(s)
    length = int(num)
    if length <= 0:
        return u''
    # Void elements (HTML4) that never need a closing tag.
    html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input')
    # Set up regular expressions
    # re_words matches entities, tags, or (captured in group 1) real words.
    re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U)
    re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>')
    # Count non-HTML words and keep note of open tags
    pos = 0
    end_text_pos = 0
    words = 0
    open_tags = []
    while words <= length:
        m = re_words.search(s, pos)
        if not m:
            # Checked through whole string
            break
        pos = m.end(0)
        if m.group(1):
            # It's an actual non-HTML word
            words += 1
            if words == length:
                # Remember where to cut; we keep scanning only to learn
                # whether the string continues past the limit.
                end_text_pos = pos
            continue
        # Check for tag
        tag = re_tag.match(m.group(0))
        if not tag or end_text_pos:
            # Don't worry about non tags or tags after our truncate point
            continue
        closing_tag, tagname, self_closing = tag.groups()
        tagname = tagname.lower() # Element names are always case-insensitive
        if self_closing or tagname in html4_singlets:
            pass
        elif closing_tag:
            # Check for match in open tags list
            try:
                i = open_tags.index(tagname)
            except ValueError:
                pass
            else:
                # SGML: An end tag closes, back to the matching start tag, all unclosed intervening start tags with omitted end tags
                open_tags = open_tags[i+1:]
        else:
            # Add it to the start of the open tags list
            open_tags.insert(0, tagname)
    if words <= length:
        # Don't try to close tags if we don't need to truncate
        return s
    out = s[:end_text_pos]
    if end_text:
        out += ' ' + end_text
    # Close any tags still open
    for tag in open_tags:
        out += '</%s>' % tag
    # Return string
    return out
truncate_html_words = allow_lazy(truncate_html_words, unicode)
def get_valid_filename(s):
    """
    Return *s* sanitized for use as a filename.
    Leading/trailing whitespace is stripped, internal spaces become
    underscores, and any character that is not a unicode alphanumeric,
    dash, underscore, or dot is dropped.
    >>> get_valid_filename("john's portrait in 2004.jpg")
    u'johns_portrait_in_2004.jpg'
    """
    cleaned = force_unicode(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
get_valid_filename = allow_lazy(get_valid_filename, unicode)
def get_text_list(list_, last_word=ugettext_lazy(u'or')):
    """
    Join a list into a human-readable enumeration, using *last_word*
    before the final item.
    >>> get_text_list(['a', 'b', 'c', 'd'])
    u'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    u'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    u'a and b'
    >>> get_text_list(['a'])
    u'a'
    >>> get_text_list([])
    u''
    """
    if not list_:
        return u''
    if len(list_) == 1:
        return force_unicode(list_[0])
    head = u', '.join(force_unicode(item) for item in list_[:-1])
    return u'%s %s %s' % (head, force_unicode(last_word), force_unicode(list_[-1]))
get_text_list = allow_lazy(get_text_list, unicode)
def normalize_newlines(text):
    r"""Normalize Windows (\r\n) and old-Mac (\r) line endings to \n."""
    collapsed = re.sub(r'\r\n|\r|\n', '\n', text)
    return force_unicode(collapsed)
normalize_newlines = allow_lazy(normalize_newlines, unicode)
def recapitalize(text):
    """Lowercase *text*, then capitalize the first letter and every letter
    following end-of-sentence punctuation ('. ', '? ', '! ')."""
    lowered = force_unicode(text).lower()
    sentence_start = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])')
    return sentence_start.sub(lambda m: m.group(1).upper(), lowered)
recapitalize = allow_lazy(recapitalize)
def phone2numeric(phone):
    """Convert a phone number with letters into its numeric equivalent,
    using the standard telephone keypad mapping. Non-letter characters
    are left untouched."""
    keypad = {
        'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3',
        'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5',
        'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7',
        's': '7', 't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9',
        'y': '9', 'z': '9',
    }
    letters = re.compile(r'[A-Z]', re.I)
    return letters.sub(lambda m: keypad.get(m.group(0).lower()), phone)
phone2numeric = allow_lazy(phone2numeric)
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """Return *s* gzip-compressed (level 6) as a byte string.
    Note: gzip embeds the current mtime in its header, so output is not
    byte-for-byte deterministic across calls."""
    import cStringIO, gzip  # Python 2 only; cStringIO has no py3 equivalent name.
    zbuf = cStringIO.StringIO()
    zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(s)
    zfile.close()
    return zbuf.getvalue()
# Matches every non-ASCII character so it can be \uXXXX-escaped.
ustring_re = re.compile(u"([\u0080-\uffff])")
def javascript_quote(s, quote_double_quotes=False):
    """
    Escape *s* for safe embedding inside a JavaScript string literal.
    Backslashes, CR, LF, TAB and single quotes are backslash-escaped;
    non-ASCII characters become \\uXXXX escapes. Double quotes are only
    escaped when *quote_double_quotes* is True.
    Accepts a utf-8 bytestring or a unicode object; raises TypeError
    otherwise. Returns a bytestring.
    """
    def fix(match):
        # Replace one non-ASCII character with its \uXXXX escape.
        return r"\u%04x" % ord(match.group(1))
    if type(s) == str:
        s = s.decode('utf-8')
    elif type(s) != unicode:
        raise TypeError(s)
    # Escape the backslash first so later escapes are not double-escaped.
    s = s.replace('\\', '\\\\')
    s = s.replace('\r', '\\r')
    s = s.replace('\n', '\\n')
    s = s.replace('\t', '\\t')
    s = s.replace("'", "\\'")
    if quote_double_quotes:
        # BUG FIX: this previously replaced '"' with itself (a no-op), so the
        # quote_double_quotes flag had no effect. Escape the quote instead.
        # NOTE(review): if the output is embedded in HTML attributes, the
        # intended replacement may have been the entity '&quot;' -- confirm
        # against call sites.
        s = s.replace('"', '\\"')
    return str(ustring_re.sub(fix, s))
javascript_quote = allow_lazy(javascript_quote, unicode)
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
    ((?:
        [^\s'"]*
        (?:
            (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
            [^\s'"]*
        )+
    ) | \S+)
""", re.VERBOSE)
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).
    >>> list(smart_split(r'This is "a person\'s" test.'))
    [u'This', u'is', u'"a person\\\'s"', u'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    [u'Another', u"'person\\'s'", u'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    [u'A', u'"\\"funky\\" style"', u'test.']
    """
    text = force_unicode(text)
    # Each match is either a (possibly quote-containing) token or a bare
    # non-whitespace run; yield them verbatim.
    for bit in smart_split_re.finditer(text):
        yield bit.group(0)
smart_split = allow_lazy(smart_split, unicode)
def _replace_entity(match):
    """Regex callback: replace one HTML entity reference with its character.
    Unknown or malformed entities are returned unchanged."""
    text = match.group(1)
    if text[0] == u'#':
        # Numeric character reference: &#NNN; or &#xHHH;.
        text = text[1:]
        try:
            if text[0] in u'xX':
                c = int(text[1:], 16)
            else:
                c = int(text)
            return unichr(c)  # Python 2; py3 would use chr().
        except ValueError:
            return match.group(0)
    else:
        # Named entity, e.g. &amp; -- look it up in htmlentitydefs.
        try:
            return unichr(name2codepoint[text])
        except (ValueError, KeyError):
            return match.group(0)
# Matches &name; and &#NNN; / &#xHHH; entity references.
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
    """Replace all HTML entity references in *text* with their characters."""
    return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, unicode)
def unescape_string_literal(s):
    r"""
    Strip the surrounding quotes from a string literal and unescape the
    backslash-escaped quotes and backslashes inside it::
        >>> unescape_string_literal('"abc"')
        'abc'
        >>> unescape_string_literal("'abc'")
        'abc'
        >>> unescape_string_literal('"a \"bc\""')
        'a "bc"'
        >>> unescape_string_literal("'\'ab\' c'")
        "'ab' c"
    Raises ValueError when *s* is not wrapped in matching quotes.
    """
    quote = s[0]
    if quote not in "\"'" or s[-1] != quote:
        raise ValueError("Not a string literal: %r" % s)
    inner = s[1:-1]
    return inner.replace('\\' + quote, quote).replace('\\\\', '\\')
unescape_string_literal = allow_lazy(unescape_string_literal)
| bsd-3-clause |
rysson/filmkodi | plugin.video.xbmcfilm/resources/lib/jsbeautifier/unpackers/tests/testmyobfuscate.py | 111 | 1163 | #
# written by Stefano Sanfilippo <a.little.coder@gmail.com>
#
"""Tests for MyObfuscate unpacker."""
import unittest
import os
from jsbeautifier.unpackers.myobfuscate import detect, unpack
from jsbeautifier.unpackers.tests import __path__ as path
INPUT = os.path.join(path[0], 'test-myobfuscate-input.js')
OUTPUT = os.path.join(path[0], 'test-myobfuscate-output.js')
# pylint: disable=R0904
class TestMyObfuscate(unittest.TestCase):
    # pylint: disable=C0103
    """Test case for the MyObfuscate unpacker (detect + unpack)."""
    @classmethod
    def setUpClass(cls):
        """Read the obfuscated input and the expected decoded output."""
        with open(INPUT, 'r') as encoded:
            cls.input = encoded.read()
        with open(OUTPUT, 'r') as decoded:
            cls.output = decoded.read()
    def test_detect(self):
        """detect() must recognize the obfuscated sample."""
        self.assertTrue(detect(self.input))
    def test_unpack(self):
        """unpack() must reproduce the reference decoded source."""
        self.assertEqual(unpack(self.input), self.output)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
minlexx/pyevemon | esi_client/models/get_corporations_npccorps_internal_server_error.py | 1 | 3114 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetCorporationsNpccorpsInternalServerError(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, error=None):
        """
        Build a GetCorporationsNpccorpsInternalServerError model.
        :param error: internal server error message (str or None)
        """
        # Maps attribute name -> swagger type name.
        self.swagger_types = {
            'error': 'str'
        }
        # Maps attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'error': 'error'
        }
        self._error = error
    @property
    def error(self):
        """
        Internal server error message.
        :rtype: str
        """
        return self._error
    @error.setter
    def error(self, error):
        """
        Set the internal server error message.
        :type error: str
        """
        self._error = error
    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models, lists and dicts of models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return a pretty-printed string of the model's properties."""
        return pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Models are equal when they are the same type with equal state."""
        if not isinstance(other, GetCorporationsNpccorpsInternalServerError):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| gpl-3.0 |
MaPePeR/numpy | numpy/lib/tests/test_recfunctions.py | 45 | 31143 | from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
from numpy.testing import TestCase, run_module_suite, assert_
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by
)
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
class TestRecFunctions(TestCase):
    """Tests for the misc recfunctions helpers: zip_descr, drop_fields,
    rename_fields, get_names(_flat), get_fieldstructure, find_duplicates.
    The fixtures below pin exact dtypes and masks; keep them in sync with
    the asserts."""
    # Misc tests
    def setUp(self):
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array([('A', 1.), ('B', 2.)],
                     dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    def test_zip_descr(self):
        # Test zip_descr
        (w, x, y, z) = self.data
        # Std array
        test = zip_descr((x, x), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        test = zip_descr((x, x), flatten=False)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        # Std & flexible-dtype
        test = zip_descr((x, z), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('A', '|S3'), ('B', float)]))
        test = zip_descr((x, z), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('A', '|S3'), ('B', float)])]))
        # Standard & nested dtype
        test = zip_descr((x, w), flatten=True)
        assert_equal(test,
                     np.dtype([('', int),
                               ('a', int),
                               ('ba', float), ('bb', int)]))
        test = zip_descr((x, w), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('a', int),
                                     ('b', [('ba', float), ('bb', int)])])]))
    def test_drop_fields(self):
        # Test drop_fields
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        # A basic field
        test = drop_fields(a, 'a')
        control = np.array([((2, 3.0),), ((5, 6.0),)],
                           dtype=[('b', [('ba', float), ('bb', int)])])
        assert_equal(test, control)
        # Another basic field (but nesting two fields)
        test = drop_fields(a, 'b')
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # A nested sub-field
        test = drop_fields(a, ['ba', ])
        control = np.array([(1, (3.0,)), (4, (6.0,))],
                           dtype=[('a', int), ('b', [('bb', int)])])
        assert_equal(test, control)
        # All the nested sub-field from a field: zap that field
        test = drop_fields(a, ['ba', 'bb'])
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # Dropping every field yields None.
        test = drop_fields(a, ['a', 'b'])
        assert_(test is None)
    def test_rename_fields(self):
        # Test rename fields
        a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
                     dtype=[('a', int),
                            ('b', [('ba', float), ('bb', (float, 2))])])
        test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
        newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
        control = a.view(newdtype)
        assert_equal(test.dtype, newdtype)
        assert_equal(test, control)
    def test_get_names(self):
        # Test get_names
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_names(ndtype)
        assert_equal(test, ('A', 'B'))
        ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
        test = get_names(ndtype)
        assert_equal(test, ('a', ('b', ('ba', 'bb'))))
    def test_get_names_flat(self):
        # Test get_names_flat
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_names_flat(ndtype)
        assert_equal(test, ('A', 'B'))
        ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
        test = get_names_flat(ndtype)
        assert_equal(test, ('a', 'b', 'ba', 'bb'))
    def test_get_fieldstructure(self):
        # Test get_fieldstructure
        # No nested fields
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A': [], 'B': []})
        # One 1-nested field
        ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
        # One 2-nested fields
        ndtype = np.dtype([('A', int),
                           ('B', [('BA', int),
                                  ('BB', [('BBA', int), ('BBB', int)])])])
        test = get_fieldstructure(ndtype)
        control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
                   'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
        assert_equal(test, control)
    def test_find_duplicates(self):
        # Test find_duplicates
        a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
                      (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
                     mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
                           (0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
                     dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        test = find_duplicates(a, ignoremask=False, return_index=True)
        control = [0, 2]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='A', return_index=True)
        control = [0, 1, 2, 3, 5]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='B', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='BA', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='BB', return_index=True)
        control = [0, 1, 2, 3, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
    def test_find_duplicates_ignoremask(self):
        # Test the ignoremask option of find_duplicates
        ndtype = [('a', int)]
        a = ma.array([1, 1, 1, 2, 2, 3, 3],
                     mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
        test = find_duplicates(a, ignoremask=True, return_index=True)
        control = [0, 1, 3, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, ignoremask=False, return_index=True)
        control = [0, 1, 2, 3, 4, 6]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
class TestRecursiveFillFields(TestCase):
    """Tests for recursive_fill_fields on plain and masked flexible arrays."""
    # Test recursive_fill_fields.
    def test_simple_flexible(self):
        # Test recursive_fill_fields on flexible-array
        a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
        b = np.zeros((3,), dtype=a.dtype)
        test = recursive_fill_fields(a, b)
        control = np.array([(1, 10.), (2, 20.), (0, 0.)],
                           dtype=[('A', int), ('B', float)])
        assert_equal(test, control)
    def test_masked_flexible(self):
        # Test recursive_fill_fields on masked flexible-array
        a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
                     dtype=[('A', int), ('B', float)])
        b = ma.zeros((3,), dtype=a.dtype)
        test = recursive_fill_fields(a, b)
        control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
                           mask=[(0, 1), (1, 0), (0, 0)],
                           dtype=[('A', int), ('B', float)])
        assert_equal(test, control)
class TestMergeArrays(TestCase):
    """Tests for merge_arrays: flattening, masking, and mixed-dtype inputs.
    The control arrays pin exact dtypes and masks."""
    # Test merge_arrays
    def setUp(self):
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array(
            [(1, (2, 3.0)), (4, (5, 6.0))],
            dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    def test_solo(self):
        # Test merge_arrays on a single array.
        (_, x, _, z) = self.data
        test = merge_arrays(x)
        control = np.array([(1,), (2,)], dtype=[('f0', int)])
        assert_equal(test, control)
        test = merge_arrays((x,))
        assert_equal(test, control)
        test = merge_arrays(z, flatten=False)
        assert_equal(test, z)
        test = merge_arrays(z, flatten=True)
        assert_equal(test, z)
    def test_solo_w_flatten(self):
        # Test merge_arrays on a single array w & w/o flattening
        w = self.data[0]
        test = merge_arrays(w, flatten=False)
        assert_equal(test, w)
        test = merge_arrays(w, flatten=True)
        control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
                           dtype=[('a', int), ('ba', float), ('bb', int)])
        assert_equal(test, control)
    def test_standard(self):
        # Test standard & standard
        # Test merge arrays
        (_, x, y, _) = self.data
        test = merge_arrays((x, y), usemask=False)
        control = np.array([(1, 10), (2, 20), (-1, 30)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        test = merge_arrays((x, y), usemask=True)
        control = ma.array([(1, 10), (2, 20), (-1, 30)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
    def test_flatten(self):
        # Test standard & flexible
        (_, x, _, z) = self.data
        test = merge_arrays((x, z), flatten=True)
        control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        test = merge_arrays((x, z), flatten=False)
        control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
                           dtype=[('f0', int),
                                  ('f1', [('A', '|S3'), ('B', float)])])
        assert_equal(test, control)
    def test_flatten_wflexible(self):
        # Test flatten standard & nested
        (w, x, _, _) = self.data
        test = merge_arrays((x, w), flatten=True)
        control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
                           dtype=[('f0', int),
                                  ('a', int), ('ba', float), ('bb', int)])
        assert_equal(test, control)
        test = merge_arrays((x, w), flatten=False)
        controldtype = [('f0', int),
                        ('f1', [('a', int),
                                ('b', [('ba', float), ('bb', int)])])]
        control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
                           dtype=controldtype)
        assert_equal(test, control)
    def test_wmasked_arrays(self):
        # Test merge_arrays masked arrays
        (_, x, _, _) = self.data
        mx = ma.array([1, 2, 3], mask=[1, 0, 0])
        test = merge_arrays((x, mx), usemask=True)
        control = ma.array([(1, 1), (2, 2), (-1, 3)],
                           mask=[(0, 1), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        test = merge_arrays((x, mx), usemask=True, asrecarray=True)
        assert_equal(test, control)
        assert_(isinstance(test, MaskedRecords))
    def test_w_singlefield(self):
        # Test single field
        test = merge_arrays((np.array([1, 2]).view([('a', int)]),
                             np.array([10., 20., 30.])),)
        control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('a', int), ('f1', float)])
        assert_equal(test, control)
    def test_w_shorter_flex(self):
        # Test merge_arrays w/ a shorter flexndarray.
        z = self.data[-1]
        # Fixme, this test looks incomplete and broken
        #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
        #                   dtype=[('A', '|S3'), ('B', float), ('C', int)])
        #assert_equal(test, control)
        # Hack to avoid pyflakes warnings about unused variables
        merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
                 dtype=[('A', '|S3'), ('B', float), ('C', int)])
    def test_singlerecord(self):
        (_, x, y, z) = self.data
        test = merge_arrays((x[0], y[0], z[0]), usemask=False)
        control = np.array([(1, 10, ('A', 1))],
                           dtype=[('f0', int),
                                  ('f1', int),
                                  ('f2', [('A', '|S3'), ('B', float)])])
        assert_equal(test, control)
class TestAppendFields(TestCase):
    """Tests for append_fields on plain, flexible, and nested dtypes."""
    # Test append_fields
    def setUp(self):
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    def test_append_single(self):
        # Test simple case
        (_, x, _, _) = self.data
        test = append_fields(x, 'A', data=[10, 20, 30])
        control = ma.array([(1, 10), (2, 20), (-1, 30)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('A', int)],)
        assert_equal(test, control)
    def test_append_double(self):
        # Test simple case
        (_, x, _, _) = self.data
        test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
        control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
                           mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
                           dtype=[('f0', int), ('A', int), ('B', int)],)
        assert_equal(test, control)
    def test_append_on_flex(self):
        # Test append_fields on flexible type arrays
        z = self.data[-1]
        test = append_fields(z, 'C', data=[10, 20, 30])
        control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
                           mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('C', int)],)
        assert_equal(test, control)
    def test_append_on_nested(self):
        # Test append_fields on nested fields
        w = self.data[0]
        test = append_fields(w, 'C', data=[10, 20, 30])
        control = ma.array([(1, (2, 3.0), 10),
                            (4, (5, 6.0), 20),
                            (-1, (-1, -1.), 30)],
                           mask=[(
                               0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
                           dtype=[('a', int),
                                  ('b', [('ba', float), ('bb', int)]),
                                  ('C', int)],)
        assert_equal(test, control)
class TestStackArrays(TestCase):
    """Tests for stack_arrays: unnamed/named field mixes, defaults,
    autoconversion, and dtype titles. Control masks are pinned exactly."""
    # Test stack_arrays
    def setUp(self):
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    def test_solo(self):
        # Test stack_arrays on single arrays
        (_, x, _, _) = self.data
        test = stack_arrays((x,))
        assert_equal(test, x)
        self.assertTrue(test is x)
        test = stack_arrays(x)
        assert_equal(test, x)
        self.assertTrue(test is x)
    def test_unnamed_fields(self):
        # Tests combinations of arrays w/o named fields
        (_, x, y, _) = self.data
        test = stack_arrays((x, x), usemask=False)
        control = np.array([1, 2, 1, 2])
        assert_equal(test, control)
        test = stack_arrays((x, y), usemask=False)
        control = np.array([1, 2, 10, 20, 30])
        assert_equal(test, control)
        test = stack_arrays((y, x), usemask=False)
        control = np.array([10, 20, 30, 1, 2])
        assert_equal(test, control)
    def test_unnamed_and_named_fields(self):
        # Test combination of arrays w/ & w/o named fields
        (_, x, _, z) = self.data
        test = stack_arrays((x, z))
        control = ma.array([(1, -1, -1), (2, -1, -1),
                            (-1, 'A', 1), (-1, 'B', 2)],
                           mask=[(0, 1, 1), (0, 1, 1),
                                 (1, 0, 0), (1, 0, 0)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)
    def test_matching_named_fields(self):
        # Test combination of arrays w/ matching field names
        (_, x, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        test = stack_arrays((z, zz))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            (
                                'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, zz, x))
        ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
        control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
                            ('a', 10., 100., -1), ('b', 20., 200., -1),
                            ('c', 30., 300., -1),
                            (-1, -1, -1, 1), (-1, -1, -1, 2)],
                           dtype=ndtype,
                           mask=[(0, 0, 1, 1), (0, 0, 1, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
                                 (1, 1, 1, 0), (1, 1, 1, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
    def test_defaults(self):
        # Test defaults: no exception raised if keys of defaults are not fields.
        (_, _, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
        test = stack_arrays((z, zz), defaults=defaults)
        control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
                            (
                                'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
    def test_autoconversion(self):
        # Tests autoconversion
        adtype = [('A', int), ('B', bool), ('C', float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [('A', int), ('B', float), ('C', float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        test = stack_arrays((a, b), autoconvert=True)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        # Without autoconvert, mismatched field dtypes must raise TypeError.
        try:
            test = stack_arrays((a, b), autoconvert=False)
        except TypeError:
            pass
        else:
            raise AssertionError
    def test_checktitles(self):
        # Test using titles in the field names
        adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        test = stack_arrays((a, b))
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
class TestJoinBy(TestCase):
    """Tests for join_by: inner, outer, and leftouter joins on one or two
    key fields. Arrays a and b overlap on 'a' values 5-9."""
    def setUp(self):
        self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('c', int)])
        self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('d', int)])
    def test_inner_join(self):
        # Basic test of join_by
        a, b = self.a, self.b
        test = join_by('a', a, b, jointype='inner')
        control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
                            (7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
                            (9, 59, 69, 109, 104)],
                           dtype=[('a', int), ('b1', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)
    def test_join(self):
        a, b = self.a, self.b
        # Fixme, this test is broken
        #test = join_by(('a', 'b'), a, b)
        #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
        #                    (7, 57, 107, 102), (8, 58, 108, 103),
        #                    (9, 59, 109, 104)],
        #                   dtype=[('a', int), ('b', int),
        #                          ('c', int), ('d', int)])
        #assert_equal(test, control)
        # Hack to avoid pyflakes unused variable warnings
        join_by(('a', 'b'), a, b)
        np.array([(5, 55, 105, 100), (6, 56, 106, 101),
                  (7, 57, 107, 102), (8, 58, 108, 103),
                  (9, 59, 109, 104)],
                 dtype=[('a', int), ('b', int),
                        ('c', int), ('d', int)])
    def test_outer_join(self):
        a, b = self.a, self.b
        test = join_by(('a', 'b'), a, b, 'outer')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (5, 65, -1, 100), (6, 56, 106, -1),
                            (6, 66, -1, 101), (7, 57, 107, -1),
                            (7, 67, -1, 102), (8, 58, 108, -1),
                            (8, 68, -1, 103), (9, 59, 109, -1),
                            (9, 69, -1, 104), (10, 70, -1, 105),
                            (11, 71, -1, 106), (12, 72, -1, 107),
                            (13, 73, -1, 108), (14, 74, -1, 109)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0)],
                           dtype=[('a', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)
    def test_leftouter_join(self):
        a, b = self.a, self.b
        test = join_by(('a', 'b'), a, b, 'leftouter')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (6, 56, 106, -1), (7, 57, 107, -1),
                            (8, 58, 108, -1), (9, 59, 109, -1)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1)],
                           dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
        assert_equal(test, control)
class TestJoinBy2(TestCase):
    """Tests for the r1postfix/r2postfix options of join_by."""
    # NOTE(review): setUp is declared as a classmethod (unusual for a
    # per-test fixture) -- the fixtures are rebuilt on the class, not
    # per-instance. Verify this is intentional.
    @classmethod
    def setUp(cls):
        cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('c', int)])
        cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('d', int)])
    def test_no_r1postfix(self):
        # Basic test of join_by no_r1postfix
        a, b = self.a, self.b
        test = join_by(
            'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)
    def test_no_postfix(self):
        # Both postfixes empty is ambiguous and must raise ValueError.
        self.assertRaises(ValueError, join_by, 'a', self.a, self.b,
                          r1postfix='', r2postfix='')
    def test_no_r2postfix(self):
        # Basic test of join_by no_r2postfix
        a, b = self.a, self.b
        test = join_by(
            'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b1', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)
    def test_two_keys_two_vars(self):
        a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(50, 60), np.arange(10, 20))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
        b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(65, 75), np.arange(0, 10))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
        control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                            (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                            (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                            (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                            (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                           dtype=[('k', int), ('a', int), ('b1', int),
                                  ('b2', int), ('c1', int), ('c2', int)])
        test = join_by(
            ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)
class TestAppendFieldsObj(TestCase):
"""
Test append_fields with arrays containing objects
"""
# https://github.com/numpy/numpy/issues/2346
def setUp(self):
from datetime import date
self.data = dict(obj=date(2000, 1, 1))
def test_append_to_objects(self):
"Test append_fields when the base array contains objects"
obj = self.data['obj']
x = np.array([(obj, 1.), (obj, 2.)],
dtype=[('A', object), ('B', float)])
y = np.array([10, 20], dtype=int)
test = append_fields(x, 'C', data=y, usemask=False)
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
def test_append_with_objects(self):
"Test append_fields when the appended data contains objects"
obj = self.data['obj']
x = np.array([(10, 1.), (20, 2.)], dtype=[('A', int), ('B', float)])
y = np.array([obj, obj], dtype=object)
test = append_fields(x, 'C', data=y, dtypes=object, usemask=False)
control = np.array([(10, 1.0, obj), (20, 2.0, obj)],
dtype=[('A', int), ('B', float), ('C', object)])
assert_equal(test, control)
if __name__ == '__main__':
    # Allow this test module to be executed directly as a script.
    run_module_suite()
| bsd-3-clause |
pbmanis/acq4 | acq4/devices/MultiClamp/taskGUI.py | 3 | 13805 | # -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.util import Qt
import sys
from acq4.devices.Device import TaskGui
from acq4.util.SequenceRunner import *
from acq4.pyqtgraph.WidgetGroup import WidgetGroup
import numpy
from .TaskTemplate import *
from acq4.util.debug import *
import sip
class MultiClampTaskGui(TaskGui):
    """Task-runner GUI for one MultiClamp amplifier channel.

    Lets the user design a command waveform, choose amplifier mode / signals /
    gains / holding value, and plots the recorded input (top) next to the
    generated command (bottom).  Sample rate and point count are taken from
    the associated DAQ device's GUI.
    """
    #sigSequenceChanged = Qt.Signal(object) ## defined upstream
    def __init__(self, dev, taskRunner):
        TaskGui.__init__(self, dev, taskRunner)
        daqDev = self.dev.getDAQName()
        self.daqUI = self.taskRunner.getDevice(daqDev)
        self.traces = {} ## Stores traces from a sequence to allow average plotting
        self.resetInpPlots = False ## Signals result handler to clear plots before adding a new one
        self.currentCmdPlot = None
        self._block_update = False # blocks plotting during state changes
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.ui.splitter_2.setStretchFactor(0, 0)
        self.ui.splitter_2.setStretchFactor(1, 1)
        self.ui.splitter.setStretchFactor(0, 3)
        self.ui.splitter.setStretchFactor(1, 1)
        self.stateGroup = WidgetGroup(self)
        #self.ui.waveGeneratorWidget.setTimeScale(1e-3)
        self.ui.waveGeneratorWidget.setMeta('x', units='s', siPrefix=True, dec=True, step=0.5, minStep=1e-6)
        # Labels whose text must be rewritten when the command units flip (V <-> A).
        self.unitLabels = [self.ui.waveGeneratorLabel, self.ui.holdingCheck]
        #self.modeSignalList = self.dev.listModeSignals()
        self.mode = None
        self.setMode('I=0')
        self.ui.topPlotWidget.registerPlot(self.dev.name() + '.Input')
        self.ui.topPlotWidget.setDownsampling(ds=True, auto=True, mode='peak')
        self.ui.topPlotWidget.setClipToView(True)
        self.ui.bottomPlotWidget.registerPlot(self.dev.name() + '.Command')
        self.ui.bottomPlotWidget.setDownsampling(ds=True, auto=True, mode='peak')
        self.ui.bottomPlotWidget.setClipToView(True)
        # Pull initial timing from the DAQ GUI, then stay in sync with it.
        self.daqChanged(self.daqUI.currentState())
        self.daqUI.sigChanged.connect(self.daqChanged)
        self.ui.waveGeneratorWidget.sigDataChanged.connect(self.updateWaves)
        self.ui.waveGeneratorWidget.sigParametersChanged.connect(self.sequenceChanged)
        self.stateGroup.sigChanged.connect(self.uiStateChanged)
        self.dev.sigStateChanged.connect(self.devStateChanged)
        self.dev.sigHoldingChanged.connect(self.devHoldingChanged)
        # Prime the enable/disable logic and mirror the device's last state.
        self.uiStateChanged('', '')
        self.devStateChanged()
    def uiStateChanged(self, name, value):
        """React to any widget change: keep mode radio buttons, the holding
        check, and the check-box/control enabling rules consistent."""
        if 'ModeRadio' in name:
            self.setMode()
        # Holding is meaningless in I=0 mode, so force it off there.
        if self.getMode() == 'I=0':
            self.ui.holdingCheck.setChecked(False)
            self.ui.holdingCheck.setEnabled(False)
        else:
            self.ui.holdingCheck.setEnabled(True)
        checkMap = {
            'holdingCheck': self.ui.holdingSpin,
            'primarySignalCheck': self.ui.primarySignalCombo,
            'secondarySignalCheck': self.ui.secondarySignalCombo,
            'primaryGainCheck': self.ui.primaryGainSpin,
            'secondaryGainCheck': self.ui.secondaryGainSpin,
        }
        ## For each check box, enable its corresponding control
        if name in checkMap:
            checkMap[name].setEnabled(value)
            self.devStateChanged()
    def devStateChanged(self, state=None):
        """Copy the device's last-known state into every control the user has
        NOT taken over (i.e. whose check box left it disabled)."""
        mode = self.getMode()
        state = self.dev.getLastState(mode)
        if not self.ui.holdingSpin.isEnabled():
            self.ui.holdingSpin.setValue(state['holding'])
        if not self.ui.primaryGainSpin.isEnabled():
            self.ui.primaryGainSpin.setValue(state['primaryGain'])
        if not self.ui.secondaryGainSpin.isEnabled():
            self.ui.secondaryGainSpin.setValue(state['secondaryGain'])
        psig = ssig = None
        if not self.ui.primarySignalCombo.isEnabled():
            psig = state['primarySignal']
        if not self.ui.secondarySignalCombo.isEnabled():
            ssig = state['secondarySignal']
        self.setSignals(psig, ssig)
    def devHoldingChanged(self, dev, mode):
        """Track holding-value changes made on the device itself."""
        # Ignore changes for amplifier modes other than the one selected here.
        if mode != self.getMode():
            return
        if not self.ui.holdingSpin.isEnabled():
            state = self.dev.getLastState(mode)
            self.ui.holdingSpin.setValue(state['holding'])
    def saveState(self):
        """Return a dict describing the full GUI state (for task storage)."""
        state = self.stateGroup.state().copy()
        state['mode'] = self.getMode()
        state['primarySignal'] = str(self.ui.primarySignalCombo.currentText())
        state['secondarySignal'] = str(self.ui.secondarySignalCombo.currentText())
        return state
    def restoreState(self, state):
        """Restore a state previously produced by saveState().

        Plot updates are suppressed (_block_update) until the whole state is
        in place, then a single updateWaves() refresh is done.
        """
        block = self._block_update
        try:
            self._block_update = True
            self.setMode(state['mode'])
            if 'primarySignal' in state and 'secondarySignal' in state:
                self.setSignals(state['primarySignal'], state['secondarySignal'])
            self.stateGroup.setState(state)
            self.devStateChanged()
        except:
            printExc('Error while restoring MultiClamp task GUI state:')
        finally:
            self._block_update = block
        self.updateWaves()
    def daqChanged(self, state):
        """Adopt the DAQ's sample rate / point count and rebuild the x axis."""
        self.rate = state['rate']
        self.numPts = state['numPts']
        self.timeVals = numpy.linspace(0, float(self.numPts)/self.rate, self.numPts)
        self.updateWaves()
    def listSequence(self):
        """Return the sequence parameters defined by the wave generator."""
        return self.ui.waveGeneratorWidget.listSequences()
    def sequenceChanged(self):
        # Notify the task runner that our sequence parameters changed.
        self.sigSequenceChanged.emit(self.dev.name())
    def updateWaves(self):
        """Re-plot all sequence command waveforms (grey) plus the current
        single-mode waveform (red)."""
        if self._block_update:
            return
        self.clearCmdPlots()
        ## compute sequence waves
        params = {}
        ps = self.ui.waveGeneratorWidget.listSequences()
        for k in ps:
            params[k] = range(len(ps[k]))
        waves = []
        runSequence(lambda p: waves.append(self.getSingleWave(p)), params, list(params.keys()))
        # Plot all waves but disable auto-range first to improve performance.
        autoRange = self.ui.bottomPlotWidget.getViewBox().autoRangeEnabled()
        self.ui.bottomPlotWidget.enableAutoRange(x=False, y=False)
        try:
            for w in waves:
                if w is not None:
                    self.plotCmdWave(w, color=Qt.QColor(100, 100, 100), replot=False)
            ## display single-mode wave in red
            single = self.getSingleWave()
            if single is not None:
                p = self.plotCmdWave(single, color=Qt.QColor(200, 100, 100))
                p.setZValue(1000)
        finally:
            # re-enable auto range if needed
            self.ui.bottomPlotWidget.enableAutoRange(x=autoRange[0], y=autoRange[1])
    def clearCmdPlots(self):
        """Remove all command-waveform plots from the bottom plot."""
        self.ui.bottomPlotWidget.clear()
        self.currentCmdPlot = None
    def taskSequenceStarted(self):
        # Defer clearing the input plots until the first result arrives.
        self.resetInpPlots = True
    def clearInpPlots(self):
        """Drop cached traces and clear the recorded-input plot."""
        self.traces = {}
        self.ui.topPlotWidget.clear()
    def taskStarted(self, params):
        """Highlight (in green) the command waveform of the task that just
        started running."""
        ## Draw green trace for current command waveform
        if self.currentCmdPlot is not None:
            self.ui.bottomPlotWidget.removeItem(self.currentCmdPlot)
        # Keep only the sequence parameters addressed to this device.
        params = dict([(p[1], params[p]) for p in params if p[0] == self.dev.name()])
        cur = self.getSingleWave(params)
        if cur is not None:
            self.currentCmdPlot = self.plotCmdWave(cur, color=Qt.QColor(100, 200, 100))
            self.currentCmdPlot.setZValue(1001)
    def plotCmdWave(self, data, color=Qt.QColor(100, 100, 100), replot=True):
        """Plot one command waveform against the current time axis and return
        the plot item (or None for empty data)."""
        # NOTE(review): the 'replot' argument is currently unused in the body.
        if data is None:
            return
        plot = self.ui.bottomPlotWidget.plot(data, x=self.timeVals)
        plot.setPen(Qt.QPen(color))
        return plot
    def generateTask(self, params=None):
        """Build the task-command dict for this device, honoring only the
        settings whose check boxes the user enabled."""
        state = self.stateGroup.state()
        if params is None:
            params = {}
        task = {}
        mode = self.getMode()
        task['mode'] = mode
        task['recordState'] = True
        #if self.ui.primarySignalCheck.isChecked():
            #task['primary'] = self.ui.primarySignalCombo.currentText()
        #if self.ui.secondarySignalCheck.isChecked():
            #task['secondary'] = self.ui.secondarySignalCombo.currentText()
        if state['primarySignalCheck']:
            task['primarySignal'] = state['primarySignalCombo']
        if state['secondarySignalCheck']:
            task['secondarySignal'] = state['secondarySignalCombo']
        if state['primaryGainCheck']:
            task['primaryGain'] = state['primaryGainSpin']
        if state['secondaryGainCheck']:
            task['secondaryGain'] = state['secondaryGainSpin']
        if mode != 'I=0':
            ## Must scale command to V or A before sending to task system.
            wave = self.getSingleWave(params)
            if wave is not None:
                task['command'] = wave
            if state['holdingCheck']:
                task['holding'] = state['holdingSpin']
        #print "Task:", task
        return task
    def getSingleWave(self, params=None):
        """Return one command waveform (or None) for the given sequence
        parameter values, offset by the holding value."""
        state = self.stateGroup.state()
        h = state['holdingSpin']
        self.ui.waveGeneratorWidget.setOffset(h)
        ## waveGenerator generates values in V or A
        wave = self.ui.waveGeneratorWidget.getSingle(self.rate, self.numPts, params)
        if wave is None:
            return None
        return wave
    def getMode(self):
        """Read the selected amplifier mode ('IC', 'I=0' or 'VC') from the
        radio buttons and cache it in self.mode."""
        if self.ui.icModeRadio.isChecked():
            self.mode = 'IC'
        elif self.ui.i0ModeRadio.isChecked():
            self.mode = 'I=0'
        else:
            self.mode = 'VC'
        return self.mode
    def setMode(self, mode=None):
        """Switch the GUI to the given amplifier mode (or to the mode the
        radio buttons currently indicate when mode is None): update radio
        buttons, signal lists, unit labels/scaling and plot visibility."""
        if mode != self.mode:
            oldMode = self.mode
            if mode is None:
                mode = self.getMode()
            #print "Set mode to", mode
            # set radio button
            if mode == 'IC':
                self.ui.icModeRadio.setChecked(True)
            elif mode == 'I=0':
                self.ui.i0ModeRadio.setChecked(True)
            else:
                self.ui.vcModeRadio.setChecked(True)
            # update signal lists
            self.stateGroup.blockSignals(True)
            sigs = self.dev.listSignals(mode)
            #print "Signals:", sigs
            #print "-------"
            for s, c in [(sigs[0], self.ui.primarySignalCombo),(sigs[1], self.ui.secondarySignalCombo)]:
                c.clear()
                for ss in s:
                    c.addItem(ss)
            self.stateGroup.blockSignals(False)
            # Disable signal, holding, and gain checks (only when switching between v and i modes)
            if mode == 'VC' or oldMode == 'VC':
                self.ui.primarySignalCheck.setChecked(False)
                self.ui.secondarySignalCheck.setChecked(False)
                self.ui.holdingCheck.setChecked(False)
                self.ui.primaryGainCheck.setChecked(False)
                self.ui.secondaryGainCheck.setChecked(False)
                self.devStateChanged()
            # update unit labels and scaling
            if mode == 'VC':
                newUnit = 'V'
                oldUnit = 'A'
                spinOpts = dict(suffix='V', siPrefix=True, dec=True, step=0.5, minStep=1e-3)
                self.ui.waveGeneratorWidget.setMeta('y', **spinOpts)
                self.ui.waveGeneratorWidget.setMeta('xy', units='V*s', siPrefix=True, dec=True, step=0.5, minStep=1e-6)
            else:
                newUnit = 'A'
                oldUnit = 'V'
                spinOpts = dict(suffix='A', siPrefix=True, dec=True, step=0.5, minStep=1e-12)
                self.ui.waveGeneratorWidget.setMeta('y', **spinOpts)
                self.ui.waveGeneratorWidget.setMeta('xy', units='C', siPrefix=True, dec=True, step=0.5, minStep=1e-15)
            self.ui.holdingSpin.setOpts(**spinOpts)
            for l in self.unitLabels:
                text = str(l.text())
                l.setText(text.replace(oldUnit, newUnit))
            self.ui.topPlotWidget.setLabel('left', units=oldUnit)
            self.ui.bottomPlotWidget.setLabel('left', units=newUnit)
            ## Hide stim plot for I=0 mode
            if mode == 'I=0':
                self.ui.bottomPlotWidget.hide()
            else:
                self.ui.bottomPlotWidget.show()
            self.devStateChanged()
            self.mode = mode
    def setSignals(self, pri, sec):
        """Select the given primary/secondary signal names in their combo
        boxes; a None argument leaves that combo untouched."""
        #print "setSignals", pri, sec
        for c, s in [(self.ui.primarySignalCombo, pri), (self.ui.secondarySignalCombo, sec)]:
            if s is None:
                continue
            ind = c.findText(s)
            if ind == -1:
                for i in range(c.count()):
                    print(c.itemText(i))
                raise Exception('Signal "%s" does not exist' % s)
            c.setCurrentIndex(ind)
    def handleResult(self, result, params):
        """Plot the primary recorded trace of a finished task execution."""
        if self.resetInpPlots:
            self.resetInpPlots = False
            self.clearInpPlots()
        ## Plot the results
        #plot = self.ui.topPlotWidget.plot(result['primary'].view(numpy.ndarray) / self.inpScale, x=result.xvals('Time'), params=params)
        plot = self.ui.topPlotWidget.plot(result['primary'].view(numpy.ndarray), x=result.xvals('Time'), params=params)
    def quit(self):
        """Disconnect from the DAQ GUI and close both plots."""
        TaskGui.quit(self)
        if not sip.isdeleted(self.daqUI):
            Qt.QObject.disconnect(self.daqUI, Qt.SIGNAL('changed'), self.daqChanged)
        self.ui.topPlotWidget.close()
        self.ui.bottomPlotWidget.close()
| mit |
legalsylvain/OpenUpgrade | addons/stock/__init__.py | 376 | 1115 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from stock import *
import partner
import product
import procurement
import report
import wizard
import res_config
import controllers
| agpl-3.0 |
javaos74/neutron | neutron/extensions/dvr.py | 28 | 2674 | # Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions
# Name of the API attribute that marks a router as distributed.
DISTRIBUTED = 'distributed'
# Attribute map merged into the v2.0 'routers' resource by this extension.
EXTENDED_ATTRIBUTES_2_0 = {
    'routers': {
        DISTRIBUTED: {'allow_post': True,
                      'allow_put': True,
                      'is_visible': True,
                      'default': attributes.ATTR_NOT_SPECIFIED,
                      'convert_to': attributes.convert_to_boolean_if_not_none,
                      'enforce_policy': True},
    }
}
class DVRMacAddressNotFound(exceptions.NotFound):
    # Raised when no DVR MAC address entry exists for the requested host.
    message = _("Distributed Virtual Router Mac Address for "
                "host %(host)s does not exist.")
class MacAddressGenerationFailure(exceptions.ServiceUnavailable):
    # Raised when a unique DVR MAC address could not be generated for a host.
    message = _("Unable to generate unique DVR mac for host %(host)s.")
class Dvr(extensions.ExtensionDescriptor):
    """Extension class supporting distributed virtual router."""
    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Distributed Virtual Router"
    @classmethod
    def get_alias(cls):
        """Short alias used to reference the extension in the API."""
        return constants.L3_DISTRIBUTED_EXT_ALIAS
    @classmethod
    def get_description(cls):
        """One-line description shown in the extension listing."""
        return "Enables configuration of Distributed Virtual Routers."
    @classmethod
    def get_updated(cls):
        """Timestamp of the last change to this extension definition."""
        return "2014-06-1T10:00:00-00:00"
    def get_required_extensions(self):
        """This extension builds on top of the core 'router' extension."""
        return ["router"]
    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        # No new top-level resources; existing ones are merely extended.
        return []
    def get_extended_resources(self, version):
        """Attributes added to existing resources for the given API version."""
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
@six.add_metaclass(abc.ABCMeta)
class DVRMacAddressPluginBase(object):
    """Abstract interface a plugin must implement to manage the per-host
    DVR MAC address entries."""
    @abc.abstractmethod
    def delete_dvr_mac_address(self, context, host):
        """Remove the DVR MAC address entry for the given host."""
        pass
    @abc.abstractmethod
    def get_dvr_mac_address_list(self, context):
        """Return all known DVR MAC address entries."""
        pass
    @abc.abstractmethod
    def get_dvr_mac_address_by_host(self, context, host):
        """Return the DVR MAC address entry for the given host."""
        pass
| apache-2.0 |
MphasisWyde/eWamSublimeAdaptor | POC/v0_3_POC_with_project_aborted/third-party/requests/packages/urllib3/contrib/pyopenssl.py | 197 | 10094 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
from __future__ import absolute_import
try:
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout, error as SocketError
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
    ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
# TLS 1.1 / 1.2 are mapped only when both the stdlib and PyOpenSSL know them.
if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
try:
    _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
    # SSLv3 may be compiled out of the stdlib's ssl module; skip it.
    pass
# Map ssl.CERT_* requirement levels onto PyOpenSSL verification flags.
_openssl_verify = {
    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    ssl.CERT_REQUIRED:
        OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS
# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384
# Originals saved so extract_from_urllib3() can undo the monkey-patching.
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
    'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
    # Swap in the PyOpenSSL implementation and advertise SNI support.
    connection.ssl_wrap_socket = ssl_wrap_socket
    util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
    'Undo monkey-patching by :func:`inject_into_urllib3`.'
    # Restore the originals captured at import time.
    connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
    util.HAS_SNI = orig_util_HAS_SNI
# Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
    '''ASN.1 implementation for subjectAltNames support'''
    # There is no limit to how many SAN certificates a certificate may have,
    # however this needs to have some limit so we'll set an arbitrarily high
    # limit.
    sizeSpec = univ.SequenceOf.sizeSpec + \
        constraint.ValueSizeConstraint(1, 1024)
# Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
    """Return the list of dNSName strings found in *peer_cert*'s
    subjectAltName extension (empty when SAN parsing is unsupported)."""
    # Search through extensions
    dns_name = []
    if not SUBJ_ALT_NAME_SUPPORT:
        return dns_name
    general_names = SubjectAltName()
    for i in range(peer_cert.get_extension_count()):
        ext = peer_cert.get_extension(i)
        ext_name = ext.get_short_name()
        if ext_name != 'subjectAltName':
            continue
        # PyOpenSSL returns extension data in ASN.1 encoded form
        ext_dat = ext.get_data()
        decoded_dat = der_decoder.decode(ext_dat,
                                         asn1Spec=general_names)
        # decode() may yield both the parsed value and trailing bytes; only
        # SubjectAltName instances are inspected.
        for name in decoded_dat:
            if not isinstance(name, SubjectAltName):
                continue
            for entry in range(len(name)):
                component = name.getComponentByPosition(entry)
                if component.getName() != 'dNSName':
                    continue
                dns_name.append(str(component.getComponent()))
    return dns_name
class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.

    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''
    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        self.connection = connection
        self.socket = socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Count of outstanding makefile() handles; the real close() is
        # deferred until they are all gone.
        self._makefile_refs = 0
    def fileno(self):
        return self.socket.fileno()
    def makefile(self, mode, bufsize=-1):
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)
    def recv(self, *args, **kwargs):
        """Receive data, translating PyOpenSSL exceptions into the stdlib
        socket semantics callers expect."""
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return b''
            else:
                raise SocketError(e)
        except OpenSSL.SSL.ZeroReturnError as e:
            # A clean TLS shutdown from the peer is reported as EOF.
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            # No TLS record available yet; wait for the socket (honoring its
            # timeout) and retry.
            rd, wd, ed = select.select(
                [self.socket], [], [], self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)
        else:
            return data
    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)
    def _send_until_done(self, data):
        # Retry until OpenSSL accepts the write (the socket may be busy).
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                _, wlist, _ = select.select([], [self.socket], [],
                                            self.socket.gettimeout())
                if not wlist:
                    raise timeout()
                continue
    def sendall(self, data):
        # OpenSSL only writes up to SSL_WRITE_BLOCKSIZE at a time, so chunk.
        total_sent = 0
        while total_sent < len(data):
            sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
            total_sent += sent
    def shutdown(self):
        # FIXME rethrow compatible exceptions should we ever use this
        self.connection.shutdown()
    def close(self):
        if self._makefile_refs < 1:
            try:
                return self.connection.close()
            except OpenSSL.SSL.Error:
                return
        else:
            self._makefile_refs -= 1
    def getpeercert(self, binary_form=False):
        """Return the peer certificate in the same shape (dict, or DER bytes
        when binary_form is true) as the stdlib ssl module."""
        x509 = self.connection.get_peer_certificate()
        if not x509:
            return x509
        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)
        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': [
                ('DNS', value)
                for value in get_subj_alt_name(x509)
            ]
        }
    def _reuse(self):
        self._makefile_refs += 1
    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ca_cert_dir=None):
    """PyOpenSSL-backed replacement for urllib3's ssl_wrap_socket().

    Builds an OpenSSL context from the given key/cert/CA material, performs
    the TLS handshake (sending server_hostname via SNI) and returns a
    WrappedSocket around the established connection.  Failures are re-raised
    as ssl.SSLError so callers see the stdlib exception type.
    """
    ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
    if certfile:
        keyfile = keyfile or certfile  # Match behaviour of the normal python ssl library
        ctx.use_certificate_file(certfile)
    if keyfile:
        ctx.use_privatekey_file(keyfile)
    if cert_reqs != ssl.CERT_NONE:
        ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
    if ca_certs or ca_cert_dir:
        try:
            ctx.load_verify_locations(ca_certs, ca_cert_dir)
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
    else:
        ctx.set_default_verify_paths()
    # Disable TLS compression to mitigate CRIME attack (issue #309)
    OP_NO_COMPRESSION = 0x20000
    ctx.set_options(OP_NO_COMPRESSION)
    # Set list of supported ciphersuites.
    ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
    cnx = OpenSSL.SSL.Connection(ctx, sock)
    # NOTE(review): server_hostname is passed to SNI unconditionally; a None
    # value would fail here -- confirm callers always supply a hostname.
    cnx.set_tlsext_host_name(server_hostname)
    cnx.set_connect_state()
    while True:
        # Retry the handshake whenever OpenSSL needs more data from the
        # (possibly non-blocking) socket.
        try:
            cnx.do_handshake()
        except OpenSSL.SSL.WantReadError:
            rd, _, _ = select.select([sock], [], [], sock.gettimeout())
            if not rd:
                raise timeout('select timed out')
            continue
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad handshake: %r' % e)
        break
    return WrappedSocket(cnx, sock)
| mit |
MichelRottleuthner/RIOT | tests/ps_schedstatistics/tests/01-run.py | 2 | 2328 | #!/usr/bin/env python3
# Copyright (C) 2017 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
# Expected `ps` output, one regular expression per line (each entry is
# passed to pexpect's child.expect()).  Backslashes are doubled so the
# regex engine still receives `\d` / `\.` without relying on Python's
# deprecated unrecognized-escape fallback (DeprecationWarning / W605 on
# Python 3.6+); the resulting string values are byte-identical.
PS_EXPECTED = (
    ('\tpid | name | state Q | pri | stack ( used) | '
     'base addr | current | runtime | switches'),
    ('\t - | isr_stack | - - | - | \\d+ ( -?\\d+) | '
     '0x\\d+ | 0x\\d+'),
    ('\t 1 | idle | pending Q | 15 | \\d+ ( -?\\d+) | '
     '0x\\d+ | 0x\\d+ | \\d+\\.\\d+% | \\d+'),
    ('\t 2 | main | running Q | 7 | \\d+ ( -?\\d+) | '
     '0x\\d+ | 0x\\d+ | \\d+\\.\\d+% | \\d+'),
    ('\t 3 | thread | bl rx _ | 6 | \\d+ ( -?\\d+) | '
     '0x\\d+ | 0x\\d+ | \\d+\\.\\d+% | \\d+'),
    ('\t 4 | thread | bl rx _ | 6 | \\d+ ( -?\\d+) | '
     '0x\\d+ | 0x\\d+ | \\d+\\.\\d+% | \\d+'),
    ('\t 5 | thread | bl rx _ | 6 | \\d+ ( -?\\d+) | '
     '0x\\d+ | 0x\\d+ | \\d+\\.\\d+% | \\d+'),
    ('\t 6 | thread | bl mutex _ | 6 | \\d+ ( -?\\d+) | '
     '0x\\d+ | 0x\\d+ | \\d+\\.\\d+% | \\d+'),
    ('\t 7 | thread | bl rx _ | 6 | \\d+ ( -?\\d+) | '
     '0x\\d+ | 0x\\d+ | \\d+\\.\\d+% | \\d+'),
    ('\t | SUM | | | \\d+ (\\d+)')
)
def _check_startup(child):
for i in range(5):
child.expect_exact('Creating thread #{}, next={}'
.format(i, (i + 1) % 5))
def _check_help(child):
child.sendline('')
child.expect('>')
child.sendline('help')
child.expect_exact('Command Description')
child.expect_exact('---------------------------------------')
child.expect_exact('reboot Reboot the node')
child.expect_exact('ps Prints information about '
'running threads.')
def _check_ps(child):
child.sendline('ps')
for line in PS_EXPECTED:
child.expect(line)
def testfunc(child):
    """Full test sequence: boot messages, shell `help`, then `ps` output."""
    _check_startup(child)
    _check_help(child)
    _check_ps(child)
if __name__ == "__main__":
sys.path.append(os.path.join(os.environ['RIOTTOOLS'], 'testrunner'))
from testrunner import run
sys.exit(run(testfunc))
| lgpl-2.1 |
jillesme/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/botinfo.py | 127 | 2054 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# FIXME: We should consider hanging one of these off the tool object.
class BotInfo(object):
    """Formats identifying information about the bot running a command."""
    def __init__(self, tool, port_name):
        self._tool = tool
        self._port_name = port_name
    def summary_text(self):
        """Return a one-line 'Bot/Port/Platform' description for log output."""
        # bot_id is also stored on the options dictionary on the tool.
        bot_id = self._tool.status_server.bot_id
        prefix = ""
        if bot_id:
            prefix = "Bot: %s " % (bot_id)
        platform_name = self._tool.platform.display_name()
        return "%sPort: %s Platform: %s" % (prefix, self._port_name, platform_name)
| bsd-3-clause |
thehyve/variant | eggs/django-1.3.1-py2.7.egg/django/template/debug.py | 232 | 3797 | from django.conf import settings
from django.template.base import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
class DebugLexer(Lexer):
    """Lexer that records, for every token, the (origin, (start, end)) span
    of template text it came from, enabling the template debug page."""
    def __init__(self, template_string, origin):
        super(DebugLexer, self).__init__(template_string, origin)
    def tokenize(self):
        "Return a list of tokens from a given template_string"
        result, upto = [], 0
        # Walk all {% %}/{{ }}/{# #} matches; text between matches becomes
        # plain-text tokens so the whole template is covered.
        for match in tag_re.finditer(self.template_string):
            start, end = match.span()
            if start > upto:
                result.append(self.create_token(self.template_string[upto:start], (upto, start), False))
                upto = start
            result.append(self.create_token(self.template_string[start:end], (start, end), True))
            upto = end
        last_bit = self.template_string[upto:]
        if last_bit:
            result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False))
        return result
    def create_token(self, token_string, source, in_tag):
        # Same as the base implementation, but tag the token with its
        # (origin, span) source so errors can point at the template text.
        token = super(DebugLexer, self).create_token(token_string, in_tag)
        token.source = self.origin, source
        return token
class DebugParser(Parser):
    """Parser that propagates token source spans onto nodes and exceptions
    so template errors can highlight the offending template text."""
    def __init__(self, lexer):
        super(DebugParser, self).__init__(lexer)
        # Stack of (command, source-span) for currently open block tags.
        self.command_stack = []
    def enter_command(self, command, token):
        self.command_stack.append( (command, token.source) )
    def exit_command(self):
        self.command_stack.pop()
    def error(self, token, msg):
        return self.source_error(token.source, msg)
    def source_error(self, source,msg):
        # Attach the source span so the debug page can show the template.
        e = TemplateSyntaxError(msg)
        e.source = source
        return e
    def create_nodelist(self):
        return DebugNodeList()
    def create_variable_node(self, contents):
        return DebugVariableNode(contents)
    def extend_nodelist(self, nodelist, node, token):
        # Every node remembers where in the template it was defined.
        node.source = token.source
        super(DebugParser, self).extend_nodelist(nodelist, node, token)
    def unclosed_block_tag(self, parse_until):
        # The innermost open block tag is the one left unclosed.
        command, source = self.command_stack.pop()
        msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
        raise self.source_error(source, msg)
    def compile_function_error(self, token, e):
        if not hasattr(e, 'source'):
            e.source = token.source
class DebugNodeList(NodeList):
    """NodeList that annotates rendering errors with the node's template
    source span.  (Python 2 syntax: `except X, e` / three-argument raise.)"""
    def render_node(self, node, context):
        try:
            result = node.render(context)
        except TemplateSyntaxError, e:
            # Preserve an existing source attribute if the error already
            # carries one from deeper in the template.
            if not hasattr(e, 'source'):
                e.source = node.source
            raise
        except Exception, e:
            from sys import exc_info
            # Wrap arbitrary errors so the debug page can show the template
            # location; re-raise with the original traceback.
            wrapped = TemplateSyntaxError(u'Caught %s while rendering: %s' %
                (e.__class__.__name__, force_unicode(e, errors='replace')))
            wrapped.source = node.source
            wrapped.exc_info = exc_info()
            raise wrapped, None, wrapped.exc_info[2]
        return result
class DebugVariableNode(VariableNode):
    # VariableNode that attaches this node's source span to syntax errors and
    # applies localization/escaping to the resolved output (Python 2 syntax).
    def render(self, context):
        try:
            output = self.filter_expression.resolve(context)
            output = localize(output, use_l10n=context.use_l10n)
            output = force_unicode(output)
        except TemplateSyntaxError, e:
            if not hasattr(e, 'source'):
                e.source = self.source
            raise
        except UnicodeDecodeError:
            # Undecodable bytes render as empty output rather than crashing.
            return ''
        if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
            return escape(output)
        else:
            return output
| apache-2.0 |
yanheven/nova | nova/api/openstack/compute/schemas/v3/cells.py | 70 | 3321 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
# JSON Schema validating the request body of the cells "create" API.
create = {
    'type': 'object',
    'properties': {
        'cell': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'type': {
                    'type': 'string',
                    'enum': ['parent', 'child'],
                },
                # NOTE: In unparse_transport_url(), a url consists of the
                # following parameters:
                # "qpid://<username>:<password>@<rpc_host>:<rpc_port>/"
                # or
                # "rabbit://<username>:<password>@<rpc_host>:<rpc_port>/"
                # Then the url is stored into transport_url of cells table
                # which is defined with String(255).
                'username': {
                    'type': 'string', 'maxLength': 255,
                    'pattern': '^[a-zA-Z0-9-_]*$'
                },
                'password': {
                    # Allow to specify any string for strong password.
                    'type': 'string', 'maxLength': 255,
                },
                'rpc_host': parameter_types.hostname_or_ip_address,
                'rpc_port': parameter_types.tcp_udp_port,
                'rpc_virtual_host': parameter_types.hostname_or_ip_address,
            },
            # Only 'name' is mandatory when creating a cell.
            'required': ['name'],
            'additionalProperties': False,
        },
    },
    'required': ['cell'],
    'additionalProperties': False,
}
# JSON Schema for the cells "update" API body.  Same shape as `create`,
# except no property is required inside 'cell' (partial updates allowed).
update = {
    'type': 'object',
    'properties': {
        'cell': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'type': {
                    'type': 'string',
                    'enum': ['parent', 'child'],
                },
                'username': {
                    'type': 'string', 'maxLength': 255,
                    'pattern': '^[a-zA-Z0-9-_]*$'
                },
                'password': {
                    # Allow to specify any string for strong password.
                    'type': 'string', 'maxLength': 255,
                },
                'rpc_host': parameter_types.hostname_or_ip_address,
                'rpc_port': parameter_types.tcp_udp_port,
                'rpc_virtual_host': parameter_types.hostname_or_ip_address,
            },
            'additionalProperties': False,
        },
    },
    'required': ['cell'],
    'additionalProperties': False,
}
# JSON Schema for the cells "sync_instances" action; all filters optional.
sync_instances = {
    'type': 'object',
    'properties': {
        'project_id': parameter_types.project_id,
        'deleted': parameter_types.boolean,
        'updated_since': {
            'type': 'string',
            'format': 'date-time',
        },
    },
    'additionalProperties': False,
}
| apache-2.0 |
rebroad/bitcoin | qa/rpc-tests/multi_rpc.py | 32 | 4575 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
    """Test the multi-user rpcauth config option.

    Two rpcauth entries (users 'rt' and 'rt2') are appended to bitcoin.conf;
    the test then verifies that the original cookie credentials and both
    rpcauth credential pairs are accepted, while any wrong username/password
    combination is rejected with HTTP 401.
    """

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = False
        self.num_nodes = 1

    def setup_chain(self):
        super().setup_chain()
        # Append rpcauth to bitcoin.conf before initialization
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcauth+"\n")
            f.write(rpcauth2+"\n")

    def setup_network(self):
        self.nodes = self.setup_nodes()

    def _rpc_rejected(self, authpair):
        """POST a getbestblockhash RPC using HTTP basic auth *authpair*
        ("user:password"); return True when the node answers 401."""
        url = urllib.parse.urlparse(self.nodes[0].url)
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        rejected = resp.status == 401
        conn.close()
        return rejected

    def run_test(self):

        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urllib.parse.urlparse(self.nodes[0].url)

        # Old authpair (from the node's own credentials) must keep working.
        authpair = url.username + ':' + url.password

        # Passwords generated via share/rpcuser for the two rpcauth entries
        # written in setup_chain().
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="

        # Original credentials and both rpcauth pairs are accepted.
        assert_equal(self._rpc_rejected(authpair), False)
        assert_equal(self._rpc_rejected("rt:" + password), False)
        assert_equal(self._rpc_rejected("rt2:" + password2), False)

        # Wrong login name with rt's password
        assert_equal(self._rpc_rejected("rtwrong:" + password), True)
        # Wrong password for rt
        assert_equal(self._rpc_rejected("rt:" + password + "wrong"), True)
        # Wrong password for rt2
        assert_equal(self._rpc_rejected("rt2:" + password2 + "wrong"), True)
if __name__ == '__main__':
    # Run the functional test when invoked directly.
    HTTPBasicsTest().main()
| mit |
yaojingwu1992/XlsxWriter | xlsxwriter/test/worksheet/test_sparkline11.py | 8 | 9334 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
    """
    Test assembling a complete Worksheet file.
    """

    def test_assemble_xml_file(self):
        """Test writing a worksheet with no cell data."""
        self.maxDiff = None

        fh = StringIO()
        worksheet = Worksheet()
        worksheet._set_filehandle(fh)
        worksheet.select()
        worksheet.name = 'Sheet1'
        # Sparkline markup is only written for Excel 2010+ files.
        worksheet.excel_version = 2010

        data = [-2, 2, 3, -1, 0]
        worksheet.write_row('A1', data)
        worksheet.write_row('A2', data)
        worksheet.write_row('A3', data)
        worksheet.write_row('A4', [1, 2, 3, 4, 5])

        # Set up sparklines.
        # Three groups exercising custom axes, group min/max, hidden data and
        # a date axis.  Note that the expected XML below lists the groups in
        # reverse insertion order (F3, F2, F1).
        worksheet.add_sparkline('F1', {'range': 'A1:E1',
                                       'max': 0.5,
                                       'min': -0.5,
                                       'axis': True,
                                       'reverse': True,
                                       'empty_cells': 'zero',
                                       'weight': 0.25,
                                       'high_point': True,
                                       'low_point': True,
                                       'negative_points': True,
                                       'first_point': True,
                                       'last_point': True,
                                       'markers': True,
                                       })

        worksheet.add_sparkline('F2', {'range': 'A2:E2',
                                       'max': 'group',
                                       'min': 'group',
                                       'empty_cells': 'connect',
                                       'weight': 2.25,
                                       })

        worksheet.add_sparkline('F3', {'range': 'A3:E3',
                                       'max': 'group',
                                       'min': '0',
                                       'show_hidden': True,
                                       'weight': 6,
                                       'date_axis': 'A4:E4',
                                       })

        worksheet._assemble_xml_file()

        # Expected XML; compared element-by-element via _xml_to_list.
        exp = _xml_to_list("""
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
                  <dimension ref="A1:E4"/>
                  <sheetViews>
                    <sheetView tabSelected="1" workbookViewId="0"/>
                  </sheetViews>
                  <sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
                  <sheetData>
                    <row r="1" spans="1:5" x14ac:dyDescent="0.25">
                      <c r="A1">
                        <v>-2</v>
                      </c>
                      <c r="B1">
                        <v>2</v>
                      </c>
                      <c r="C1">
                        <v>3</v>
                      </c>
                      <c r="D1">
                        <v>-1</v>
                      </c>
                      <c r="E1">
                        <v>0</v>
                      </c>
                    </row>
                    <row r="2" spans="1:5" x14ac:dyDescent="0.25">
                      <c r="A2">
                        <v>-2</v>
                      </c>
                      <c r="B2">
                        <v>2</v>
                      </c>
                      <c r="C2">
                        <v>3</v>
                      </c>
                      <c r="D2">
                        <v>-1</v>
                      </c>
                      <c r="E2">
                        <v>0</v>
                      </c>
                    </row>
                    <row r="3" spans="1:5" x14ac:dyDescent="0.25">
                      <c r="A3">
                        <v>-2</v>
                      </c>
                      <c r="B3">
                        <v>2</v>
                      </c>
                      <c r="C3">
                        <v>3</v>
                      </c>
                      <c r="D3">
                        <v>-1</v>
                      </c>
                      <c r="E3">
                        <v>0</v>
                      </c>
                    </row>
                    <row r="4" spans="1:5" x14ac:dyDescent="0.25">
                      <c r="A4">
                        <v>1</v>
                      </c>
                      <c r="B4">
                        <v>2</v>
                      </c>
                      <c r="C4">
                        <v>3</v>
                      </c>
                      <c r="D4">
                        <v>4</v>
                      </c>
                      <c r="E4">
                        <v>5</v>
                      </c>
                    </row>
                  </sheetData>
                  <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
                  <extLst>
                    <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
                      <x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
                        <x14:sparklineGroup manualMin="0" lineWeight="6" dateAxis="1" displayEmptyCellsAs="gap" displayHidden="1" minAxisType="custom" maxAxisType="group">
                          <x14:colorSeries theme="4" tint="-0.499984740745262"/>
                          <x14:colorNegative theme="5"/>
                          <x14:colorAxis rgb="FF000000"/>
                          <x14:colorMarkers theme="4" tint="-0.499984740745262"/>
                          <x14:colorFirst theme="4" tint="0.39997558519241921"/>
                          <x14:colorLast theme="4" tint="0.39997558519241921"/>
                          <x14:colorHigh theme="4"/>
                          <x14:colorLow theme="4"/>
                          <xm:f>Sheet1!A4:E4</xm:f>
                          <x14:sparklines>
                            <x14:sparkline>
                              <xm:f>Sheet1!A3:E3</xm:f>
                              <xm:sqref>F3</xm:sqref>
                            </x14:sparkline>
                          </x14:sparklines>
                        </x14:sparklineGroup>
                        <x14:sparklineGroup lineWeight="2.25" displayEmptyCellsAs="span" minAxisType="group" maxAxisType="group">
                          <x14:colorSeries theme="4" tint="-0.499984740745262"/>
                          <x14:colorNegative theme="5"/>
                          <x14:colorAxis rgb="FF000000"/>
                          <x14:colorMarkers theme="4" tint="-0.499984740745262"/>
                          <x14:colorFirst theme="4" tint="0.39997558519241921"/>
                          <x14:colorLast theme="4" tint="0.39997558519241921"/>
                          <x14:colorHigh theme="4"/>
                          <x14:colorLow theme="4"/>
                          <x14:sparklines>
                            <x14:sparkline>
                              <xm:f>Sheet1!A2:E2</xm:f>
                              <xm:sqref>F2</xm:sqref>
                            </x14:sparkline>
                          </x14:sparklines>
                        </x14:sparklineGroup>
                        <x14:sparklineGroup manualMax="0.5" manualMin="-0.5" lineWeight="0.25" markers="1" high="1" low="1" first="1" last="1" negative="1" displayXAxis="1" minAxisType="custom" maxAxisType="custom" rightToLeft="1">
                          <x14:colorSeries theme="4" tint="-0.499984740745262"/>
                          <x14:colorNegative theme="5"/>
                          <x14:colorAxis rgb="FF000000"/>
                          <x14:colorMarkers theme="4" tint="-0.499984740745262"/>
                          <x14:colorFirst theme="4" tint="0.39997558519241921"/>
                          <x14:colorLast theme="4" tint="0.39997558519241921"/>
                          <x14:colorHigh theme="4"/>
                          <x14:colorLow theme="4"/>
                          <x14:sparklines>
                            <x14:sparkline>
                              <xm:f>Sheet1!A1:E1</xm:f>
                              <xm:sqref>F1</xm:sqref>
                            </x14:sparkline>
                          </x14:sparklines>
                        </x14:sparklineGroup>
                      </x14:sparklineGroups>
                    </ext>
                  </extLst>
                </worksheet>
                """)

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(got, exp)
| bsd-2-clause |
j00bar/ansible | lib/ansible/modules/cloud/amazon/lambda_alias.py | 77 | 12327 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda_alias
short_description: Creates, updates or deletes AWS Lambda function aliases.
description:
- This module allows the management of AWS Lambda functions aliases via the Ansible
framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
itself and M(lambda_event) to manage event source mappings.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
options:
function_name:
description:
- The name of the function alias.
required: true
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
name:
description:
- Name of the function alias.
required: true
aliases: ['alias_name']
description:
description:
- A short, user-defined function alias description.
required: false
version:
description:
- Version associated with the Lambda function alias.
A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
required: false
aliases: ['function_version']
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Simple example to create a lambda function and publish a version
- hosts: localhost
gather_facts: no
vars:
state: present
project_folder: /path/to/deployment/package
deployment_package: lambda.zip
account: 123456789012
production_version: 5
tasks:
- name: AWS Lambda Function
lambda:
state: "{{ state | default('present') }}"
name: myLambdaFunction
publish: True
description: lambda function description
code_s3_bucket: package-bucket
code_s3_key: "lambda/{{ deployment_package }}"
local_path: "{{ project_folder }}/{{ deployment_package }}"
runtime: python2.7
timeout: 5
handler: lambda.handler
memory_size: 128
role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
- name: show results
debug:
var: lambda_facts
# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
- name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Dev
description: Development is $LATEST version
# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
- name: "alias 'QA' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: QA
version: "{{ lambda_facts.Version }}"
description: "QA is version {{ lambda_facts.Version }}"
when: lambda_facts.Version != "$LATEST"
# The Prod alias will have a fixed version based on a variable
- name: "alias 'Prod' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Prod
version: "{{ production_version }}"
description: "Production is version {{ production_version }}"
'''
RETURN = '''
---
alias_arn:
description: Full ARN of the function, including the alias
returned: success
type: string
sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
description:
description: A short description of the alias
returned: success
type: string
sample: The development stage for my hot new app
function_version:
description: The qualifier that the alias refers to
returned: success
type: string
sample: $LATEST
name:
description: The name of the alias assigned
returned: success
type: string
sample: dev
'''
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, boto3=True):
        # NOTE: the *boto3* flag parameter shadows the boto3 module inside
        # this method; the name is kept for backward compatibility with
        # existing callers.
        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)

            self.resource_client = dict()
            if not resources:
                resources = ['lambda']

            # Work on a copy: the original code appended 'iam' directly to
            # the caller's list, mutating the argument as a side effect.
            resources = list(resources)
            resources.append('iam')

            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # Best effort: derive the account id from the caller's IAM user ARN;
        # fall back to '' when it cannot be determined.
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        """Return the cached boto3 client for *resource* (default 'lambda')."""
        return self.resource_client[resource]
def pc(key):
    """
    Changes python key into Pascale case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case parameter name
    :return: PascalCase equivalent
    """
    return ''.join(part.capitalize() for part in key.split('_'))
def set_api_params(module, module_params):
    """
    Sets module parameters to those expected by the boto3 API.

    :param module: Ansible module reference
    :param module_params: iterable of snake_case parameter names to copy
    :return: dict keyed by PascalCase names; only truthy values are included
    """
    return {pc(name): module.params.get(name)
            for name in module_params
            if module.params.get(name)}
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    Side effect: normalizes module.params['function_version'] to a string,
    mapping 0 to '$LATEST'.

    :param module: Ansible module reference
    :param aws: AWS client connection (unused; kept for signature compatibility)
    :return: None
    """
    function_name = module.params['function_name']

    # validate function name -- raw string avoids the deprecated '\-'
    # escape sequence the original non-raw pattern relied on
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
    if module.params['function_version'] == 0:
        module.params['function_version'] = '$LATEST'
    else:
        module.params['function_version'] = str(module.params['function_version'])
def get_lambda_alias(module, aws):
    """
    Returns the lambda function alias if it exists.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return: alias facts dict, or None when the alias does not exist
    """
    client = aws.client('lambda')

    # set API parameters
    api_params = set_api_params(module, ('function_name', 'name'))

    # check if alias exists and get facts
    try:
        results = client.get_alias(**api_params)
    except ClientError as e:
        # Only ClientError carries a .response with the error code; a
        # missing alias is the expected "absent" case.
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            results = None
        else:
            module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
    except (ParamValidationError, MissingParametersError) as e:
        # These exceptions have no .response attribute; the previous combined
        # handler would have raised AttributeError when reading it.
        module.fail_json(msg='Error retrieving function alias: {0}'.format(e))

    return results
def lambda_alias(module, aws):
    """
    Adds, updates or deletes lambda function aliases.

    Honors check mode: API calls are skipped but `changed` is still reported.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return dict: `changed` flag merged with the API response (or existing facts)
    """
    client = aws.client('lambda')
    results = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']

    facts = get_lambda_alias(module, aws)
    if facts:
        current_state = 'present'

    if state == 'present':
        if current_state == 'present':

            # check if alias has changed -- only version and description can change
            alias_params = ('function_version', 'description')
            for param in alias_params:
                # facts keys are PascalCase, hence pc()
                if module.params.get(param) != facts.get(pc(param)):
                    changed = True
                    break

            if changed:
                api_params = set_api_params(module, ('function_name', 'name'))
                api_params.update(set_api_params(module, alias_params))

                if not module.check_mode:
                    try:
                        results = client.update_alias(**api_params)
                    except (ClientError, ParamValidationError, MissingParametersError) as e:
                        module.fail_json(msg='Error updating function alias: {0}'.format(e))

        else:
            # create new function alias
            api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))

            try:
                if not module.check_mode:
                    results = client.create_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating function alias: {0}'.format(e))

    else: # state = 'absent'
        if current_state == 'present':
            # delete the function
            api_params = set_api_params(module, ('function_name', 'name'))

            try:
                if not module.check_mode:
                    results = client.delete_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error deleting function alias: {0}'.format(e))

    # Prefer the fresh API response; fall back to the pre-existing facts.
    return dict(changed=changed, **dict(results or facts))
def main():
    """
    Main entry point.

    :return dict: ansible facts
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            # NOTE(review): required=True combined with default=None is
            # contradictory (the default can never apply); harmless here but
            # the defaults could be dropped for clarity.
            function_name=dict(required=True, default=None),
            name=dict(required=True, default=None, aliases=['alias_name']),
            function_version=dict(type='int', required=False, default=0, aliases=['version']),
            description=dict(required=False, default=None),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)
    results = lambda_alias(module, aws)

    module.exit_json(**camel_dict_to_snake_dict(results))
# ansible import module(s) kept at ~eof as recommended
# (these wildcard imports supply AnsibleModule, ec2_argument_spec, boto3_conn,
# get_aws_connection_info and camel_dict_to_snake_dict used above)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
stamhe/pybitcointools | cryptos/wallet_utils.py | 1 | 6572 | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious with changes by pycryptools developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import hmac
from .main import *
from .py2specials import *
from .py3specials import *
from . import constants as version
# Version numbers for BIP32 extended keys
# standard: xprv, xpub
# segwit in p2sh: yprv, ypub
# native segwit: zprv, zpub
# NOTE(review): some literals omit a leading zero nibble (e.g. 0x295b005 ==
# 0x0295b005); the values are numerically correct, and the "%08x" formatting
# in xprv_header/xpub_header below restores the full 4-byte prefix.
XPRV_HEADERS = {
    'standard': 0x0488ade4,
    'p2wpkh-p2sh': 0x049d7878,
    'p2wsh-p2sh': 0x295b005,
    'p2wpkh': 0x4b2430c,
    'p2wsh': 0x2aa7a99
}
XPUB_HEADERS = {
    'standard': 0x0488b21e,
    'p2wpkh-p2sh': 0x049d7cb2,
    'p2wsh-p2sh': 0x295b43f,
    'p2wpkh': 0x4b24746,
    'p2wsh': 0x2aa7ed3
}
# Short aliases used throughout this module (helpers come from py2/py3 specials).
bh2u = safe_hexlify
hfu = binascii.hexlify
bfh = safe_from_hex
# hmac_sha_512(key, msg) -> 64-byte HMAC-SHA512 digest
hmac_sha_512 = lambda x, y: hmac.new(x, y, hashlib.sha512).digest()
def rev_hex(s):
    """Reverse the byte order of hex string *s* and return it as hex."""
    reversed_bytes = bfh(s)[::-1]
    return bh2u(reversed_bytes)
def int_to_hex(i, length=1):
    """Convert integer *i* to a little-endian hex string of *length* bytes.

    Negative values are encoded two's-complement.  Values that do not fit in
    *length* bytes raise OverflowError instead of silently producing a hex
    string of the wrong width (the previous behaviour), and negative inputs
    no longer yield garbage from ``hex(i)[2:]``.
    """
    assert isinstance(i, int)
    range_size = pow(256, length)
    if i < -(range_size // 2) or i >= range_size:
        raise OverflowError('cannot convert int {} to hex ({} bytes)'.format(i, length))
    if i < 0:
        # two's complement encoding
        i = range_size + i
    s = hex(i)[2:].rstrip('L')  # rstrip('L') keeps Python 2 longs compatible
    s = "0" * (2 * length - len(s)) + s
    return rev_hex(s)
class InvalidPassword(Exception):
    """Raised when a wallet password fails to decrypt the stored data."""

    def __str__(self):
        return "Incorrect password"
try:
from Cryptodome.Cipher import AES
except:
AES = None
class InvalidPasswordException(Exception):
    """Legacy password-failure exception kept for API compatibility."""
    pass


class InvalidPadding(Exception):
    """Raised when PKCS#7 padding bytes are malformed."""
    pass
def assert_bytes(*args):
    """
    porting helper, assert args type

    Raises AssertionError (after logging the offending types) when any
    argument is not bytes/bytearray.
    """
    try:
        for x in args:
            assert isinstance(x, (bytes, bytearray))
    except AssertionError:
        # Catch only the type-check failure; the previous bare `except:`
        # would also have swallowed KeyboardInterrupt/SystemExit.
        print('assert bytes failed', list(map(type, args)))
        raise
def append_PKCS7_padding(data):
    """Return *data* extended with PKCS#7 padding to a 16-byte boundary."""
    assert_bytes(data)
    pad_length = 16 - (len(data) % 16)
    return data + bytes([pad_length]) * pad_length
def strip_PKCS7_padding(data):
    """Verify and remove PKCS#7 padding; raise InvalidPadding on any mismatch."""
    assert_bytes(data)
    if len(data) % 16 != 0 or len(data) == 0:
        raise InvalidPadding("invalid length")
    padlen = data[-1]
    if padlen > 16:
        raise InvalidPadding("invalid padding byte (large)")
    # Valid PKCS#7 pad lengths are 1..16.  A trailing 0x00 byte previously
    # slipped through (data[-0:] checks the whole buffer, data[0:-0] is empty)
    # and could silently return wrong plaintext.
    if padlen == 0:
        raise InvalidPadding("invalid padding byte (zero)")
    for i in data[-padlen:]:
        if i != padlen:
            raise InvalidPadding("invalid padding byte (inconsistent)")
    return data[0:-padlen]
def aes_encrypt_with_iv(key, iv, data):
    # AES-CBC encrypt *data* (bytes) with *key*/*iv*, PKCS#7-padding first.
    assert_bytes(key, iv, data)
    data = append_PKCS7_padding(data)
    if AES:
        # Fast path: PyCryptodome is available (guarded import above).
        e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
    else:
        # NOTE(review): pure-python fallback assumes `pyaes` is in scope via
        # the wildcard import from .main -- confirm it is actually importable.
        aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
        aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
        e = aes.feed(data) + aes.feed()  # empty aes.feed() flushes buffer
    return e
def aes_decrypt_with_iv(key, iv, data):
    # AES-CBC decrypt and strip PKCS#7 padding; bad padding (i.e. a wrong
    # key) is surfaced to callers as InvalidPassword.
    assert_bytes(key, iv, data)
    if AES:
        cipher = AES.new(key, AES.MODE_CBC, iv)
        data = cipher.decrypt(data)
    else:
        # NOTE(review): see aes_encrypt_with_iv -- pyaes fallback path.
        aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
        aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
        data = aes.feed(data) + aes.feed()  # empty aes.feed() flushes buffer
    try:
        return strip_PKCS7_padding(data)
    except InvalidPadding:
        raise InvalidPassword()
def EncodeAES(secret, s):
    """Encrypt bytes *s* with *secret* (AES-CBC, random IV); return base64(iv || ct)."""
    assert_bytes(s)
    iv = bytes(os.urandom(16))
    ciphertext = aes_encrypt_with_iv(secret, iv, s)
    return base64.b64encode(iv + ciphertext)
def DecodeAES(secret, e):
    """Decrypt base64-encoded iv||ciphertext *e* with *secret*; return plaintext bytes."""
    raw = bytes(base64.b64decode(e))
    iv, ciphertext = raw[:16], raw[16:]
    return aes_decrypt_with_iv(secret, iv, ciphertext)
def pw_encode(s, password):
    """Encrypt string *s* with *password*; any falsy password means plaintext pass-through."""
    if not password:
        return s
    secret = Hash(password)
    return EncodeAES(secret, to_bytes(s, "utf8")).decode('utf8')
def pw_decode(s, password):
    # Decrypt *s* with *password*; returns *s* unchanged when password is None.
    # NOTE(review): asymmetric with pw_encode, which treats ANY falsy password
    # (e.g. '') as "no encryption" -- here an empty-string password attempts a
    # real decryption.  Confirm this asymmetry is intended before changing it.
    if password is not None:
        secret = Hash(password)
        try:
            d = to_string(DecodeAES(secret, s), "utf8")
        except Exception:
            # Any decode/unpad failure is reported as a wrong password.
            raise InvalidPassword()
        return d
    else:
        return s
def is_new_seed(x, prefix=version.SEED_PREFIX):
    """Return True when normalized seed text *x* HMACs to the given version prefix."""
    from . import mnemonic
    normalized = mnemonic.normalize_text(x)
    digest_hex = bh2u(hmac_sha_512(b"Seed version", normalized.encode('utf8')))
    return digest_hex.startswith(prefix)
def seed_type(x):
    """Classify seed *x* as 'standard', 'segwit' or '2fa'; '' when unrecognized."""
    if is_new_seed(x):
        return 'standard'
    if is_new_seed(x, version.SEED_PREFIX_SW):
        return 'segwit'
    if is_new_seed(x, version.SEED_PREFIX_2FA):
        return '2fa'
    return ''


# A string is a seed iff it belongs to one of the known seed families.
is_seed = lambda x: bool(seed_type(x))
# NOTE(review): appears to map output script types to small integer prefixes
# used elsewhere in the package for key serialization -- confirm with callers.
SCRIPT_TYPES = {
    'p2pkh':0,
    'p2wpkh':1,
    'p2wpkh-p2sh':2,
    'p2sh':5,
    'p2wsh':6,
    'p2wsh-p2sh':7
}
# Bitcoin base58 alphabet (no 0/O/I/l to avoid visual ambiguity).
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
# Base43 alphabet -- presumably for compact QR encoding of transactions; verify.
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def inv_dict(d):
    """Return a new dict with keys and values of *d* swapped."""
    return dict((value, key) for key, value in d.items())
def is_minikey(text):
    # Minikeys are typically 22 or 30 characters, but this routine
    # permits any length of 20 or more provided the minikey is valid.
    # A valid minikey must begin with an 'S', be in base58, and when
    # suffixed with '?' have its SHA256 hash begin with a zero byte.
    # They are widely used in Casascius physical bitcoins.
    # NOTE(review): `sha256` comes from `.main` via wildcard import; the
    # `[0] == 0x00` comparison assumes it returns raw bytes (so indexing
    # yields an int).  Some helpers in this package return hex strings
    # instead -- confirm against the actual sha256 implementation.
    return (len(text) >= 20 and text[0] == 'S'
            and all(ord(c) in __b58chars for c in text)
            and sha256(text + '?')[0] == 0x00)
def minikey_to_private_key(text):
    # In the Casascius minikey scheme the private key is simply the
    # SHA256 digest of the minikey text itself.
    return sha256(text)
###################################### BIP32 ##############################

# Hardened-derivation flag: child indexes >= 2**31 are hardened (BIP32).
BIP32_PRIME = 0x80000000

def get_pubkeys_from_secret(secret):
    # public key
    # NOTE(review): `compress`/`privtopub` come from `.main` via wildcard
    # import; the constant True mirrors "compressed" in callers -- confirm.
    pubkey = compress(privtopub(secret))
    return pubkey, True
def xprv_header(xtype):
    """Serialized 4-byte version prefix for an extended private key of *xtype*."""
    version_int = XPRV_HEADERS[xtype]
    return bfh("%08x" % version_int)


def xpub_header(xtype):
    """Serialized 4-byte version prefix for an extended public key of *xtype*."""
    version_int = XPUB_HEADERS[xtype]
    return bfh("%08x" % version_int)
erlimar/prototypeguide | src/lib/flask_cache/backends.py | 14 | 2427 | from werkzeug.contrib.cache import (BaseCache, NullCache, SimpleCache, MemcachedCache,
GAEMemcachedCache, FileSystemCache)
class SASLMemcachedCache(MemcachedCache):
    # MemcachedCache variant that authenticates via SASL using pylibmc's
    # binary protocol.
    def __init__(self, servers=None, default_timeout=300, key_prefix=None,
                 username=None, password=None):
        # Deliberately skip MemcachedCache.__init__: it would build its own
        # unauthenticated client; only BaseCache's timeout setup is needed.
        BaseCache.__init__(self, default_timeout)

        if servers is None:
            servers = ['127.0.0.1:11211']

        # Imported lazily so pylibmc is only required when this backend is used.
        import pylibmc
        self._client = pylibmc.Client(servers,
                                      username=username,
                                      password=password,
                                      binary=True)

        self.key_prefix = key_prefix
def null(app, config, args, kwargs):
    """Factory returning the no-op cache backend."""
    return NullCache()
def simple(app, config, args, kwargs):
    """Factory for the in-memory SimpleCache backend."""
    kwargs['threshold'] = config['CACHE_THRESHOLD']
    return SimpleCache(*args, **kwargs)
def memcached(app, config, args, kwargs):
    """Factory for the memcached backend."""
    args.append(config['CACHE_MEMCACHED_SERVERS'])
    kwargs['key_prefix'] = config['CACHE_KEY_PREFIX']
    return MemcachedCache(*args, **kwargs)
def saslmemcached(app, config, args, kwargs):
    """Factory for the SASL-authenticated memcached backend."""
    args.append(config['CACHE_MEMCACHED_SERVERS'])
    kwargs['username'] = config['CACHE_MEMCACHED_USERNAME']
    kwargs['password'] = config['CACHE_MEMCACHED_PASSWORD']
    kwargs['key_prefix'] = config['CACHE_KEY_PREFIX']
    return SASLMemcachedCache(*args, **kwargs)
def gaememcached(app, config, args, kwargs):
    """Factory for the Google App Engine memcached backend."""
    kwargs['key_prefix'] = config['CACHE_KEY_PREFIX']
    return GAEMemcachedCache(*args, **kwargs)
def filesystem(app, config, args, kwargs):
    """Factory for the filesystem cache backend."""
    args.append(config['CACHE_DIR'])
    kwargs['threshold'] = config['CACHE_THRESHOLD']
    return FileSystemCache(*args, **kwargs)
# RedisCache is supported since Werkzeug 0.7.
try:
    from werkzeug.contrib.cache import RedisCache
except ImportError:
    # Older Werkzeug: the redis factory is simply not defined.
    pass
else:
    def redis(app, config, args, kwargs):
        # Factory for the Redis backend; host/port default to localhost:6379,
        # password and key_prefix are only forwarded when configured.
        kwargs.update(dict(
            host=config.get('CACHE_REDIS_HOST', 'localhost'),
            port=config.get('CACHE_REDIS_PORT', 6379),
        ))
        password = config.get('CACHE_REDIS_PASSWORD')
        if password:
            kwargs['password'] = password

        key_prefix = config.get('CACHE_KEY_PREFIX')
        if key_prefix:
            kwargs['key_prefix'] = key_prefix

        return RedisCache(*args, **kwargs)
| mit |
hugoatease/magpie-django | magpie/management/admin.py | 1 | 1301 | # Copyright 2013-2015 Hugo Caille
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib import admin
from models import UserConfig, BandwidthAct, ConnectionLog, UserSettings
class UserSettingsAdmin(admin.ModelAdmin):
    # Columns shown in the Django admin changelist for UserSettings.
    list_display = ('user', 'log_optout')
class UserConfigAdmin(admin.ModelAdmin):
    # Columns shown in the Django admin changelist for UserConfig.
    list_display = ('user', 'server', 'address')
class BandwidthActAdmin(admin.ModelAdmin):
    # Columns shown in the Django admin changelist for BandwidthAct.
    list_display = ('user', 'server', 'begin', 'end', 'bytes_received', 'bytes_sent')
class ConnectionLogAdmin(admin.ModelAdmin):
list_display = ('user', 'date', 'server', 'origin')
admin.site.register(UserConfig, UserConfigAdmin)
admin.site.register(BandwidthAct, BandwidthActAdmin)
admin.site.register(ConnectionLog, ConnectionLogAdmin)
admin.site.register(UserSettings, UserSettingsAdmin) | apache-2.0 |
ojengwa/odoo | addons/lunch/report/report_lunch_order.py | 341 | 2771 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class report_lunch_order(osv.osv):
    """Read-only statistics model backed by a SQL view (created in init()).

    Aggregates lunch order lines per (date, user, note) with the summed
    product price. _auto = False because the table is a database view,
    not a real table managed by the ORM.
    """
    _name = "report.lunch.order.line"
    _description = "Lunch Orders Statistics"
    _auto = False
    _rec_name = 'date'
    _columns = {
        'date': fields.date('Date Order', readonly=True, select=True),
        'year': fields.char('Year', size=4, readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
            ('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
            ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'user_id': fields.many2one('res.users', 'User Name'),
        'price_total':fields.float('Total Price', readonly=True),
        'note' : fields.text('Note', readonly=True),
    }
    _order = 'date desc'
    def init(self, cr):
        # (Re)create the backing view; price_total joins each order line to
        # its product's list price and sums per (date, user, note) group.
        tools.drop_view_if_exists(cr, 'report_lunch_order_line')
        cr.execute("""
            create or replace view report_lunch_order_line as (
               select
                    min(lo.id) as id,
                    lo.user_id as user_id,
                    lo.date as date,
                    to_char(lo.date, 'YYYY') as year,
                    to_char(lo.date, 'MM') as month,
                    to_char(lo.date, 'YYYY-MM-DD') as day,
                    lo.note as note,
                    sum(lp.price) as price_total
            from
                lunch_order_line as lo
                    left join lunch_product as lp on (lo.product_id = lp.id)
            group by
                lo.date,lo.user_id,lo.note
            )
            """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CLVsol/oehealth_gs | oehealth_gs_medicament/__init__.py | 1 | 1431 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import oehealth_medicament
| agpl-3.0 |
chuan9/chromium-crosswalk | tools/cygprofile/mergetraces_unittest.py | 101 | 1535 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mergetraces
class GroupByProcessAndThreadIdTestBasic(unittest.TestCase):
    def runTest(self):
        """Events are regrouped per thread without interleaving PIDs."""
        # Each trace record is (sec, usec, 'pid:tid', function address).
        raw_trace = [
            (100, 10, '2000:2001', 0x5),
            (100, 11, '2000:2001', 0x3),
            (100, 13, '2000:1999', 0x8),
            (100, 14, '2000:2000', 0x7),
            (120, 13, '2001:2003', 0x9),
            (150, 12, '2001:2004', 0x6),
            (180, 11, '2000:2000', 0x1),
        ]
        # All events of PID 2000 must come before those of PID 2001, and
        # within a PID the events of one thread stay together.
        regrouped = [
            (100, 10, '2000:2001', 0x5),
            (100, 11, '2000:2001', 0x3),
            (100, 13, '2000:1999', 0x8),
            (100, 14, '2000:2000', 0x7),
            (180, 11, '2000:2000', 0x1),
            (120, 13, '2001:2003', 0x9),
            (150, 12, '2001:2004', 0x6),
        ]
        self.assertEqual(
            mergetraces.GroupByProcessAndThreadId(raw_trace), regrouped)
class GroupByProcessAndThreadIdFailsWithNonUniqueTIDs(unittest.TestCase):
    def runTest(self):
        """GroupByProcessAndThreadId must reject a TID shared by two PIDs."""
        # (sec, usec, 'pid:tid', function address) -- thread-ID 2001 appears
        # under two different processes, which the merger must treat as an
        # error.
        input_trace = [
            (100, 10, '1999:2001', 0x5),
            (100, 10, '1988:2001', 0x5),
        ]
        # assertRaises replaces the original's manual try/except/self.fail
        # dance; semantics are identical (any Exception subclass passes).
        with self.assertRaises(Exception):
            mergetraces.GroupByProcessAndThreadId(input_trace)
| bsd-3-clause |
jaeh/runtime | deps/v8/tools/testrunner/local/utils.py | 11 | 4316 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from os.path import exists
from os.path import isdir
from os.path import join
import platform
import re
import subprocess
import urllib2
def GetSuitePaths(test_root):
  """Return the names of all immediate subdirectories of |test_root|."""
  suites = []
  for entry in os.listdir(test_root):
    if isdir(join(test_root, entry)):
      suites.append(entry)
  return suites
def ReadLinesFrom(name):
  """Return the non-comment, non-blank lines of file |name| as a list.

  A '#' starts a comment running to end of line; surrounding whitespace is
  stripped and lines that end up empty are dropped.
  """
  with open(name) as f:
    return [text
            for text in (raw.split('#', 1)[0].strip() for raw in f)
            if text]
def GuessOS():
  """Map platform.system() onto the OS name used by the test runner.

  Returns None for systems the runner does not know about.
  """
  system = platform.system()
  if system.find('CYGWIN') >= 0:
    return 'cygwin'
  if system in ('Windows', 'Microsoft'):
    # On Windows Vista platform.system() can return 'Microsoft' with some
    # versions of Python, see http://bugs.python.org/issue1082
    return 'windows'
  return {
    'Linux': 'linux',
    'Darwin': 'macos',
    'FreeBSD': 'freebsd',
    'OpenBSD': 'openbsd',
    'SunOS': 'solaris',
    'NetBSD': 'netbsd',
    'AIX': 'aix',
  }.get(system)
def UseSimulator(arch):
  """True when |arch| is a simulated target the host cannot run natively."""
  machine = platform.machine()
  if not machine:
    # Preserve the original short-circuit value when the host machine name
    # cannot be determined.
    return machine
  return arch in ('mipsel', 'arm', 'arm64') and not arch.startswith(machine)
def DefaultArch():
  """Pick the default build architecture for the host machine.

  Deliberately returns 'ia32' even on 64-bit x86 hosts: the 32 bit VM is
  the default everywhere it can be built. Returns None for unknown hosts.
  """
  machine = platform.machine().lower()  # Windows 7 capitalizes 'AMD64'.
  if machine.startswith('arm'):
    return 'arm'
  if not machine or re.match('(x|i[3-6])86$', machine) is not None:
    return 'ia32'
  return {
    'i86pc': 'ia32',
    'x86_64': 'ia32',
    'amd64': 'ia32',
    'ppc64': 'ppc',
  }.get(machine)
def GuessWordsize():
  """Return '64' or '32' depending on the host machine name."""
  return '64' if '64' in platform.machine() else '32'
def IsWindows():
  # Convenience wrapper; GuessOS() handles the Vista 'Microsoft' quirk.
  return GuessOS() == 'windows'
def URLRetrieve(source, destination):
  """urllib is broken for SSL connections via a proxy therefore we
  can't use urllib.urlretrieve()."""
  if IsWindows():
    try:
      # In python 2.7.6 on windows, urlopen has a problem with redirects.
      # Try using curl instead. Note, this is fixed in 2.7.8.
      # NOTE(review): '-k' disables TLS certificate verification -- confirm
      # this is intended for the hosts being fetched.
      subprocess.check_call(["curl", source, '-k', '-L', '-o', destination])
      return
    except:
      # If there's no curl, fall back to urlopen.
      # NOTE(review): bare 'except' also swallows KeyboardInterrupt/SystemExit.
      print "Curl is currently not installed. Falling back to python."
      pass
  with open(destination, 'w') as f:
    f.write(urllib2.urlopen(source).read())
| apache-2.0 |
Storj/driveshare-graph | tests/test_storage.py | 2 | 1859 | import os
import unittest
from pymongo import MongoClient
import datetime as dt
import time
import sqlite3
import pygal
import driveshare_graph.storage as storage
class Storage(unittest.TestCase):
    """Integration tests for driveshare_graph.storage.

    NOTE(review): every test talks to a live MongoDB at localhost:27017
    (database 'GroupB', collection 'totalStorage') and some write sqlite
    files under tests/ -- these are integration tests, not unit tests.
    """
    def test_avg_gb_farmer(self):
        # Average GB per farmer must be positive for a non-empty collection.
        client = MongoClient('localhost', 27017)
        collection = client['GroupB']['totalStorage']
        avg = storage.avg_gb_farmer(collection)
        self.assertTrue(avg > 0)
    def test_init_stats_table(self):
        # Builds the stats table from Mongo data and checks a known row.
        conn = sqlite3.connect('tests/init_test.db')
        cursor = conn.cursor()
        storage.create_stats_table(conn, cursor)
        client = MongoClient('localhost', 27017)
        collection = client['GroupB']['totalStorage']
        storage.init_stats_table(conn, cursor, collection)
        # NOTE(review): 1515.23 / 339 are magic values tied to a specific
        # data snapshot -- confirm they match the seeded fixture.
        cursor.execute('SELECT date FROM stats WHERE tb = ? AND farmers = ?',
                       (1515.23, 339))
        data = cursor.fetchall()
        self.assertTrue(len(data) > 0)
    def test_update_stats_table(self):
        # An update run must leave more than one row in the table.
        conn = sqlite3.connect('tests/update_test.db')
        cursor = conn.cursor()
        client = MongoClient('localhost', 27017)
        collection = client['GroupB']['totalStorage']
        storage.update_stats_table(conn, cursor, collection)
        cursor.execute('SELECT * FROM stats')
        data = cursor.fetchall()
        self.assertTrue(len(data) > 1)
    def test_total_storage_graph(self):
        # Only checks the chart type, not its contents.
        client = MongoClient('localhost', 27017)
        collection = client['GroupB']['totalStorage']
        graph = storage.total_storage_graph(collection)
        self.assertTrue(isinstance(graph, pygal.Line))
    def test_total_farmers_graph(self):
        # Only checks the chart type, not its contents.
        client = MongoClient('localhost', 27017)
        collection = client['GroupB']['totalStorage']
        graph = storage.total_farmers_graph(collection)
        self.assertTrue(isinstance(graph, pygal.Line))
| mit |
PKRoma/poedit | deps/boost/libs/python/test/numpy/ndarray.py | 9 | 3957 | #!/usr/bin/env python
# Copyright Jim Bosch & Ankit Daftery 2010-2012.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import ndarray_ext
import unittest
import numpy
class TestNdarray(unittest.TestCase):
    """Tests for the Boost.Python numpy ndarray wrappers in ndarray_ext.

    Uses assertTrue instead of the deprecated unittest alias assert_
    (deprecated since Python 3.2, removed in 3.12) so the suite keeps
    running on modern interpreters; behavior is otherwise unchanged.
    """

    def testNdzeros(self):
        # zeros() must match numpy.zeros for every dtype/shape combination.
        for dtp in (numpy.int16, numpy.int32, numpy.float32, numpy.complex128):
            v = numpy.zeros(60, dtype=dtp)
            dt = numpy.dtype(dtp)
            for shape in ((60,),(6,10),(4,3,5),(2,2,3,5)):
                a1 = ndarray_ext.zeros(shape,dt)
                a2 = v.reshape(a1.shape)
                self.assertEqual(shape,a1.shape)
                self.assertTrue((a1 == a2).all())

    def testNdzeros_matrix(self):
        # numpy.matrix is deprecated upstream, but zeros_matrix's return type
        # is part of the contract under test, so it is kept deliberately.
        for dtp in (numpy.int16, numpy.int32, numpy.float32, numpy.complex128):
            dt = numpy.dtype(dtp)
            shape = (6, 10)
            a1 = ndarray_ext.zeros_matrix(shape, dt)
            a2 = numpy.matrix(numpy.zeros(shape, dtype=dtp))
            self.assertEqual(shape,a1.shape)
            self.assertTrue((a1 == a2).all())
            self.assertEqual(type(a1), type(a2))

    def testNdarray(self):
        # array() must round-trip a Python sequence for each dtype and
        # support reshaping to every listed shape.
        a = range(0,60)
        for dtp in (numpy.int16, numpy.int32, numpy.float32, numpy.complex128):
            v = numpy.array(a, dtype=dtp)
            dt = numpy.dtype(dtp)
            a1 = ndarray_ext.array(a)
            a2 = ndarray_ext.array(a,dt)
            self.assertTrue((a1 == v).all())
            self.assertTrue((a2 == v).all())
            for shape in ((60,),(6,10),(4,3,5),(2,2,3,5)):
                a1 = a1.reshape(shape)
                self.assertEqual(shape,a1.shape)
                a2 = a2.reshape(shape)
                self.assertEqual(shape,a2.shape)

    def testNdempty(self):
        # empty()/c_empty() only guarantee the shape (contents are garbage).
        for dtp in (numpy.int16, numpy.int32, numpy.float32, numpy.complex128):
            dt = numpy.dtype(dtp)
            for shape in ((60,),(6,10),(4,3,5),(2,2,3,5)):
                a1 = ndarray_ext.empty(shape,dt)
                a2 = ndarray_ext.c_empty(shape,dt)
                self.assertEqual(shape,a1.shape)
                self.assertEqual(shape,a2.shape)

    def testTranspose(self):
        for dtp in (numpy.int16, numpy.int32, numpy.float32, numpy.complex128):
            dt = numpy.dtype(dtp)
            for shape in ((6,10),(4,3,5),(2,2,3,5)):
                a1 = numpy.empty(shape,dt)
                a2 = a1.transpose()
                a1 = ndarray_ext.transpose(a1)
                self.assertEqual(a1.shape,a2.shape)

    def testSqueeze(self):
        a1 = numpy.array([[[3,4,5]]])
        a2 = a1.squeeze()
        a1 = ndarray_ext.squeeze(a1)
        self.assertEqual(a1.shape,a2.shape)

    def testReshape(self):
        a1 = numpy.empty((2,2))
        a2 = ndarray_ext.reshape(a1,(1,4))
        self.assertEqual(a2.shape,(1,4))

    def testShapeIndex(self):
        # shape_index must honour negative indices and raise IndexError when
        # out of bounds, mirroring native tuple indexing.
        a = numpy.arange(24)
        a.shape = (1,2,3,4)
        def shape_check(i):
            print(i)
            self.assertEqual(ndarray_ext.shape_index(a,i) ,a.shape[i] )
        for i in range(4):
            shape_check(i)
        for i in range(-1,-5,-1):
            shape_check(i)
        try:
            ndarray_ext.shape_index(a,4) # out of bounds -- should raise IndexError
            self.assertTrue(False)
        except IndexError:
            pass

    def testStridesIndex(self):
        # Same contract as testShapeIndex, but for the strides tuple.
        a = numpy.arange(24)
        a.shape = (1,2,3,4)
        def strides_check(i):
            print(i)
            self.assertEqual(ndarray_ext.strides_index(a,i) ,a.strides[i] )
        for i in range(4):
            strides_check(i)
        for i in range(-1,-5,-1):
            strides_check(i)
        try:
            ndarray_ext.strides_index(a,4) # out of bounds -- should raise IndexError
            self.assertTrue(False)
        except IndexError:
            pass
if __name__=="__main__":
unittest.main()
| mit |
mj10777/QGIS | python/plugins/processing/algs/qgis/RandomPointsPolygons.py | 4 | 8707 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RandomPointsPolygons.py
---------------------
Date : April 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
import os
import random
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsApplication,
QgsField,
QgsFeatureSink,
QgsFeature,
QgsFields,
QgsGeometry,
QgsPointXY,
QgsWkbTypes,
QgsSpatialIndex,
QgsFeatureRequest,
QgsExpression,
QgsDistanceArea,
QgsProject,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterDistance,
QgsProcessingParameterNumber,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterExpression,
QgsProcessingParameterEnum,
QgsProcessingParameterDefinition)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class RandomPointsPolygons(QgisAlgorithm):
    """Processing algorithm: generate random points inside polygon features.

    For each input polygon an expression yields either a fixed point count
    or a density (points per map-unit area, STRATEGY == 1). Points are drawn
    by rejection sampling inside the feature's bounding box, optionally
    honouring a minimum inter-point distance.
    """
    # Parameter / output identifiers registered with the processing framework.
    INPUT = 'INPUT'
    EXPRESSION = 'EXPRESSION'
    MIN_DISTANCE = 'MIN_DISTANCE'
    STRATEGY = 'STRATEGY'
    OUTPUT = 'OUTPUT'
    def icon(self):
        return QgsApplication.getThemeIcon("/algorithms/mAlgorithmRandomPointsWithinPolygon.svg")
    def svgIconPath(self):
        return QgsApplication.iconPath("/algorithms/mAlgorithmRandomPointsWithinPolygon.svg")
    def group(self):
        return self.tr('Vector creation')
    def groupId(self):
        return 'vectorcreation'
    def __init__(self):
        super().__init__()
    def initAlgorithm(self, config=None):
        # Declares inputs (polygon source, strategy enum, per-feature
        # expression, minimum distance) and the point-layer output.
        self.strategies = [self.tr('Points count'),
                           self.tr('Points density')]
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer'),
                                                              [QgsProcessing.TypeVectorPolygon]))
        self.addParameter(QgsProcessingParameterEnum(self.STRATEGY,
                                                     self.tr('Sampling strategy'),
                                                     self.strategies,
                                                     False,
                                                     0))
        self.addParameter(QgsProcessingParameterExpression(self.EXPRESSION,
                                                           self.tr('Expression'),
                                                           parentLayerParameterName=self.INPUT))
        self.addParameter(QgsProcessingParameterDistance(self.MIN_DISTANCE,
                                                         self.tr('Minimum distance between points'),
                                                         0, self.INPUT, False, 0, 1000000000))
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT,
                                                            self.tr('Random points'),
                                                            type=QgsProcessing.TypeVectorPoint))
    def name(self):
        return 'randompointsinsidepolygons'
    def displayName(self):
        return self.tr('Random points inside polygons')
    def processAlgorithm(self, parameters, context, feedback):
        # Validate inputs; a missing source or unparsable expression aborts.
        source = self.parameterAsSource(parameters, self.INPUT, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
        strategy = self.parameterAsEnum(parameters, self.STRATEGY, context)
        minDistance = self.parameterAsDouble(parameters, self.MIN_DISTANCE, context)
        expression = QgsExpression(self.parameterAsString(parameters, self.EXPRESSION, context))
        if expression.hasParserError():
            raise QgsProcessingException(expression.parserErrorString())
        expressionContext = self.createExpressionContext(parameters, context, source)
        expression.prepare(expressionContext)
        # Output schema: a single integer 'id' attribute, unique per point.
        fields = QgsFields()
        fields.append(QgsField('id', QVariant.Int, '', 10, 0))
        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               fields, QgsWkbTypes.Point, source.sourceCrs(), QgsFeatureSink.RegeneratePrimaryKey)
        if sink is None:
            raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
        # Ellipsoidal area measurement, used by the density strategy.
        da = QgsDistanceArea()
        da.setSourceCrs(source.sourceCrs(), context.transformContext())
        da.setEllipsoid(context.project().ellipsoid())
        total = 100.0 / source.featureCount() if source.featureCount() else 0
        current_progress = 0
        pointId = 0
        for current, f in enumerate(source.getFeatures()):
            if feedback.isCanceled():
                break
            if not f.hasGeometry():
                continue
            current_progress = total * current
            feedback.setProgress(current_progress)
            expressionContext.setFeature(f)
            value = expression.evaluate(expressionContext)
            if expression.hasEvalError():
                feedback.pushInfo(
                    self.tr('Evaluation error for feature ID {}: {}').format(f.id(), expression.evalErrorString()))
                continue
            fGeom = f.geometry()
            # Prepared geometry engine makes repeated contains() checks cheap.
            engine = QgsGeometry.createGeometryEngine(fGeom.constGet())
            engine.prepareGeometry()
            bbox = fGeom.boundingBox()
            if strategy == 0:
                pointCount = int(value)
            else:
                # Density strategy: expression value is points per unit area.
                pointCount = int(round(value * da.measureArea(fGeom)))
            if pointCount == 0:
                feedback.pushInfo("Skip feature {} as number of points for it is 0.".format(f.id()))
                continue
            # Spatial index + points dict back the minimum-distance check.
            index = QgsSpatialIndex()
            points = dict()
            nPoints = 0
            nIterations = 0
            # Rejection sampling budget: give up after 200 tries per point.
            maxIterations = pointCount * 200
            feature_total = total / pointCount if pointCount else 1
            random.seed()
            while nIterations < maxIterations and nPoints < pointCount:
                if feedback.isCanceled():
                    break
                # Draw a uniform candidate inside the feature's bounding box;
                # keep it only if it lies inside the polygon and respects the
                # minimum distance to points accepted so far.
                rx = bbox.xMinimum() + bbox.width() * random.random()
                ry = bbox.yMinimum() + bbox.height() * random.random()
                p = QgsPointXY(rx, ry)
                geom = QgsGeometry.fromPointXY(p)
                if engine.contains(geom.constGet()) and \
                        vector.checkMinDistance(p, index, minDistance, points):
                    f = QgsFeature(nPoints)
                    f.initAttributes(1)
                    f.setFields(fields)
                    f.setAttribute('id', pointId)
                    f.setGeometry(geom)
                    sink.addFeature(f, QgsFeatureSink.FastInsert)
                    index.addFeature(f)
                    points[nPoints] = p
                    nPoints += 1
                    pointId += 1
                    feedback.setProgress(current_progress + int(nPoints * feature_total))
                nIterations += 1
            if nPoints < pointCount:
                feedback.pushInfo(self.tr('Could not generate requested number of random '
                                          'points. Maximum number of attempts exceeded.'))
        feedback.setProgress(100)
        return {self.OUTPUT: dest_id}
| gpl-2.0 |
abstract-open-solutions/management-system | __unported__/mgmtsystem_review/__openerp__.py | 3 | 1750 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Management System - Review",
"version": "1.0",
"author": "Savoir-faire Linux",
"website": "http://www.savoirfairelinux.com",
"license": "AGPL-3",
"category": "Management System",
"description": """\
This module enables you to manage reviews of your management system.
""",
"depends": [
'mgmtsystem_nonconformity',
'mgmtsystem_survey',
],
"data": [
'security/ir.model.access.csv',
'security/mgmtsystem_review_security.xml',
'review_sequence.xml',
'mgmtsystem_review.xml',
'report/review_report.xml',
],
"demo": [],
'installable': False,
"certificate": ''
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chengdh/openerp-ktv | openerp/pychart/axis_doc.py | 15 | 4884 | # -*- coding: utf-8 -*-
# automatically generated by generate_docs.py.
doc_x="""Attributes supported by this class are:
draw_tics_above(type:int) default="If true, tick lines and labels are drawn above the axis line.".
minor_tic_len(type:length in points (\\xref{unit})) default="The length of minor tick marks. The value can be negative, in which case the tick lines are drawn right of (or above) the axis.".
tic_label_offset(type:(x,y)) default="The location for drawing tick labels,
relative to the tip of the tick line.".
format(type:printf format string) default="The format string for tick labels.
It can be a `printf' style format string, or
a single-parameter function that takes an X (or Y) value
and returns a string. The appearance of the string produced here can be
controlled using escape sequences. <<font>>".
label_offset(type:(x,y) or None) default="The location for drawing the axis label,
relative to the middle point of the axis.
If the value is None, the label is displayed
below (or to the left of) of axis at the middle.".
label(type:str) default="The descriptive string displayed below (or to the left of) the axis. <<font>>.".
offset(type:length in points (\\xref{unit})) default="The location of the axis.
The value of 0 draws the
axis at the left (for the Y axis) or bottom (for the X axis)
edge of the drawing area.
".
tic_interval(type:Number or function) default="When the value is a number, it specifies the interval at which tick marks are drawn. Otherwise, the value must be a function that takes no argument and returns the list of numbers. The return value specifies the X or Y points at which tick marks are drawn.".
line_style(type:line_style.T) default="Specifies the style of axis and tick lines.".
tic_len(type:length in points (\\xref{unit})) default="The length of tick lines. The value can be negative, in which case the tick lines are drawn right of (or above) the axis.".
minor_tic_interval(type:Number or function) default="When the value is a number, it specifies the interval at which minor tick marks are drawn. Otherwise, the value must be a function that takes no argument and returns the list of numbers. The return value specifies the X or Y points at which minor tick marks are drawn.".
"""
doc_y="""Attributes supported by this class are:
draw_tics_right(type:int) default="If true, tick lines and labels are drawn right of the axis line.".
minor_tic_len(type:length in points (\\xref{unit})) default="The length of minor tick marks. The value can be negative, in which case the tick lines are drawn right of (or above) the axis.".
tic_label_offset(type:(x,y)) default="The location for drawing tick labels,
relative to the tip of the tick line.".
format(type:printf format string) default="The format string for tick labels.
It can be a `printf' style format string, or
a single-parameter function that takes an X (or Y) value
and returns a string. The appearance of the string produced here can be
controlled using escape sequences. <<font>>".
label_offset(type:(x,y) or None) default="The location for drawing the axis label,
relative to the middle point of the axis.
If the value is None, the label is displayed
below (or to the left of) of axis at the middle.".
label(type:str) default="The descriptive string displayed below (or to the left of) the axis. <<font>>.".
offset(type:length in points (\\xref{unit})) default="The location of the axis.
The value of 0 draws the
axis at the left (for the Y axis) or bottom (for the X axis)
edge of the drawing area.
".
tic_interval(type:Number or function) default="When the value is a number, it specifies the interval at which tick marks are drawn. Otherwise, the value must be a function that takes no argument and returns the list of numbers. The return value specifies the X or Y points at which tick marks are drawn.".
line_style(type:line_style.T) default="Specifies the style of axis and tick lines.".
tic_len(type:length in points (\\xref{unit})) default="The length of tick lines. The value can be negative, in which case the tick lines are drawn right of (or above) the axis.".
minor_tic_interval(type:Number or function) default="When the value is a number, it specifies the interval at which minor tick marks are drawn. Otherwise, the value must be a function that takes no argument and returns the list of numbers. The return value specifies the X or Y points at which minor tick marks are drawn.".
"""
| agpl-3.0 |
crosswalk-project/crosswalk-test-suite | stability/stability-lowresource-android-tests/lowresource/TestApp.py | 15 | 6686 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Hao<haox.li@intel.com>
import sys
import commands
import subprocess
reload(sys)
sys.setdefaultencoding('utf-8')
ADB_CMD = "adb"
def doCMD(cmd):
    """Run |cmd| in a shell, echoing its output line by line.

    Blocks until the process exits and returns (exit_code, output_lines).
    NOTE(review): shell=True with an interpolated command string is
    injection-prone if cmd ever carries untrusted input -- confirm callers
    only pass trusted values.
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() stays None until the child exits; an empty line plus a set
        # return code means EOF was reached.
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
class TestApp():
device = ""
location = ""
pkgname = ""
activname = ""
def __init__(self, device, location, pkgname, activname):
self.device = device
self.location = location
self.pkgname = pkgname
self.activname = activname
def install(self):
action_status = False
if self.location.endswith(".apk"):
if not self.isInstalled():
cmd = "%s -s %s install %s" % (ADB_CMD, self.device, self.location)
(return_code, output) = doCMD(cmd)
if self.isInstalled():
action_status = True
else:
print "-->> %s fail to install." % self.location
else:
print "-->> %s has been installed." % self.pkgname
else:
print "-->> Invalid apk location: %s " % self.location
return action_status
def uninstall(self):
action_status = False
if self.isInstalled():
cmd = "%s -s %s uninstall %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if not self.isInstalled():
action_status = True
else:
print "-->> %s fail to uninstall." % self.pkgname
else:
print "-->> %s has not been installed." % self.pkgname
return action_status
def launch(self):
action_status = False
if not self.isRunning():
cmd = "%s -s %s shell am start -n %s/.%s" % (ADB_CMD, self.device, self.pkgname, self.activname)
(return_code, output) = doCMD(cmd)
if self.isRunning():
action_status = True
else:
print "-->> %s fail to launch." % self.pkgname
else:
print "-->> %s has been launched." % self.pkgname
return action_status
def switch(self):
action_status = False
# If in Activity, switch to background, otherwise switch to front
if self.isActivity():
# Switch to Home
# keycode
# 3 --> "KEYCODE_HOME"
cmd = "%s -s %s shell input keyevent 3" % (ADB_CMD, self.device)
(return_code, output) = doCMD(cmd)
if not self.isActivity():
action_status = True
else:
print "-->> %s fail to switch to background." % self.pkgname
else:
cmd = "%s -s %s shell am start -n %s/.%s" % (ADB_CMD, self.device, self.pkgname, self.activname)
(return_code, output) = doCMD(cmd)
if self.isActivity():
action_status = True
else:
print "-->> %s fail to switch to front." % self.pkgname
return action_status
def stop(self):
action_status = False
if self.isRunning():
cmd = "%s -s %s shell am force-stop %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if not self.isRunning():
action_status = True
else:
print "-->> %s fail to stop." % self.pkgname
else:
print "-->> %s has been stoped." % self.pkgname
return action_status
def isInstalled(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell pm list packages |grep %s|awk -F ':' '{print $2}'" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if self.pkgname in output:
action_status = True
return action_status
def isRunning(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell ps |grep %s|awk -F ' ' '{print $NF}'" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if self.pkgname in output:
action_status = True
return action_status
def isActivity(self):
    # Check whether this app owns the most recent task, i.e. is in the
    # foreground: grep `dumpsys activity` for the literal "Recent #0"
    # line and look for the package name in it.
    action_status = False
    if not self.pkgname == "":
        # Note: the grep pattern is the fixed string "Recent #0", not the
        # package name; the package is matched in Python below.
        cmd = "%s -s %s shell dumpsys activity |grep \"%s\"" % (ADB_CMD, self.device, "Recent #0")
        (return_code, output) = doCMD(cmd)
        # doCMD output is iterated line by line — presumably a list of
        # output lines; verify against doCMD's implementation.
        for line in output:
            if self.pkgname in line:
                action_status = True
                break
    return action_status
| bsd-3-clause |
Sodki/ansible | lib/ansible/modules/system/timezone.py | 26 | 22063 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Shinichi TAMURA (@tmshn)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Routing/support metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'curated'}

# Module documentation rendered by ansible-doc.
# Fixes two typos: "andC(hwclock)" -> "and C(hwclock)" and the stray
# space in "On SmartOS ,".
DOCUMENTATION = '''
---
module: timezone
short_description: Configure timezone setting
description:
  - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up the NTP, use M(service) module.
  - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
  - Several different tools are used depending on the OS/Distribution involved.
    For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock).
    On SmartOS, C(sm-set-timezone), for BSD, C(/etc/localtime) is modified.
  - As of version 2.3 support was added for SmartOS and BSDs.
  - Windows, AIX and HPUX are not supported, please let us know if you find any other OS/distro in which this fails.
version_added: "2.2"
options:
  name:
    description:
      - Name of the timezone for the system clock.
        Default is to keep current setting. B(At least one of name and
        hwclock are required.)
    required: false
  hwclock:
    description:
      - Whether the hardware clock is in UTC or in local timezone.
        Default is to keep current setting.
        Note that this option is recommended not to change and may fail
        to configure, especially on virtual environments such as AWS.
        B(At least one of name and hwclock are required.)
        I(Only used on Linux.)
    required: false
    aliases: ['rtc']
notes:
  - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone
author:
  - "Shinichi TAMURA (@tmshn)"
  - "Jasper Lievisse Adriaanse (@jasperla)"
'''
RETURN = '''
diff:
description: The differences about the given arguments.
returned: success
type: dictionary
contains:
before:
description: The values before change
type: dict
after:
description: The values after change
type: dict
'''
EXAMPLES = '''
- name: set timezone to Asia/Tokyo
timezone:
name: Asia/Tokyo
'''
import os
import platform
import random
import re
import string
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils.six import iteritems
class Timezone(object):
    """This is a generic Timezone manipulation class that is subclassed based on platform.

    A subclass may wish to override the following action methods:
        - get(key, phase) ... get the value from the system at `phase`
        - set(key, value) ... set the value to the current system
    """

    def __new__(cls, module):
        """Return the platform-specific subclass.

        It does not use load_platform_subclass() because it needs to judge based
        on whether the `timedatectl` command exists and is available.

        Args:
            module: The AnsibleModule.
        """
        if get_platform() == 'Linux':
            timedatectl = module.get_bin_path('timedatectl')
            # Trust timedatectl only if the binary exists AND actually runs
            # successfully (it may be installed on hosts without systemd).
            if timedatectl is not None and module.run_command(timedatectl)[0] == 0:
                return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
            else:
                return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
        elif re.match('^joyent_.*Z', platform.version()):
            # get_platform() returns SunOS, which is too broad. So look at the
            # platform version instead. However we have to ensure that we're not
            # running in the global zone where changing the timezone has no effect.
            zonename_cmd = module.get_bin_path('zonename')
            if zonename_cmd is not None:
                (rc, stdout, _ ) = module.run_command(zonename_cmd)
                if rc == 0 and stdout.strip() == 'global':
                    module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
            return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
        elif re.match('^(Free|Net|Open)BSD', platform.platform()):
            return super(Timezone, BSDTimezone).__new__(BSDTimezone)
        else:
            # Not supported yet: return the base class itself; its get()/set()
            # stubs abort with a clear "not implemented" error.
            return super(Timezone, Timezone).__new__(Timezone)

    def __init__(self, module):
        """Initialize of the class.

        Args:
            module: The AnsibleModule.
        """
        super(Timezone, self).__init__()
        # Messages collected during the run; reported on exit or failure.
        self.msg = []
        # `self.value` holds the values for each params on each phases.
        # Initially there's only info of "planned" phase, but the
        # `self.check()` function will fill out it.
        self.value = dict()
        for key in module.argument_spec:
            value = module.params[key]
            if value is not None:
                self.value[key] = dict(planned=value)
        self.module = module

    def abort(self, msg):
        """Abort the process with error message.

        This is just the wrapper of module.fail_json().

        Args:
            msg: The error message.
        """
        error_msg = ['Error message:', msg]
        if len(self.msg) > 0:
            error_msg.append('Other message(s):')
            error_msg.extend(self.msg)
        self.module.fail_json(msg='\n'.join(error_msg))

    def execute(self, *commands, **kwargs):
        """Execute the shell command.

        This is just the wrapper of module.run_command().

        Args:
            *commands: The command to execute.
                It will be concatenated with single space.
            **kwargs: Only 'log' key is checked.
                If kwargs['log'] is true, record the command to self.msg.

        Returns:
            stdout: Standard output of the command.
        """
        command = ' '.join(commands)
        # check_rc=True makes run_command fail the module on non-zero exit.
        (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
        if kwargs.get('log', False):
            self.msg.append('executed `%s`' % command)
        return stdout

    def diff(self, phase1='before', phase2='after'):
        """Calculate the difference between given 2 phases.

        Args:
            phase1, phase2: The names of phase to compare.

        Returns:
            diff: The difference of value between phase1 and phase2.
                This is in the format which can be used with the
                `--diff` option of ansible-playbook.
        """
        diff = {phase1: {}, phase2: {}}
        for key, value in iteritems(self.value):
            diff[phase1][key] = value[phase1]
            diff[phase2][key] = value[phase2]
        return diff

    def check(self, phase):
        """Check the state in given phase and set it to `self.value`.

        Args:
            phase: The name of the phase to check.

        Returns:
            NO RETURN VALUE
        """
        if phase == 'planned':
            # 'planned' values were already filled in by __init__.
            return
        for key, value in iteritems(self.value):
            value[phase] = self.get(key, phase)

    def change(self):
        """Make the changes effect based on `self.value`."""
        for key, value in iteritems(self.value):
            # Only touch settings that differ from the planned state.
            if value['before'] != value['planned']:
                self.set(key, value['planned'])

    # ===========================================
    # Platform specific methods (must be replaced by subclass).

    def get(self, key, phase):
        """Get the value for the key at the given phase.

        Called from self.check().

        Args:
            key: The key to get the value
            phase: The phase to get the value

        Return:
            value: The value for the key at the given phase.
        """
        self.abort('get(key, phase) is not implemented on target platform')

    def set(self, key, value):
        """Set the value for the key (of course, for the phase 'after').

        Called from self.change().

        Args:
            key: Key to set the value
            value: Value to set
        """
        self.abort('set(key, value) is not implemented on target platform')

    def _verify_timezone(self):
        # Ensure the planned timezone exists in the system zoneinfo database;
        # abort otherwise. Returns the path of the zoneinfo file.
        tz = self.value['name']['planned']
        tzfile = '/usr/share/zoneinfo/%s' % tz
        if not os.path.isfile(tzfile):
            self.abort('given timezone "%s" is not available' % tz)
        return tzfile
class SystemdTimezone(Timezone):
    """Timezone manipulation for systemd-powered Linux.

    Both the system timezone and the RTC (hardware clock) mode are read
    and changed exclusively through the `timedatectl` command.
    """

    # Patterns extracting the current values from `timedatectl status`.
    regexps = {
        'hwclock': re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
        'name': re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE),
    }

    # timedatectl subcommand used to change each key.
    subcmds = {
        'hwclock': 'set-local-rtc',
        'name': 'set-timezone',
    }

    def __init__(self, module):
        super(SystemdTimezone, self).__init__(module)
        self.timedatectl = module.get_bin_path('timedatectl', required=True)
        # Cache of `timedatectl status` output, one entry per phase.
        self.status = dict()
        # Fail early if the requested timezone is not present on disk.
        if 'name' in self.value:
            self._verify_timezone()

    def _get_status(self, phase):
        # Run `timedatectl status` at most once per phase.
        if phase not in self.status:
            self.status[phase] = self.execute(self.timedatectl, 'status')
        return self.status[phase]

    def get(self, key, phase):
        """Extract the value for `key` from the status output of `phase`."""
        raw = self.regexps[key].search(self._get_status(phase)).group(1)
        if key != 'hwclock':
            return raw
        # timedatectl reports "RTC in local TZ: yes/no"; map to local/UTC.
        return 'local' if self.module.boolean(raw) else 'UTC'

    def set(self, key, value):
        """Apply `value` for `key` via the matching timedatectl subcommand."""
        if key == 'hwclock':
            # set-local-rtc expects yes/no rather than local/UTC.
            value = 'yes' if value == 'local' else 'no'
        self.execute(self.timedatectl, self.subcmds[key], value, log=True)
class NosystemdTimezone(Timezone):
    """This is a Timezone manipulation class for non systemd-powered Linux.

    For timezone setting, it edits the following file and reflect changes:
        - /etc/sysconfig/clock ... RHEL/CentOS
        - /etc/timezone        ... Debian/Ubuntu
    For hwclock setting, it executes `hwclock --systohc` command with the
    '--utc' or '--localtime' option.
    """

    # Configuration file per key; distribution-specific entries are filled
    # in by __init__.
    conf_files = dict(
        name   =None,  # To be set in __init__
        hwclock=None,  # To be set in __init__
        adjtime='/etc/adjtime'
    )

    # Patterns extracting the current value from each configuration file.
    regexps = dict(
        name   =None,  # To be set in __init__
        hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
        adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
    )

    def __init__(self, module):
        super(NosystemdTimezone, self).__init__(module)
        # Validate given timezone
        if 'name' in self.value:
            tzfile = self._verify_timezone()
            # Default strategy: copy the zoneinfo file over /etc/localtime.
            self.update_timezone = self.module.get_bin_path('cp', required=True)
            self.update_timezone += ' %s /etc/localtime' % tzfile
        self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
        # Distribution-specific configurations
        if self.module.get_bin_path('dpkg-reconfigure') is not None:
            # Debian/Ubuntu
            self.update_timezone = self.module.get_bin_path('dpkg-reconfigure', required=True)
            self.update_timezone += ' --frontend noninteractive tzdata'
            self.conf_files['name'] = '/etc/timezone'
            self.conf_files['hwclock'] = '/etc/default/rcS'
            self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
            self.tzline_format = '%s\n'
        else:
            # RHEL/CentOS
            if self.module.get_bin_path('tzdata-update') is not None:
                self.update_timezone = self.module.get_bin_path('tzdata-update', required=True)
            # else:
            #   self.update_timezone  = 'cp ...' <- configured above
            self.conf_files['name'] = '/etc/sysconfig/clock'
            self.conf_files['hwclock'] = '/etc/sysconfig/clock'
            self.regexps['name'] = re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
            self.tzline_format = 'ZONE="%s"\n'
        # NOTE(review): update_hwclock was already assigned above; this
        # reassignment is redundant but harmless.
        self.update_hwclock = self.module.get_bin_path('hwclock', required=True)

    def _edit_file(self, filename, regexp, value):
        """Replace the first matched line with given `value`.

        If `regexp` matched more than once, other than the first line will be deleted.

        Args:
            filename: The name of the file to edit.
            regexp: The regular expression to search with.
            value: The line which will be inserted.
        """
        # Read the file
        try:
            file = open(filename, 'r')
        except IOError:
            self.abort('cannot read "%s"' % filename)
        else:
            lines = file.readlines()
            file.close()
        # Find the all matched lines
        matched_indices = []
        for i, line in enumerate(lines):
            if regexp.search(line):
                matched_indices.append(i)
        if len(matched_indices) > 0:
            insert_line = matched_indices[0]
        else:
            # No match: the new line goes at the top of the file.
            insert_line = 0
        # Remove all matched lines (reverse order keeps indices valid)
        for i in matched_indices[::-1]:
            del lines[i]
        # ...and insert the value
        lines.insert(insert_line, value)
        # Write the changes
        try:
            file = open(filename, 'w')
        except IOError:
            self.abort('cannot write to "%s"' % filename)
        else:
            file.writelines(lines)
            file.close()
        self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))

    def get(self, key, phase):
        # Read the current value for `key` from its configuration file.
        if key == 'hwclock' and os.path.isfile('/etc/adjtime'):
            # If /etc/adjtime exists, use that file.
            key = 'adjtime'
        filename = self.conf_files[key]
        try:
            file = open(filename, mode='r')
        except IOError:
            self.abort('cannot read configuration file "%s" for %s' % (filename, key))
        else:
            status = file.read()
            file.close()
            try:
                value = self.regexps[key].search(status).group(1)
            except AttributeError:
                # .search() returned None: no usable value in the file.
                self.abort('cannot find the valid value from configuration file "%s" for %s' % (filename, key))
            else:
                if key == 'hwclock':
                    # For key='hwclock'; convert yes/no -> UTC/local
                    if self.module.boolean(value):
                        value = 'UTC'
                    else:
                        value = 'local'
                elif key == 'adjtime':
                    # For key='adjtime'; convert LOCAL -> local
                    if value != 'UTC':
                        value = value.lower()
        return value

    def set_timezone(self, value):
        # Rewrite the timezone line in the config file, then run the
        # distribution-specific update command chosen in __init__.
        self._edit_file(filename=self.conf_files['name'],
                        regexp=self.regexps['name'],
                        value=self.tzline_format % value)
        self.execute(self.update_timezone)

    def set_hwclock(self, value):
        # Sync the hardware clock from the system clock in the given mode.
        if value == 'local':
            option = '--localtime'
        else:
            option = '--utc'
        self.execute(self.update_hwclock, '--systohc', option, log=True)

    def set(self, key, value):
        if key == 'name':
            self.set_timezone(value)
        elif key == 'hwclock':
            self.set_hwclock(value)
        else:
            self.abort('unknown parameter "%s"' % key)
class SmartOSTimezone(Timezone):
    """This is a Timezone manipulation class for SmartOS instances.

    It uses the C(sm-set-timezone) utility to set the timezone, and
    inspects C(/etc/default/init) to determine the current timezone.

    NB: A zone needs to be rebooted in order for the change to be
    activated.
    """

    def __init__(self, module):
        super(SmartOSTimezone, self).__init__(module)
        self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False)
        if not self.settimezone:
            module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.')

    def get(self, key, phase):
        """Lookup the current timezone name in `/etc/default/init`. If anything else
        is requested, or if the TZ field is not set we fail.
        """
        if key == 'name':
            try:
                # Use a context manager so the file handle is always closed
                # (previously it was leaked), and catch only I/O errors
                # instead of a bare `except`.
                with open('/etc/default/init', 'r') as f:
                    for line in f:
                        m = re.match('^TZ=(.*)$', line.strip())
                        if m:
                            return m.groups()[0]
            except IOError:
                self.module.fail_json(msg='Failed to read /etc/default/init')
        else:
            self.module.fail_json(msg='{0} is not a supported option on target platform'.format(key))

    def set(self, key, value):
        """Set the requested timezone through sm-set-timezone, an invalid timezone name
        will be rejected and we have no further input validation to perform.
        """
        if key == 'name':
            cmd = 'sm-set-timezone {0}'.format(value)
            (rc, stdout, stderr) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg=stderr)
            # sm-set-timezone knows no state and will always set the timezone.
            # XXX: https://github.com/joyent/smtools/pull/2
            # Raw string avoids the invalid '\*' escape in a normal string.
            m = re.match(r'^\* Changed (to)? timezone (to)? ({0}).*'.format(value), stdout.splitlines()[1])
            if not (m and m.groups()[-1] == value):
                self.module.fail_json(msg='Failed to set timezone')
        else:
            self.module.fail_json(msg='{0} is not a supported option on target platform'.
                                  format(key))
class BSDTimezone(Timezone):
    """This is the timezone implementation for *BSD which works simply through
    updating the `/etc/localtime` symlink to point to a valid timezone name under
    `/usr/share/zoneinfo`.
    """

    def __init__(self, module):
        super(BSDTimezone, self).__init__(module)

    def get(self, key, phase):
        """Lookup the current timezone by resolving `/etc/localtime`."""
        if key == 'name':
            try:
                tz = os.readlink('/etc/localtime')
                return tz.replace('/usr/share/zoneinfo/', '')
            except OSError:
                # Narrowed from a bare `except`: readlink raises OSError when
                # /etc/localtime is missing or not a symlink.
                self.module.fail_json(msg='Could not read /etc/localtime')
        else:
            self.module.fail_json(msg='{0} is not a supported option on target platform'.
                                  format(key))

    def set(self, key, value):
        """Point /etc/localtime at the zoneinfo file for `value`."""
        if key == 'name':
            # First determine if the requested timezone is valid by looking in
            # the zoneinfo directory.
            zonefile = '/usr/share/zoneinfo/' + value
            try:
                if not os.path.isfile(zonefile):
                    self.module.fail_json(msg='{0} is not a recognized timezone'.format(value))
            except OSError:
                self.module.fail_json(msg='Failed to stat {0}'.format(zonefile))

            # Now (somewhat) atomically update the symlink by creating a new
            # symlink and move it into place. Otherwise we have to remove the
            # original symlink and create the new symlink, however that would
            # create a race condition in case another process tries to read
            # /etc/localtime between removal and creation.
            suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
            new_localtime = '/etc/localtime.' + suffix

            try:
                os.symlink(zonefile, new_localtime)
                os.rename(new_localtime, '/etc/localtime')
            except OSError:
                # Best-effort cleanup: the temporary symlink may not exist if
                # os.symlink itself failed, so removal must not be allowed to
                # raise and mask the original error (the old code could).
                try:
                    os.remove(new_localtime)
                except OSError:
                    pass
                self.module.fail_json(msg='Could not update /etc/localtime')
        else:
            self.module.fail_json(msg='{0} is not a supported option on target platform'.format(key))
def main():
    # Build the AnsibleModule and the platform-specific Timezone handler.
    module = AnsibleModule(
        argument_spec=dict(
            hwclock=dict(choices=['UTC', 'local'], aliases=['rtc']),
            name=dict(),
        ),
        required_one_of=[['hwclock', 'name']],
        supports_check_mode=True
    )
    tz = Timezone(module)

    # Snapshot the state before any modification.
    tz.check(phase='before')
    if module.check_mode:
        diff = tz.diff('before', 'planned')
        # In check mode, 'planned' state is treated as 'after' state
        diff['after'] = diff.pop('planned')
    else:
        # Apply the change, then re-read the state.
        tz.change()
        tz.check(phase='after')
        # The observed state must now match the planned state.
        verification = tz.diff('after', 'planned')
        if verification['after'] != verification['planned']:
            tz.abort('still not desired state, though changes have made')
        diff = tz.diff('before', 'after')

    changed = (diff['before'] != diff['after'])
    if tz.msg:
        module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
    else:
        module.exit_json(changed=changed, diff=diff)


if __name__ == '__main__':
    main()
| gpl-3.0 |
windskyer/nova | nova/tests/functional/db/test_archive.py | 3 | 4820 | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.dialects import sqlite
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.tests.functional import test_servers
from nova.tests.unit import fake_network
class TestDatabaseArchive(test_servers.ServersTestBase):
    """Tests DB API for archiving (soft) deleted records"""

    def setUp(self):
        # Enable SQLite foreign-key enforcement so archiving honors the same
        # referential constraints a production database would.
        super(TestDatabaseArchive, self).setUp()
        # TODO(mriedem): pull this out so we can re-use it in
        # test_archive_deleted_rows_fk_constraint
        # SQLite doesn't enforce foreign key constraints without a pragma.
        engine = sqlalchemy_api.get_engine()
        dialect = engine.url.get_dialect()
        if dialect == sqlite.dialect:
            # We're seeing issues with foreign key support in SQLite 3.6.20
            # SQLAlchemy doesn't support it at all with < SQLite 3.6.19
            # It works fine in SQLite 3.7.
            # So return early to skip this test if running SQLite < 3.7
            import sqlite3
            tup = sqlite3.sqlite_version_info
            if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
                self.skipTest(
                    'sqlite version too old for reliable SQLA foreign_keys')
            engine.connect().execute("PRAGMA foreign_keys = ON")

    def _create_server(self):
        """Creates a minimal test server via the compute API

        Ensures the server is created and can be retrieved from the compute API
        and waits for it to be ACTIVE.

        :returns: created server (dict)
        """
        # TODO(mriedem): We should pull this up into the parent class so we
        # don't have so much copy/paste in these functional tests.
        fake_network.set_stub_network_methods(self.stubs)
        # Create a server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({'server': server})
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        found_server = self._wait_for_state_change(found_server, 'BUILD')
        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])
        return found_server

    def test_archive_deleted_rows(self):
        # Boots a server, deletes it, and then tries to archive it.
        server = self._create_server()
        server_id = server['id']
        # Assert that there are instance_actions. instance_actions are
        # interesting since we don't soft delete them but they have a foreign
        # key back to the instances table.
        actions = self.api.get_instance_actions(server_id)
        self.assertTrue(len(actions),
                        'No instance actions for server: %s' % server_id)
        self._delete_server(server_id)
        # Verify we have the soft deleted instance in the database.
        admin_context = context.get_admin_context(read_deleted='yes')
        # This will raise InstanceNotFound if it's not found.
        instance = db.instance_get_by_uuid(admin_context, server_id)
        # Make sure it's soft deleted.
        self.assertNotEqual(0, instance.deleted)
        # Verify we have some system_metadata since we'll check that later.
        self.assertTrue(len(instance.system_metadata),
                        'No system_metadata for instance: %s' % server_id)
        # Now try and archive the soft deleted records.
        results = db.archive_deleted_rows(max_rows=100)
        # verify system_metadata was dropped
        self.assertIn('instance_system_metadata', results)
        self.assertEqual(len(instance.system_metadata),
                         results['instance_system_metadata'])
        # FIXME(mriedem): we fail to archive instances because of a fkey
        # referential constraint error with instance_actions not being deleted
        self.assertNotIn('instances', results)
        # FIXME(mriedem): instance_actions aren't soft deleted so they aren't
        # archived, which we need to fix.
        self.assertNotIn('instance_actions', results)
| gpl-2.0 |
tumbl3w33d/ansible | lib/ansible/modules/packaging/os/homebrew.py | 4 | 27897 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Andrew Dunham <andrew@du.nham.ca>
# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
#
# Based on macports (Jimmy Tang <jcftang@gmail.com>)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: homebrew
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "Daniel Jaouen (@danieljaouen)"
- "Andrew Dunham (@andrew-d)"
requirements:
- "python >= 2.6"
- homebrew must already be installed on the target system
short_description: Package manager for Homebrew
description:
- Manages Homebrew packages
version_added: "1.1"
options:
name:
description:
- list of names of packages to install/remove
aliases: ['pkg', 'package', 'formula']
type: list
elements: str
path:
description:
- "A ':' separated list of paths to search for 'brew' executable.
Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command,
providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
default: '/usr/local/bin'
state:
description:
- state of the package
choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ]
default: present
update_homebrew:
description:
- update homebrew itself first
type: bool
default: 'no'
aliases: ['update-brew']
upgrade_all:
description:
- upgrade all homebrew packages
type: bool
default: 'no'
aliases: ['upgrade']
install_options:
description:
- options flags to install a package
aliases: ['options']
version_added: "1.4"
notes:
- When used with a `loop:` each package will be processed individually,
it is much more efficient to pass the list directly to the `name` option.
'''
EXAMPLES = '''
# Install formula foo with 'brew' in default path (C(/usr/local/bin))
- homebrew:
name: foo
state: present
# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
- homebrew:
name: foo
path: /my/other/location/bin
state: present
# Update homebrew first and install formula foo with 'brew' in default path
- homebrew:
name: foo
state: present
update_homebrew: yes
# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
- homebrew:
name: foo
state: latest
update_homebrew: yes
# Update homebrew and upgrade all packages
- homebrew:
update_homebrew: yes
upgrade_all: yes
# Miscellaneous other examples
- homebrew:
name: foo
state: head
- homebrew:
name: foo
state: linked
- homebrew:
name: foo
state: absent
- homebrew:
name: foo,bar
state: absent
- homebrew:
name: foo
state: present
install_options: with-baz,enable-debug
'''
import os.path
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems, string_types
# exceptions -------------------------------------------------------------- {{{
class HomebrewException(Exception):
    '''Raised to abort a run; details are kept in Homebrew.failed/message.'''
    pass
# /exceptions ------------------------------------------------------------- }}}
# utils ------------------------------------------------------------------- {{{
def _create_regex_group(s):
lines = (line.strip() for line in s.split('\n') if line.strip())
chars = filter(None, (line.split('#')[0].strip() for line in lines))
group = r'[^' + r''.join(chars) + r']'
return re.compile(group)
# /utils ------------------------------------------------------------------ }}}
class Homebrew(object):
    '''A class to manage Homebrew packages.'''

    # class regexes ------------------------------------------------ {{{
    # The whitelists below are consumed by _create_regex_group(), which
    # strips each line at the first '#', so the inline notes are not part
    # of the resulting character set.
    VALID_PATH_CHARS = r'''
        \w                  # alphanumeric characters (i.e., [a-zA-Z0-9_])
        \s                  # spaces
        :                   # colons
        {sep}               # the OS-specific path separator
        .                   # dots
        -                   # dashes
    '''.format(sep=os.path.sep)

    VALID_BREW_PATH_CHARS = r'''
        \w                  # alphanumeric characters (i.e., [a-zA-Z0-9_])
        \s                  # spaces
        {sep}               # the OS-specific path separator
        .                   # dots
        -                   # dashes
    '''.format(sep=os.path.sep)

    VALID_PACKAGE_CHARS = r'''
        \w                  # alphanumeric characters (i.e., [a-zA-Z0-9_])
        .                   # dots
        /                   # slash (for taps)
        \+                  # plusses
        -                   # dashes
        :                   # colons (for URLs)
        @                   # at-sign
    '''

    # Compiled patterns matching any single ILLEGAL character for each kind
    # of value; a search() hit therefore means the value is invalid.
    INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
    INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
    INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS)
    # /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
@classmethod
def valid_path(cls, path):
'''
`path` must be one of:
- list of paths
- a string containing only:
- alphanumeric characters
- dashes
- dots
- spaces
- colons
- os.path.sep
'''
if isinstance(path, string_types):
return not cls.INVALID_PATH_REGEX.search(path)
try:
iter(path)
except TypeError:
return False
else:
paths = path
return all(cls.valid_brew_path(path_) for path_ in paths)
@classmethod
def valid_brew_path(cls, brew_path):
'''
`brew_path` must be one of:
- None
- a string containing only:
- alphanumeric characters
- dashes
- dots
- spaces
- os.path.sep
'''
if brew_path is None:
return True
return (
isinstance(brew_path, string_types)
and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
)
@classmethod
def valid_package(cls, package):
'''A valid package is either None or alphanumeric.'''
if package is None:
return True
return (
isinstance(package, string_types)
and not cls.INVALID_PACKAGE_REGEX.search(package)
)
@classmethod
def valid_state(cls, state):
'''
A valid state is one of:
- None
- installed
- upgraded
- head
- linked
- unlinked
- absent
'''
if state is None:
return True
else:
return (
isinstance(state, string_types)
and state.lower() in (
'installed',
'upgraded',
'head',
'linked',
'unlinked',
'absent',
)
)
    @classmethod
    def valid_module(cls, module):
        '''A valid module is an instance of AnsibleModule.'''
        # AnsibleModule comes from the file-level ansible.module_utils import.
        return isinstance(module, AnsibleModule)
    # /class validations ------------------------------------------- }}}
    # class properties --------------------------------------------- {{{
    # Each setter below validates its value; an invalid value marks the
    # run as failed and raises HomebrewException. (The `return` at the end
    # of each setter is dead weight: property setters' return values are
    # discarded by Python.)

    @property
    def module(self):
        # The AnsibleModule driving this run.
        return self._module

    @module.setter
    def module(self, module):
        if not self.valid_module(module):
            self._module = None
            self.failed = True
            self.message = 'Invalid module: {0}.'.format(module)
            raise HomebrewException(self.message)

        else:
            self._module = module
            return module

    @property
    def path(self):
        # Extra directories searched for the `brew` executable.
        return self._path

    @path.setter
    def path(self, path):
        if not self.valid_path(path):
            self._path = []
            self.failed = True
            self.message = 'Invalid path: {0}.'.format(path)
            raise HomebrewException(self.message)

        else:
            # A colon-separated string is normalized to a list of entries.
            if isinstance(path, string_types):
                self._path = path.split(':')
            else:
                self._path = path

            return path

    @property
    def brew_path(self):
        # Absolute path of the located `brew` executable.
        return self._brew_path

    @brew_path.setter
    def brew_path(self, brew_path):
        if not self.valid_brew_path(brew_path):
            self._brew_path = None
            self.failed = True
            self.message = 'Invalid brew_path: {0}.'.format(brew_path)
            raise HomebrewException(self.message)

        else:
            self._brew_path = brew_path
            return brew_path

    @property
    def params(self):
        return self._params

    @params.setter
    def params(self, params):
        # NOTE: the argument is ignored; params are always taken from the
        # AnsibleModule itself.
        self._params = self.module.params
        return self._params

    @property
    def current_package(self):
        # The package currently being processed by the command methods.
        return self._current_package

    @current_package.setter
    def current_package(self, package):
        if not self.valid_package(package):
            self._current_package = None
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(package)
            raise HomebrewException(self.message)

        else:
            self._current_package = package
            return package
    # /class properties -------------------------------------------- }}}
    def __init__(self, module, path, packages=None, state=None,
                 update_homebrew=False, upgrade_all=False,
                 install_options=None):
        # Normalize install_options (also avoids a shared mutable default).
        if not install_options:
            install_options = list()
        self._setup_status_vars()
        # Assignments below go through the validating property setters.
        self._setup_instance_vars(module=module, path=path, packages=packages,
                                  state=state, update_homebrew=update_homebrew,
                                  upgrade_all=upgrade_all,
                                  install_options=install_options, )

        self._prep()
    # prep --------------------------------------------------------- {{{
    def _setup_status_vars(self):
        # Reset result bookkeeping for a fresh run.
        self.failed = False
        self.changed = False
        self.changed_count = 0
        self.unchanged_count = 0
        self.message = ''

    def _setup_instance_vars(self, **kwargs):
        # Copy every keyword argument onto the instance; validated names
        # (module, path, ...) go through their property setters.
        for key, val in iteritems(kwargs):
            setattr(self, key, val)

    def _prep(self):
        self._prep_brew_path()

    def _prep_brew_path(self):
        # Locate the `brew` executable (searching self.path as extra
        # directories); abort if the module is unset or brew is missing.
        if not self.module:
            self.brew_path = None
            self.failed = True
            self.message = 'AnsibleModule not set.'
            raise HomebrewException(self.message)

        self.brew_path = self.module.get_bin_path(
            'brew',
            required=True,
            opt_dirs=self.path,
        )
        if not self.brew_path:
            self.brew_path = None
            self.failed = True
            self.message = 'Unable to locate homebrew executable.'
            raise HomebrewException('Unable to locate homebrew executable.')

        return self.brew_path

    def _status(self):
        # Summary tuple consumed by run(): (failed, changed, message).
        return (self.failed, self.changed, self.message)
    # /prep -------------------------------------------------------- }}}
    def run(self):
        """Drive the requested operation; return (failed, changed, message)."""
        try:
            self._run()
        except HomebrewException:
            # Failure details were already recorded on self.failed/message.
            pass

        # Summarize when more than one package was processed.
        if not self.failed and (self.changed_count + self.unchanged_count > 1):
            self.message = "Changed: %d, Unchanged: %d" % (
                self.changed_count,
                self.unchanged_count,
            )
        (failed, changed, message) = self._status()

        return (failed, changed, message)
    # checks ------------------------------------------------------- {{{
    def _current_package_is_installed(self):
        # `brew info <pkg>` mentions "Built from source" or "Poured from
        # bottle" only for installed formulae.
        if not self.valid_package(self.current_package):
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)

        cmd = [
            "{brew_path}".format(brew_path=self.brew_path),
            "info",
            self.current_package,
        ]
        rc, out, err = self.module.run_command(cmd)
        for line in out.split('\n'):
            if (
                re.search(r'Built from source', line)
                or re.search(r'Poured from bottle', line)
            ):
                return True

        return False

    def _current_package_is_outdated(self):
        if not self.valid_package(self.current_package):
            return False

        rc, out, err = self.module.run_command([
            self.brew_path,
            'outdated',
            self.current_package,
        ])

        # A non-zero exit from `brew outdated <pkg>` is treated as outdated.
        return rc != 0

    def _current_package_is_installed_from_head(self):
        if not Homebrew.valid_package(self.current_package):
            return False
        elif not self._current_package_is_installed():
            return False

        rc, out, err = self.module.run_command([
            self.brew_path,
            'info',
            self.current_package,
        ])

        try:
            # First non-empty line of `brew info` carries the version info.
            version_info = [line for line in out.split('\n') if line][0]
        except IndexError:
            return False

        # The version line ends with 'HEAD' for HEAD installs.
        return version_info.split(' ')[-1] == 'HEAD'
    # /checks ------------------------------------------------------ }}}
# commands ----------------------------------------------------- {{{
def _run(self):
if self.update_homebrew:
self._update_homebrew()
if self.upgrade_all:
self._upgrade_all()
if self.packages:
if self.state == 'installed':
return self._install_packages()
elif self.state == 'upgraded':
return self._upgrade_packages()
elif self.state == 'head':
return self._install_packages()
elif self.state == 'linked':
return self._link_packages()
elif self.state == 'unlinked':
return self._unlink_packages()
elif self.state == 'absent':
return self._uninstall_packages()
# updated -------------------------------- {{{
def _update_homebrew(self):
rc, out, err = self.module.run_command([
self.brew_path,
'update',
])
if rc == 0:
if out and isinstance(out, string_types):
already_updated = any(
re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
for s in out.split('\n')
if s
)
if not already_updated:
self.changed = True
self.message = 'Homebrew updated successfully.'
else:
self.message = 'Homebrew already up-to-date.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
# /updated ------------------------------- }}}
# _upgrade_all --------------------------- {{{
def _upgrade_all(self):
rc, out, err = self.module.run_command([
self.brew_path,
'upgrade',
])
if rc == 0:
if not out:
self.message = 'Homebrew packages already upgraded.'
else:
self.changed = True
self.message = 'Homebrew upgraded.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
# /_upgrade_all -------------------------- }}}
# installed ------------------------------ {{{
    def _install_current_package(self):
        """Install ``self.current_package``; honors check mode and ``--HEAD``."""
        if not self.valid_package(self.current_package):
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)
        if self._current_package_is_installed():
            self.unchanged_count += 1
            self.message = 'Package already installed: {0}'.format(
                self.current_package,
            )
            return True
        if self.module.check_mode:
            self.changed = True
            self.message = 'Package would be installed: {0}'.format(
                self.current_package
            )
            # Raising aborts the remaining work; run() swallows the exception
            # and reports the would-be change.
            raise HomebrewException(self.message)
        if self.state == 'head':
            head = '--HEAD'
        else:
            head = None
        opts = (
            [self.brew_path, 'install']
            + self.install_options
            + [self.current_package, head]
        )
        # Drop the None head placeholder (and any other falsy entries).
        cmd = [opt for opt in opts if opt]
        rc, out, err = self.module.run_command(cmd)
        # Success is judged by re-querying brew rather than by rc.
        if self._current_package_is_installed():
            self.changed_count += 1
            self.changed = True
            self.message = 'Package installed: {0}'.format(self.current_package)
            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewException(self.message)
def _install_packages(self):
for package in self.packages:
self.current_package = package
self._install_current_package()
return True
# /installed ----------------------------- }}}
# upgraded ------------------------------- {{{
def _upgrade_current_package(self):
command = 'upgrade'
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
command = 'install'
if self._current_package_is_installed() and not self._current_package_is_outdated():
self.message = 'Package is already upgraded: {0}'.format(
self.current_package,
)
self.unchanged_count += 1
return True
if self.module.check_mode:
self.changed = True
self.message = 'Package would be upgraded: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, command]
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if self._current_package_is_installed() and not self._current_package_is_outdated():
self.changed_count += 1
self.changed = True
self.message = 'Package upgraded: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _upgrade_all_packages(self):
opts = (
[self.brew_path, 'upgrade']
+ self.install_options
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
self.changed = True
self.message = 'All packages upgraded.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _upgrade_packages(self):
if not self.packages:
self._upgrade_all_packages()
else:
for package in self.packages:
self.current_package = package
self._upgrade_current_package()
return True
# /upgraded ------------------------------ }}}
# uninstalled ---------------------------- {{{
    def _uninstall_current_package(self):
        """Force-uninstall ``self.current_package``; honors check mode."""
        if not self.valid_package(self.current_package):
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)
        if not self._current_package_is_installed():
            self.unchanged_count += 1
            self.message = 'Package already uninstalled: {0}'.format(
                self.current_package,
            )
            return True
        if self.module.check_mode:
            self.changed = True
            self.message = 'Package would be uninstalled: {0}'.format(
                self.current_package
            )
            # Raising aborts the remaining work; run() swallows the exception
            # and reports the would-be change.
            raise HomebrewException(self.message)
        opts = (
            [self.brew_path, 'uninstall', '--force']
            + self.install_options
            + [self.current_package]
        )
        cmd = [opt for opt in opts if opt]
        rc, out, err = self.module.run_command(cmd)
        # Success is judged by re-querying brew rather than by rc.
        if not self._current_package_is_installed():
            self.changed_count += 1
            self.changed = True
            self.message = 'Package uninstalled: {0}'.format(self.current_package)
            return True
        else:
            self.failed = True
            self.message = err.strip()
            raise HomebrewException(self.message)
def _uninstall_packages(self):
for package in self.packages:
self.current_package = package
self._uninstall_current_package()
return True
# /uninstalled ----------------------------- }}}
# linked --------------------------------- {{{
    def _link_current_package(self):
        """Run ``brew link`` for ``self.current_package``; honors check mode."""
        if not self.valid_package(self.current_package):
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)
        if not self._current_package_is_installed():
            # Linking something that is not installed is an error, not a no-op.
            self.failed = True
            self.message = 'Package not installed: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)
        if self.module.check_mode:
            self.changed = True
            self.message = 'Package would be linked: {0}'.format(
                self.current_package
            )
            raise HomebrewException(self.message)
        opts = (
            [self.brew_path, 'link']
            + self.install_options
            + [self.current_package]
        )
        cmd = [opt for opt in opts if opt]
        rc, out, err = self.module.run_command(cmd)
        # Unlike install/uninstall, success here is judged by rc.
        if rc == 0:
            self.changed_count += 1
            self.changed = True
            self.message = 'Package linked: {0}'.format(self.current_package)
            return True
        else:
            self.failed = True
            self.message = 'Package could not be linked: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)
def _link_packages(self):
for package in self.packages:
self.current_package = package
self._link_current_package()
return True
# /linked -------------------------------- }}}
# unlinked ------------------------------- {{{
    def _unlink_current_package(self):
        """Run ``brew unlink`` for ``self.current_package``; honors check mode."""
        if not self.valid_package(self.current_package):
            self.failed = True
            self.message = 'Invalid package: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)
        if not self._current_package_is_installed():
            # Unlinking something that is not installed is an error, not a no-op.
            self.failed = True
            self.message = 'Package not installed: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)
        if self.module.check_mode:
            self.changed = True
            self.message = 'Package would be unlinked: {0}'.format(
                self.current_package
            )
            raise HomebrewException(self.message)
        opts = (
            [self.brew_path, 'unlink']
            + self.install_options
            + [self.current_package]
        )
        cmd = [opt for opt in opts if opt]
        rc, out, err = self.module.run_command(cmd)
        # Unlike install/uninstall, success here is judged by rc.
        if rc == 0:
            self.changed_count += 1
            self.changed = True
            self.message = 'Package unlinked: {0}'.format(self.current_package)
            return True
        else:
            self.failed = True
            self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
            raise HomebrewException(self.message)
def _unlink_packages(self):
for package in self.packages:
self.current_package = package
self._unlink_current_package()
return True
# /unlinked ------------------------------ }}}
# /commands ---------------------------------------------------- }}}
def main():
    """Ansible entry point: parse parameters, run Homebrew, report the result."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(
                aliases=["pkg", "package", "formula"],
                required=False,
                type='list',
                elements='str',
            ),
            path=dict(
                default="/usr/local/bin",
                required=False,
                type='path',
            ),
            state=dict(
                default="present",
                choices=[
                    "present", "installed",
                    "latest", "upgraded", "head",
                    "linked", "unlinked",
                    "absent", "removed", "uninstalled",
                ],
            ),
            update_homebrew=dict(
                default=False,
                aliases=["update-brew"],
                type='bool',
            ),
            upgrade_all=dict(
                default=False,
                aliases=["upgrade"],
                type='bool',
            ),
            install_options=dict(
                default=None,
                aliases=['options'],
                type='list',
            )
        ),
        supports_check_mode=True,
    )
    # Force a C locale so brew output is parseable regardless of the host.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
    p = module.params
    # Empty list and None both mean "no packages named".
    packages = p['name'] or None
    # ``path`` is a colon-separated list of extra directories to search.
    path = p['path']
    if path:
        path = path.split(':')
    # Collapse every user-facing alias onto the canonical internal state;
    # replaces the previous chain of ifs (which included no-op branches).
    state_aliases = {
        'present': 'installed', 'installed': 'installed',
        'head': 'head',
        'latest': 'upgraded', 'upgraded': 'upgraded',
        'linked': 'linked',
        'unlinked': 'unlinked',
        'absent': 'absent', 'removed': 'absent', 'uninstalled': 'absent',
    }
    # The argument_spec ``choices`` guarantees the key exists.
    state = state_aliases[p['state']]
    update_homebrew = p['update_homebrew']
    upgrade_all = p['upgrade_all']
    p['install_options'] = p['install_options'] or []
    # Each option becomes a ``--<option>`` flag on the brew command line.
    install_options = ['--{0}'.format(install_option)
                       for install_option in p['install_options']]
    brew = Homebrew(module=module, path=path, packages=packages,
                    state=state, update_homebrew=update_homebrew,
                    upgrade_all=upgrade_all, install_options=install_options)
    (failed, changed, message) = brew.run()
    if failed:
        module.fail_json(msg=message)
    else:
        module.exit_json(changed=changed, msg=message)
| gpl-3.0 |
cherylyli/stress-aid | env/lib/python3.5/site-packages/jinja2/lexer.py | 119 | 28238 | # -*- coding: utf-8 -*-
"""
jinja2.lexer
~~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache
from jinja2._compat import iteritems, implements_iterator, text_type, intern
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')
def _make_name_re():
    # Decide which identifier regex to use: if this interpreter cannot
    # compile a source string containing a non-ASCII identifier, fall back
    # to the plain ASCII identifier rule.
    try:
        compile('föö', '<unknown>', 'eval')
    except SyntaxError:
        return re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
    # Unicode identifiers are supported: build the regex from the XID
    # character tables, then drop the (large) table module again.
    import jinja2
    from jinja2 import _stringdefs
    name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
                                         _stringdefs.xid_continue))
    # Save some memory here
    sys.modules.pop('jinja2._stringdefs')
    del _stringdefs
    del jinja2._stringdefs
    return name_re
# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
name_re = _make_name_re()
del _make_name_re
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')
# internal the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')
# bind operators to token types
operators = {
'+': TOKEN_ADD,
'-': TOKEN_SUB,
'/': TOKEN_DIV,
'//': TOKEN_FLOORDIV,
'*': TOKEN_MUL,
'%': TOKEN_MOD,
'**': TOKEN_POW,
'~': TOKEN_TILDE,
'[': TOKEN_LBRACKET,
']': TOKEN_RBRACKET,
'(': TOKEN_LPAREN,
')': TOKEN_RPAREN,
'{': TOKEN_LBRACE,
'}': TOKEN_RBRACE,
'==': TOKEN_EQ,
'!=': TOKEN_NE,
'>': TOKEN_GT,
'>=': TOKEN_GTEQ,
'<': TOKEN_LT,
'<=': TOKEN_LTEQ,
'=': TOKEN_ASSIGN,
'.': TOKEN_DOT,
':': TOKEN_COLON,
'|': TOKEN_PIPE,
',': TOKEN_COMMA,
';': TOKEN_SEMICOLON
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
sorted(operators, key=lambda x: -len(x))))
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
TOKEN_COMMENT_END, TOKEN_WHITESPACE,
TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
    """Return a human readable description for an internal token type."""
    # Operator tokens are best described by the operator text itself.
    if token_type in reverse_operators:
        return reverse_operators[token_type]
    descriptions = {
        TOKEN_COMMENT_BEGIN: 'begin of comment',
        TOKEN_COMMENT_END: 'end of comment',
        TOKEN_COMMENT: 'comment',
        TOKEN_LINECOMMENT: 'comment',
        TOKEN_BLOCK_BEGIN: 'begin of statement block',
        TOKEN_BLOCK_END: 'end of statement block',
        TOKEN_VARIABLE_BEGIN: 'begin of print statement',
        TOKEN_VARIABLE_END: 'end of print statement',
        TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
        TOKEN_LINESTATEMENT_END: 'end of line statement',
        TOKEN_DATA: 'template data / text',
        TOKEN_EOF: 'end of template',
    }
    # Unknown types describe themselves.
    return descriptions.get(token_type, token_type)
def describe_token(token):
    """Returns a description of the token."""
    # Name tokens are described by their value; everything else by type.
    if token.type != 'name':
        return _describe_token_type(token.type)
    return token.value
def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    # Plain expressions are just a token type.
    if ':' not in expr:
        return _describe_token_type(expr)
    # 'type:value' expressions: name tokens are described by their value.
    type, value = expr.split(':', 1)
    if type == 'name':
        return value
    return _describe_token_type(type)
def count_newlines(value):
    """Count the number of newline characters in the string. This is
    useful for extensions that filter a stream.
    """
    # Each match of newline_re is exactly one newline sequence.
    return sum(1 for _ in newline_re.finditer(value))
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    escape = re.escape
    # (length, name, escaped-delimiter) triples; length is only used for
    # the longest-first ordering below.
    rules = [
        (len(environment.comment_start_string), 'comment',
         escape(environment.comment_start_string)),
        (len(environment.block_start_string), 'block',
         escape(environment.block_start_string)),
        (len(environment.variable_start_string), 'variable',
         escape(environment.variable_start_string)),
    ]
    if environment.line_statement_prefix is not None:
        rules.append((len(environment.line_statement_prefix), 'linestatement',
                      r'^[ \t\v]*' + escape(environment.line_statement_prefix)))
    if environment.line_comment_prefix is not None:
        rules.append((len(environment.line_comment_prefix), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*' +
                      escape(environment.line_comment_prefix)))
    # Longest delimiters first so the lexer prefers the most specific match.
    return [rule[1:] for rule in sorted(rules, reverse=True)]
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """
    def __init__(self, message, cls=TemplateSyntaxError):
        # message: error text; cls: exception class raised on __call__.
        self.message = message
        self.error_class = cls
    def __call__(self, lineno, filename):
        # Invoked by the lexer when a failure rule matches.
        raise self.error_class(self.message, lineno, filename)
class Token(tuple):
    """An immutable (lineno, type, value) triple produced by the lexer."""
    __slots__ = ()
    # Expose the tuple fields as named read-only properties.
    lineno, type, value = (property(itemgetter(x)) for x in range(3))
    def __new__(cls, lineno, type, value):
        # Intern the type string so later checks can compare with ``is``.
        return tuple.__new__(cls, (lineno, intern(str(type)), value))
    def __str__(self):
        # Operators render as their symbol, names as their value,
        # everything else as the bare type.
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == 'name':
            return self.value
        return self.type
    def test(self, expr):
        """Test a token against a token expression. This can either be a
        token type or ``'token_type:token_value'``. This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ':' in expr:
            return expr.split(':', 1) == [self.type, self.value]
        return False
    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False
    def __repr__(self):
        return 'Token(%r, %r, %r)' % (
            self.lineno,
            self.type,
            self.value
        )
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams. Iterate over the stream
    until the eof token is reached.
    """
    def __init__(self, stream):
        self.stream = stream
    def __iter__(self):
        return self
    def __next__(self):
        # Capture the token before advancing; EOF closes the stream.
        current = self.stream.current
        if current.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return current
@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\\s. The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead. The current active token is stored as :attr:`current`.
    """
    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        # Pushback buffer for tokens returned to the stream via push().
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        self.current = Token(1, TOKEN_INITIAL, '')
        # Advance once so ``current`` holds the first real token.
        next(self)
    def __iter__(self):
        return TokenStreamIterator(self)
    def __bool__(self):
        # Truthy while there is anything left to consume.
        return bool(self._pushed) or self.current.type is not TOKEN_EOF
    __nonzero__ = __bool__  # py2
    eos = property(lambda x: not x, doc="Are we at the end of the stream?")
    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)
    def look(self):
        """Look at the next token without consuming it."""
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result
    def skip(self, n=1):
        """Go n tokens ahead."""
        for x in range(n):
            next(self)
    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)
    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None
    def __next__(self):
        """Go one token ahead and return the old one"""
        rv = self.current
        # Pushed-back tokens take priority over the underlying generator.
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()
        return rv
    def close(self):
        """Close the stream."""
        # From here on, ``current`` is a synthetic EOF token.
        self.current = Token(self.current.lineno, TOKEN_EOF, '')
        self._iter = None
        self.closed = True
    def expect(self, expr):
        """Expect a given token type and return it. This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError('unexpected end of template, '
                                          'expected %r.' % expr,
                                          self.current.lineno,
                                          self.name, self.filename)
            raise TemplateSyntaxError("expected token %r, got %r" %
                                      (expr, describe_token(self.current)),
                                      self.current.lineno,
                                      self.name, self.filename)
        # Return the matched token, then advance past it.
        try:
            return self.current
        finally:
            next(self)
def get_lexer(environment):
    """Return a lexer which is probably cached."""
    # Every environment setting that influences lexing forms the cache key.
    key = (environment.block_start_string,
           environment.block_end_string,
           environment.variable_start_string,
           environment.variable_end_string,
           environment.comment_start_string,
           environment.comment_end_string,
           environment.line_statement_prefix,
           environment.line_comment_prefix,
           environment.trim_blocks,
           environment.lstrip_blocks,
           environment.newline_sequence,
           environment.keep_trailing_newline)
    cached = _lexer_cache.get(key)
    if cached is not None:
        return cached
    lexer = Lexer(environment)
    _lexer_cache[key] = lexer
    return lexer
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.
    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """
    def __init__(self, environment):
        # shortcuts
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape
        # lexing rules for tags
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None)
        ]
        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)
        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and '\\n?' or ''
        # strip leading spaces if lstrip_blocks is enabled
        prefix_re = {}
        if environment.lstrip_blocks:
            # use '{%+' to manually disable lstrip_blocks behavior
            no_lstrip_re = e('+')
            # detect overlap between block and variable or comment strings
            block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
            # make sure we don't mistake a block for a variable or a comment
            m = block_diff.match(environment.comment_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
            m = block_diff.match(environment.variable_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
            # detect overlap between comment and variable strings
            comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
            m = comment_diff.match(environment.variable_start_string)
            no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
            lstrip_re = r'^[ \t]*'
            block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
                lstrip_re,
                e(environment.block_start_string),
                no_lstrip_re,
                e(environment.block_start_string),
            )
            comment_prefix_re = r'%s%s%s|%s\+?' % (
                lstrip_re,
                e(environment.comment_start_string),
                no_variable_re,
                e(environment.comment_start_string),
            )
            prefix_re['block'] = block_prefix_re
            prefix_re['comment'] = comment_prefix_re
        else:
            block_prefix_re = '%s' % e(environment.block_start_string)
        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline
        # global lexing rules: state name -> [(regex, token(s), new_state)]
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
                        e(environment.block_start_string),
                        block_prefix_re,
                        e(environment.block_end_string),
                        e(environment.block_end_string)
                    )] + [
                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
                        for n, r in root_tag_rules
                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                # data
                (c('.+'), TOKEN_DATA, None)
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (c(r'(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), TOKEN_BLOCK_END, '#pop'),
            ] + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (c(r'\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), TOKEN_VARIABLE_END, '#pop')
            ] + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    block_prefix_re,
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
            ] + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
                 TOKEN_LINECOMMENT_END), '#pop')
            ]
        }
    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)
    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream.
        """
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)
    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == 'linestatement_begin':
                token = 'block_begin'
            elif token == 'linestatement_end':
                token = 'block_end'
            # we are not interested in those tokens in the parser
            elif token in ('raw_begin', 'raw_end'):
                continue
            elif token == 'data':
                value = self._normalize_newlines(value)
            elif token == 'keyword':
                token = value
            elif token == 'name':
                value = str(value)
            elif token == 'string':
                # try to unescape string
                try:
                    value = self._normalize_newlines(value[1:-1]) \
                        .encode('ascii', 'backslashreplace') \
                        .decode('unicode-escape')
                except Exception as e:
                    msg = str(e).split(':')[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
            elif token == 'integer':
                value = int(value)
            elif token == 'float':
                value = float(value)
            elif token == 'operator':
                token = operators[value]
            yield Token(lineno, token, value)
    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator. Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()
        # splitlines() drops a trailing newline; re-add it when requested.
        if self.keep_trailing_newline and source:
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        # Stack of lexer states; the topmost entry selects the active rules.
        stack = ['root']
        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'
        statetokens = self.rules[stack[-1]]
        source_length = len(source)
        # Tracks the closing bracket expected for each open {, (, [.
        balancing_stack = []
        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue
                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue
                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')
                # strings as token just are yielded as it.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')
                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()
                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
| mit |
EmadMokhtar/Django | tests/select_related/tests.py | 67 | 9796 | from django.core.exceptions import FieldError
from django.test import SimpleTestCase, TestCase
from .models import (
Bookmark, Domain, Family, Genus, HybridSpecies, Kingdom, Klass, Order,
Phylum, Pizza, Species, TaggedItem,
)
class SelectRelatedTests(TestCase):
    @classmethod
    def create_tree(cls, stringtree):
        """
        Helper to create a complete tree.
        """
        # One name per taxonomy level, ordered root -> leaf; the two lists
        # must line up positionally.
        names = stringtree.split()
        models = [Domain, Kingdom, Phylum, Klass, Order, Family, Genus, Species]
        assert len(names) == len(models), (names, models)
        parent = None
        for name, model in zip(names, models):
            try:
                # Reuse an existing node so trees can share ancestors.
                obj = model.objects.get(name=name)
            except model.DoesNotExist:
                obj = model(name=name)
            if parent:
                # The FK attribute name matches the parent model's
                # lowercased class name (e.g. Species.genus).
                setattr(obj, parent.__class__.__name__.lower(), parent)
            obj.save()
            parent = obj
    @classmethod
    def setUpTestData(cls):
        # Four complete taxonomies; all share the same Domain ("Eukaryota"),
        # and the first two also share the Kingdom ("Animalia").
        cls.create_tree("Eukaryota Animalia Anthropoda Insecta Diptera Drosophilidae Drosophila melanogaster")
        cls.create_tree("Eukaryota Animalia Chordata Mammalia Primates Hominidae Homo sapiens")
        cls.create_tree("Eukaryota Plantae Magnoliophyta Magnoliopsida Fabales Fabaceae Pisum sativum")
        cls.create_tree("Eukaryota Fungi Basidiomycota Homobasidiomycatae Agaricales Amanitacae Amanita muscaria")
def test_access_fks_without_select_related(self):
"""
Normally, accessing FKs doesn't fill in related objects
"""
with self.assertNumQueries(8):
fly = Species.objects.get(name="melanogaster")
domain = fly.genus.family.order.klass.phylum.kingdom.domain
self.assertEqual(domain.name, 'Eukaryota')
def test_access_fks_with_select_related(self):
"""
A select_related() call will fill in those related objects without any
extra queries
"""
with self.assertNumQueries(1):
person = (
Species.objects
.select_related('genus__family__order__klass__phylum__kingdom__domain')
.get(name="sapiens")
)
domain = person.genus.family.order.klass.phylum.kingdom.domain
self.assertEqual(domain.name, 'Eukaryota')
def test_list_without_select_related(self):
"""
select_related() also of course applies to entire lists, not just
items. This test verifies the expected behavior without select_related.
"""
with self.assertNumQueries(9):
world = Species.objects.all()
families = [o.genus.family.name for o in world]
self.assertEqual(sorted(families), [
'Amanitacae',
'Drosophilidae',
'Fabaceae',
'Hominidae',
])
def test_list_with_select_related(self):
"""
select_related() also of course applies to entire lists, not just
items. This test verifies the expected behavior with select_related.
"""
with self.assertNumQueries(1):
world = Species.objects.all().select_related()
families = [o.genus.family.name for o in world]
self.assertEqual(sorted(families), [
'Amanitacae',
'Drosophilidae',
'Fabaceae',
'Hominidae',
])
def test_list_with_depth(self):
"""
Passing a relationship field lookup specifier to select_related() will
stop the descent at a particular level. This can be used on lists as
well.
"""
with self.assertNumQueries(5):
world = Species.objects.all().select_related('genus__family')
orders = [o.genus.family.order.name for o in world]
self.assertEqual(sorted(orders), ['Agaricales', 'Diptera', 'Fabales', 'Primates'])
def test_select_related_with_extra(self):
s = (Species.objects.all()
.select_related()
.extra(select={'a': 'select_related_species.id + 10'})[0])
self.assertEqual(s.id + 10, s.a)
def test_certain_fields(self):
"""
The optional fields passed to select_related() control which related
models we pull in. This allows for smaller queries.
In this case, we explicitly say to select the 'genus' and
'genus.family' models, leading to the same number of queries as before.
"""
with self.assertNumQueries(1):
world = Species.objects.select_related('genus__family')
families = [o.genus.family.name for o in world]
self.assertEqual(sorted(families), ['Amanitacae', 'Drosophilidae', 'Fabaceae', 'Hominidae'])
def test_more_certain_fields(self):
"""
In this case, we explicitly say to select the 'genus' and
'genus.family' models, leading to the same number of queries as before.
"""
with self.assertNumQueries(2):
world = Species.objects.filter(genus__name='Amanita')\
.select_related('genus__family')
orders = [o.genus.family.order.name for o in world]
self.assertEqual(orders, ['Agaricales'])
def test_field_traversal(self):
with self.assertNumQueries(1):
s = (Species.objects.all()
.select_related('genus__family__order')
.order_by('id')[0:1].get().genus.family.order.name)
self.assertEqual(s, 'Diptera')
def test_none_clears_list(self):
queryset = Species.objects.select_related('genus').select_related(None)
self.assertIs(queryset.query.select_related, False)
def test_chaining(self):
parent_1, parent_2 = Species.objects.all()[:2]
HybridSpecies.objects.create(name='hybrid', parent_1=parent_1, parent_2=parent_2)
queryset = HybridSpecies.objects.select_related('parent_1').select_related('parent_2')
with self.assertNumQueries(1):
obj = queryset[0]
self.assertEqual(obj.parent_1, parent_1)
self.assertEqual(obj.parent_2, parent_2)
def test_reverse_relation_caching(self):
species = Species.objects.select_related('genus').filter(name='melanogaster').first()
with self.assertNumQueries(0):
self.assertEqual(species.genus.name, 'Drosophila')
# The species_set reverse relation isn't cached.
self.assertEqual(species.genus._state.fields_cache, {})
with self.assertNumQueries(1):
self.assertEqual(species.genus.species_set.first().name, 'melanogaster')
def test_select_related_after_values(self):
"""
Running select_related() after calling values() raises a TypeError
"""
message = "Cannot call select_related() after .values() or .values_list()"
with self.assertRaisesMessage(TypeError, message):
list(Species.objects.values('name').select_related('genus'))
def test_select_related_after_values_list(self):
"""
Running select_related() after calling values_list() raises a TypeError
"""
message = "Cannot call select_related() after .values() or .values_list()"
with self.assertRaisesMessage(TypeError, message):
list(Species.objects.values_list('name').select_related('genus'))
class SelectRelatedValidationTests(SimpleTestCase):
    """
    select_related() should throw an error on fields that do not exist and
    non-relational fields.
    """
    # %-templates for the two FieldError messages; filled with
    # (field name, comma-separated valid choices).
    non_relational_error = "Non-relational field given in select_related: '%s'. Choices are: %s"
    invalid_error = "Invalid field name(s) given in select_related: '%s'. Choices are: %s"
    def test_non_relational_field(self):
        # A plain value field cannot be followed, with or without a nested
        # lookup after it.
        with self.assertRaisesMessage(FieldError, self.non_relational_error % ('name', 'genus')):
            list(Species.objects.select_related('name__some_field'))
        with self.assertRaisesMessage(FieldError, self.non_relational_error % ('name', 'genus')):
            list(Species.objects.select_related('name'))
        # Domain has no relational fields at all -> '(none)' as choices.
        with self.assertRaisesMessage(FieldError, self.non_relational_error % ('name', '(none)')):
            list(Domain.objects.select_related('name'))
    def test_non_relational_field_nested(self):
        # The error also fires when the non-relational field is deeper in
        # the lookup path.
        with self.assertRaisesMessage(FieldError, self.non_relational_error % ('name', 'family')):
            list(Species.objects.select_related('genus__name'))
    def test_many_to_many_field(self):
        # M2M fields are not supported by select_related().
        with self.assertRaisesMessage(FieldError, self.invalid_error % ('toppings', '(none)')):
            list(Pizza.objects.select_related('toppings'))
    def test_reverse_relational_field(self):
        # Reverse FK relations are not supported by select_related().
        with self.assertRaisesMessage(FieldError, self.invalid_error % ('child_1', 'genus')):
            list(Species.objects.select_related('child_1'))
    def test_invalid_field(self):
        # Completely unknown field names, at any depth.
        with self.assertRaisesMessage(FieldError, self.invalid_error % ('invalid_field', 'genus')):
            list(Species.objects.select_related('invalid_field'))
        with self.assertRaisesMessage(FieldError, self.invalid_error % ('related_invalid_field', 'family')):
            list(Species.objects.select_related('genus__related_invalid_field'))
        with self.assertRaisesMessage(FieldError, self.invalid_error % ('invalid_field', '(none)')):
            list(Domain.objects.select_related('invalid_field'))
    def test_generic_relations(self):
        # Generic relations (and generic FKs) cannot be select_related'd.
        with self.assertRaisesMessage(FieldError, self.invalid_error % ('tags', '')):
            list(Bookmark.objects.select_related('tags'))
        with self.assertRaisesMessage(FieldError, self.invalid_error % ('content_object', 'content_type')):
            list(TaggedItem.objects.select_related('content_object'))
| mit |
plone/plone.app.mosaic | src/plone/app/mosaic/registry.py | 1 | 13263 | # -*- coding: utf-8 -*-
from copy import deepcopy
from plone.app.mosaic.interfaces import IMosaicRegistryAdapter
from plone.app.mosaic.utils import extractFieldInformation
from plone.dexterity.utils import iterSchemataForType
from plone.registry.interfaces import IRegistry
from Products.CMFCore.interfaces._content import IFolderish
from zope.component import adapter
from zope.i18n import translate
from zope.interface import implementer
class DottedDict(dict):
    """A dictionary where you can access nested dicts with dotted names.

    ``d.get('a.b.c')`` walks ``d['a']['b']['c']`` and returns *default*
    when any component along the path is missing.
    """

    def get(self, k, default=None):
        """Return the value at dotted path *k*, or *default* when absent.

        Bug fix: the original only caught KeyError, so a path that
        traversed *through* a non-dict value (e.g. ``'a.b'`` where
        ``d['a']`` is a string or int) raised TypeError instead of
        falling back to *default*.
        """
        if '.' not in k:
            # No dots: behave exactly like dict.get().
            return super(DottedDict, self).get(k, default)
        val = self
        for part in k.split('.'):
            try:
                val = val[part]
            except (KeyError, TypeError):
                # Missing key, or an intermediate value that is not
                # subscriptable by string keys -- treat both as absent.
                return default
        return val
def getBool(value):
    """Return True when the string *value* spells 'true', case-insensitively."""
    normalized = value.lower()
    return normalized == 'true'
def getCategoryIndex(tiles, category):
    """Return the index of the entry named *category* in *tiles*.

    *tiles* is a list of dicts, each carrying a 'name' key.  Returns
    None when no entry matches; callers must therefore compare with
    ``is None`` rather than truthiness, since 0 is a valid index.

    Improvement: uses enumerate() instead of a hand-maintained counter.
    """
    for index, tile in enumerate(tiles):
        if tile['name'] == category:
            return index
    return None
def weightedSort(x):
    """Sort key for (name, record) registry pairs: the record's 'weight'."""
    _, record = x
    return record['weight']
def safe_weight_sortkey(x):
    """Sort key for records that may lack a 'weight': missing/None sorts last (9000)."""
    weight = x.get('weight')
    if weight is None:
        return 9000
    return weight
@implementer(IMosaicRegistryAdapter)
@adapter(IRegistry)
class MosaicRegistry(object):
    """Adapts a registry object to parse the mosaic settings data"""
    # All mosaic-related registry records live under this dotted prefix.
    prefix = "plone.app.mosaic"
    def __init__(self, registry):
        # registry: the plone.registry utility (IRegistry) being adapted.
        self.registry = registry
    def parseRegistry(self):
        """Make a dictionary structure for the values in the registry"""
        # Turns flat record names ('plone.app.mosaic.x.y.z') into nested
        # dicts so values can later be fetched with dotted paths.
        result = DottedDict()
        for record in self.registry.records:
            if not record.startswith(self.prefix):
                continue
            splitted = record.split('.')
            current = result
            for x in splitted[:-1]:
                # create the key if it's not there
                if x not in current:
                    current[x] = {}
                current = current[x]
            # store actual key/value
            key = splitted[-1]
            current[key] = self.registry.records[record].value
        return result
    def mapActions(self, settings, config):
        """Fill config with weight-sorted primary/secondary actions,
        grouped by their optional fieldset."""
        for action_type in ['primary_actions', 'secondary_actions']:
            config[action_type] = []
            key = '{0:s}.{1:s}'.format(self.prefix, action_type)
            actions = list(settings.get(key, {}).items())
            actions.sort(key=weightedSort)
            for key, action in actions:
                # sort items
                items = action.get('items', {})
                if isinstance(items, dict):
                    items = list(items.values())
                if items:
                    action['items'] = items
                    action['items'].sort(key=safe_weight_sortkey)
                    for x in action['items']:
                        x['value'] = x['name']
                if not action['fieldset']:
                    # No fieldset: the action goes at the top level.
                    config[action_type].append(action)
                    continue
                index = getCategoryIndex(config[action_type],
                                         action['fieldset'])
                # NOTE(review): 'if not index' is also true for index == 0,
                # so a fieldset that happens to sit first gets a duplicate
                # group appended; 'if index is None' looks intended -- confirm.
                if not index:
                    config[action_type].append({'name': action['fieldset'],
                                                'label': action['fieldset'],
                                                'actions': []})
                    index = getCategoryIndex(config[action_type],
                                             action['fieldset'])
                config[action_type][index]['actions'].append(action)
        # Default Available Actions
        key = '{0:s}.default_available_actions'.format(self.prefix)
        config['default_available_actions'] = settings.get(key, [])
        return config
    def mapTilesCategories(self, settings, config):
        """Append the weight-sorted tile categories (each starting with an
        empty 'tiles' list) to config['tiles']."""
        config['tiles'] = config.get('tiles', [])
        categories = settings.get(
            '{0:s}.tiles_categories'.format(self.prefix), {})
        sorted_categories = list(categories.items())
        sorted_categories.sort(key=weightedSort)
        for key, category in sorted_categories:
            category['tiles'] = []
            config['tiles'].append(category)
        return config
    def mapFormatCategories(self, settings, config):
        """Append the weight-sorted format categories (each starting with an
        empty 'actions' list) to config['formats']."""
        config['formats'] = config.get('formats', [])
        categories = settings.get(
            '{0:s}.format_categories'.format(self.prefix), {})
        sorted_categories = list(categories.items())
        sorted_categories.sort(key=weightedSort)
        for key, category in sorted_categories:
            category['actions'] = []
            config['formats'].append(category)
        return config
    def mapFormats(self, settings, config):
        """Distribute format actions into their categories; formats whose
        category is unknown are silently dropped."""
        formats = settings.get('{0:s}.formats'.format(self.prefix), {})
        for key, format in formats.items():
            index = getCategoryIndex(config['formats'], format['category'])
            if index is not None:
                config['formats'][index]['actions'].append(format)
        # sort the formats
        for format in config['formats']:
            format['actions'].sort(key=safe_weight_sortkey)
        return config
    def mapTinyMCEActionCategories(self, settings, config):
        """Build the TinyMCE toolbar categories; the context menu starts
        out as a deep copy of the toolbar structure."""
        config['richtext_toolbar'] = config.get('richtext_toolbar', [])
        config['richtext_contextmenu'] = config.get('richtext_contextmenu', [])
        categories = settings.get(
            '{0:s}.tinymce_categories'.format(self.prefix), {})
        sorted_categories = list(categories.items())
        sorted_categories.sort(key=weightedSort)
        for key, category in sorted_categories:
            category['actions'] = []
            config['richtext_toolbar'].append(category)
        # deepcopy so toolbar and context menu can be filled independently.
        config['richtext_contextmenu'] = deepcopy(config['richtext_toolbar'])
        return config
    def mapTinyMCEToolbarFormats(self, settings, config):
        """Distribute toolbar actions into their categories, weight-sorted."""
        actions = settings.get(
            '{0:s}.richtext_toolbar'.format(self.prefix), {})
        for key, action in actions.items():
            index = getCategoryIndex(config['richtext_toolbar'], action['category'])  # noqa
            if index is not None:
                config['richtext_toolbar'][index]['actions'].append(action)
        for group in config['richtext_toolbar']:
            group['actions'].sort(key=safe_weight_sortkey)
        return config
    def mapTinyMCEContextMenuFormats(self, settings, config):
        """Distribute context-menu actions into their categories,
        weight-sorted."""
        actions = settings.get(
            '{0:s}.richtext_contextmenu'.format(self.prefix), {})
        for key, action in actions.items():
            index = getCategoryIndex(config['richtext_contextmenu'], action['category'])  # noqa
            if index is not None:
                config['richtext_contextmenu'][index]['actions'].append(action)
        for group in config['richtext_contextmenu']:
            group['actions'].sort(key=safe_weight_sortkey)
        return config
    # def mapStructureTiles(self, settings, config):
    #     # Structure Tiles
    #     tiles = settings.get('%s.structure_tiles' % self.prefix, {})
    #
    #     for key, tile in tiles.items():
    #         if not 'category' in tile:
    #             continue
    #         index = getCategoryIndex(config['tiles'], tile['category'])
    #         if index is not None:
    #             config['tiles'][index]['tiles'].append(tile)
    #     for tile in config['tiles']:
    #         tile['tiles'].sort(key=safe_weight_sortkey)
    #     return config
    #
    # def mapApplicationTiles(self, settings, config):
    #     tiles = settings.get('%s.app_tiles' % self.prefix, {})
    #     for key, tile in tiles.items():
    #         if not 'category' in tile:
    #             continue
    #         index = getCategoryIndex(config['tiles'], tile['category'])
    #         if index is not None:
    #             config['tiles'][index]['tiles'].append(tile)
    #     for tile in config['tiles']:
    #         tile['tiles'].sort(key=safe_weight_sortkey)
    #     return config
    def mapTiles(self, settings, config, tile_category):
        """Distribute the tiles registered under *tile_category*
        (e.g. 'structure_tiles', 'app_tiles') into their tile categories.
        Generalizes the two commented-out methods above."""
        tiles = settings.get(
            '{0:s}.{1:s}'.format(self.prefix, tile_category), {})
        for key, tile in tiles.items():
            if 'category' not in tile:
                continue
            index = getCategoryIndex(config['tiles'], tile['category'])
            if index is not None:
                config['tiles'][index]['tiles'].append(tile)
        for tile in config['tiles']:
            tile['tiles'].sort(key=safe_weight_sortkey)
        return config
    # BBB: needs a bit of thought, I'm nowhere near satisfied with this
    # solution
    @classmethod
    def actionsForWidget(cls, settings, widget_name):
        """Looks up which (mosaic) actions are associated to a certain z3c
        widget.
        The lookup is made in 2 parts:
        - First the registry is looked for a key named
          plone.app.mosaic.widget_actions.<'full.widget.dotted.name'
          .replace('.','_')>
        - If it is not found, looks for
          plone.app.mosaic.default_widget_actions
        The rationale is that this way the three default actions are there by
        default, and only if you need special stuff (probably if you provide an
        inline widget) you can override the default, but for the simple use
        case no interaction is needed
        """
        actions = settings.get(
            '{0:s}.widget_actions.{1:s}.actions'.format(
                cls.prefix, widget_name.replace('.', '_')),
            default=None
        )
        if actions is not None:
            return actions
        return settings.get(
            '{0:s}.default_widget_actions'.format(cls.prefix),
            default=list()
        )
    def mapFieldTiles(self, settings, config, kwargs):
        """Add one 'field' tile per non-omitted schema field of the content
        type given in kwargs['type'].  No-op when no type is passed."""
        args = {
            'type': None,
            'context': None,
            'request': None,
        }
        args.update(kwargs)
        if args['type'] is None:
            return config
        prefixes = []
        # Per-type omitted-field list, falling back to the global default.
        registry_omitted = settings.get(
            '{0:s}.omitted_fields.{1:s}'.format(
                self.prefix, args['type'].replace('.', '_')),
            default=None,
        )
        if registry_omitted is None:
            registry_omitted = settings.get(
                self.prefix + '.default_omitted_fields',
                default=[],
            )
        for index, schema in enumerate(iterSchemataForType(args['type'])):
            # Behavior schemas (index > 0) get a name prefix; fall back to
            # the full dotted identifier on a name clash.
            prefix = ''
            if index > 0:
                prefix = schema.__name__
            if prefix in prefixes:
                prefix = schema.__identifier__
            prefixes.append(prefix)
            for fieldconfig in extractFieldInformation(
                    schema, args['context'], args['request'], prefix):
                if fieldconfig['id'] not in registry_omitted:
                    label = translate(fieldconfig['title'],
                                      context=args['request'])
                    tileconfig = {
                        'id': 'formfield-form-widgets-{0:s}'.format(
                            fieldconfig['name']),
                        'name': fieldconfig['name'],
                        'label': label,
                        'category': 'fields',
                        'tile_type': 'field',
                        'read_only': fieldconfig['readonly'],
                        'favorite': False,
                        'widget': fieldconfig['widget'],
                        'available_actions': self.actionsForWidget(
                            settings,
                            fieldconfig['widget']
                        ),
                    }
                    # NOTE(review): 'index' here shadows the enumerate()
                    # variable above; harmless since enumerate rebinds it
                    # each iteration, but confusing -- consider renaming.
                    index = getCategoryIndex(config['tiles'], 'fields')
                    if index is not None:
                        config['tiles'][index]['tiles'].append(tileconfig)
        return config
    def __call__(self, **kwargs):
        """Assemble and return the complete mosaic configuration dict for
        the (optional) context/request/type passed as keyword arguments."""
        settings = self.parseRegistry()
        config = {}
        config = self.mapFormatCategories(settings, config)
        config = self.mapFormats(settings, config)
        config = self.mapTinyMCEActionCategories(settings, config)
        config = self.mapTinyMCEToolbarFormats(settings, config)
        config = self.mapTinyMCEContextMenuFormats(settings, config)
        config = self.mapActions(settings, config)
        config = self.mapTilesCategories(settings, config)
        for tile_category in ['structure_tiles', 'app_tiles']:
            config = self.mapTiles(settings, config, tile_category)
        config = self.mapFieldTiles(settings, config, kwargs)
        args = {
            'type': None,
            'context': None,
            'request': None,
        }
        args.update(kwargs)
        if IFolderish.providedBy(args['context']):
            config['parent'] = args['context'].absolute_url() + "/"
        elif args['context']:
            # NOTE(review): if aq_parent is absent, getattr returns None and
            # the chained absolute_url() raises AttributeError -- confirm
            # that a non-folderish context always has an acquisition parent.
            config['parent'] = getattr(args['context'].aq_inner, 'aq_parent',
                                       None).absolute_url() + "/"
        else:
            # context can be None, at least in tests. Do nothing
            # then. See test_config in test_mosaicregistry.py
            pass
        return config
| gpl-2.0 |
grantmcconnaughey/django-sql-explorer | explorer/counter.py | 9 | 2054 | # Taken from http://code.activestate.com/recipes/576611-counter-class/
class Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
dict.update(self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds) | mit |
shakamunyi/tensorflow | tensorflow/compiler/tests/unary_ops_test.py | 4 | 22277 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class UnaryOpsTest(XLATestCase):
"""Test cases for unary operators."""
  def _assertOpOutputMatchesExpected(self, op, inp, expected,
                                     equality_test=None, rtol=1e-3, atol=1e-5):
    """Verifies that 'op' produces 'expected' when fed input 'inp' .
    Args:
      op: operator to test
      inp: numpy input array to use as input to 'op'.
      expected: numpy array representing the expected output of 'op'.
      equality_test: either None, or a function that tests two numpy arrays for
        equality. If None, self.assertAllClose is used.
      rtol: relative tolerance for equality test.
      atol: absolute tolerance for equality test.
    """
    with self.test_session() as session:
      with self.test_scope():
        # Build the op under the XLA test scope; feeding through a
        # placeholder fixes the dtype/shape the compiler sees.
        pinp = array_ops.placeholder(
            dtypes.as_dtype(inp.dtype), inp.shape, name="a")
        output = op(pinp)
      result = session.run(output, {pinp: inp})
    if equality_test is None:
      equality_test = self.assertAllClose
    equality_test(result, expected, rtol=rtol, atol=atol)
def ListsAreClose(self, result, expected, rtol, atol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in xrange(len(result)):
self.assertAllClose(result[i], expected[i], rtol, atol)
  def testAllTypeOps(self):
    """Tests diag/identity/squeeze-style ops across every numeric type."""
    for dtype in self.numeric_types:
      self._assertOpOutputMatchesExpected(
          array_ops.diag,
          np.array([1, 2, 3, 4], dtype=dtype),
          np.array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
                   dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.diag_part,
          np.arange(36).reshape([2, 3, 2, 3]).astype(dtype),
          np.array([[0, 7, 14], [21, 28, 35]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.identity,
          np.array([[-1, 1]], dtype=dtype),
          expected=np.array([[-1, 1]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.matrix_diag,
          np.array([[1, 2], [3, 4]], dtype=dtype),
          np.array([[[1, 0], [0, 2]], [[3, 0], [0, 4]]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.matrix_diag_part,
          np.arange(3 * 2 * 4).reshape([3, 2, 4]).astype(dtype),
          np.array([[0, 5], [8, 13], [16, 21]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.prevent_gradient,
          np.array([[-1, 1]], dtype=dtype),
          expected=np.array([[-1, 1]], dtype=dtype))
      # squeeze: all size-1 dims removed, including the all-empty case.
      self._assertOpOutputMatchesExpected(
          array_ops.squeeze,
          np.array([[[[[]]]]], dtype=dtype),
          expected=np.array([], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.squeeze,
          np.array([[[1], [2]]], dtype=dtype),
          expected=np.array([1, 2], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.squeeze,
          np.array([[[1]], [[2]]], dtype=dtype),
          expected=np.array([1, 2], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.squeeze,
          np.array([[[1, 2], [3, 4]]], dtype=dtype),
          expected=np.array([[1, 2], [3, 4]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.stop_gradient,
          np.array([[-1, 1]], dtype=dtype),
          expected=np.array([[-1, 1]], dtype=dtype))
  def testFloatOps(self):
    """Tests elementwise float unary ops against precomputed values."""
    for dtype in self.float_types:
      self._assertOpOutputMatchesExpected(
          math_ops.acosh,
          np.array([1, 2, 3, 4], dtype=dtype),
          expected=np.array([0, 1.3169579, 1.76274717, 2.06343707],
                            dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.asinh,
          np.array([1, 2, 3, 4], dtype=dtype),
          expected=np.array([0.88137359, 1.44363548, 1.81844646, 2.09471255],
                            dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.atanh,
          np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype),
          expected=np.array([0.10033535, 0.20273255, 0.3095196, 0.42364893],
                            dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.ceil,
          np.array([[-1.7, 1.2]], dtype=dtype),
          expected=np.array([[-1, 2]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.cosh,
          np.array([1, 2, 3, 4], dtype=dtype),
          expected=np.array([1.54308063, 3.76219569, 10.067662, 27.30823284],
                            dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.exp,
          np.array([[-1, 1]], dtype=dtype),
          expected=np.array([[0.36787945, 2.7182817]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.expm1,
          np.array([[-1, 1]], dtype=dtype),
          expected=np.array([[-0.63212056, 1.71828183]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.floor,
          np.array([[-1.7, 1.2]], dtype=dtype),
          expected=np.array([[-2, 1]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.is_finite,
          np.array([[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]],
                   dtype=dtype),
          expected=np.array([[0, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=np.bool))
      # Tests for tf.nn ops.
      self._assertOpOutputMatchesExpected(
          nn_ops.l2_loss, np.array([[[]]], dtype=dtype), expected=dtype(0))
      self._assertOpOutputMatchesExpected(nn_ops.l2_loss, dtype(4), dtype(8))
      self._assertOpOutputMatchesExpected(
          nn_ops.l2_loss, np.array([[-2, 4]], dtype=dtype), expected=dtype(10))
      self._assertOpOutputMatchesExpected(
          math_ops.reciprocal,
          np.array([[1, 2]], dtype=dtype),
          expected=np.array([[1, 0.5]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.log,
          np.array([[1, 2]], dtype=dtype),
          expected=np.array([[0, 0.69314718]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.sin,
          np.array([[1, 2]], dtype=dtype),
          expected=np.array([[0.841478, 0.909302]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.cos,
          np.array([[1, 2]], dtype=dtype),
          expected=np.array([[0.540297, -0.41614]], dtype=dtype))
      # TODO(b/34703906): improve log1p implementation and make tolerance
      # tighter.
      self._assertOpOutputMatchesExpected(
          math_ops.log1p,
          np.array([[1e-14, 1e-15, 0.6]], dtype=dtype),
          expected=np.log1p(np.array([[1e-14, 1e-15, 0.6]], dtype=dtype)))
      # rint/round: halfway cases round to even.
      self._assertOpOutputMatchesExpected(
          math_ops.rint,
          np.array([[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
                    [0.5, 1.5, 2.5, 3.5]], dtype=dtype),
          expected=np.array([[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]],
                            dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.round,
          np.array([[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
                    [0.5, 1.5, 2.5, 3.5]], dtype=dtype),
          expected=np.array([[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]],
                            dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.rsqrt,
          np.array([[4, 16]], dtype=dtype),
          expected=np.array([[0.5, 0.25]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.sigmoid,
          np.array(
              [[1, 1, 1, 1],
               [1, 2, 3, 4]],
              dtype=dtype),
          expected=np.array(
              [[0.7310586, 0.7310586, 0.7310586, 0.7310586],
               [0.7310586, 0.880797, 0.95257413, 0.98201376]],
              dtype=dtype))
      # sigmoid saturates to exactly 0/1 for large-magnitude inputs.
      self._assertOpOutputMatchesExpected(
          math_ops.sigmoid,
          np.array([-300, -150, 0, 150, 300], dtype=dtype),
          expected=np.array([0, 0, 0.5, 1, 1], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.sinh,
          np.array([1, 2, 3, 4], dtype=dtype),
          expected=np.array([1.17520119, 3.62686041, 10.01787493, 27.2899172],
                            dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.sqrt,
          np.array([[4, 9]], dtype=dtype),
          expected=np.array([[2, 3]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.tan,
          np.array([1, 2, 3, 4], dtype=dtype),
          expected=np.array([1.55740772, -2.18503986, -0.14254654, 1.15782128],
                            dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.tanh,
          np.array(
              [[1, 1, 1, 1],
               [1, 2, 3, 4]],
              dtype=dtype),
          expected=np.array(
              [[0.76159418, 0.76159418, 0.76159418, 0.76159418],
               [0.76159418, 0.96402758, 0.99505478, 0.99932933]],
              dtype=dtype))
      self._assertOpOutputMatchesExpected(
          nn_ops.log_softmax,
          np.array(
              [[1, 1, 1, 1],
               [1, 2, 3, 4]],
              dtype=dtype),
          expected=np.array(
              [[-1.3862944, -1.3862944, -1.3862944, -1.3862944],
               [-3.4401896, -2.4401896, -1.4401897, -0.44018969]],
              dtype=dtype))
      self._assertOpOutputMatchesExpected(
          nn_ops.elu,
          np.array([[-1, 0, 1]], dtype=dtype),
          expected=np.array([[-0.63212056, 0, 1]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          nn_ops.selu,
          np.array([[-1, 0, 1]], dtype=dtype),
          expected=np.array([[-1.11133074, 0., 1.05070099]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          nn_ops.relu,
          np.array([[-1, 1]], dtype=dtype),
          expected=np.array([[0, 1]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          nn_ops.relu6,
          np.array([[-0.05, 6.05, 5]], dtype=dtype),
          expected=np.array([[0, 6, 5]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          nn_ops.softmax,
          np.array(
              [[1, 1, 1, 1],
               [1, 2, 3, 4]],
              dtype=dtype),
          expected=np.array(
              [[0.25, 0.25, 0.25, 0.25],
               [0.032058604, 0.087144323, 0.23688284, 0.64391428]],
              dtype=dtype))
      self._assertOpOutputMatchesExpected(
          nn_ops.softsign,
          np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
          expected=np.array([[-0.66666669, -0.5, 0, 0.5, 0.66666669]],
                            dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.is_finite,
          np.array(
              [[42, float("inf"), -123], [float("nan"), 0, -0.0]], dtype=dtype),
          expected=np.array(
              [[True, False, True], [False, True, True]], dtype=np.bool))
      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.quantize_and_dequantize_v2(x, -127, 127, True, 8),
          np.array([-1, -0.5, 0, 0.3], dtype=dtype),
          expected=np.array([-1, -64.0 / 127, 0, 38.0 / 127], dtype=dtype))
def testIntOps(self):
for dtype in self.int_types:
self._assertOpOutputMatchesExpected(
bitwise_ops.invert,
np.array([0, -1, 1, 16, 42], dtype=dtype),
expected=np.array([-1, 0, -2, -17, -43], dtype=dtype))
  def testNumericOps(self):
    """Tests abs/negative/square and ones_like/zeros_like on all numeric types."""
    for dtype in self.numeric_types:
      self._assertOpOutputMatchesExpected(
          math_ops.abs,
          np.array([[2, -1]], dtype=dtype),
          expected=np.array([[2, 1]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.negative,
          np.array([[-1, 1]], dtype=dtype),
          expected=np.array([[1, -1]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          math_ops.square,
          np.array([[-2, 3]], dtype=dtype),
          expected=np.array([[4, 9]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.zeros_like,
          np.array([[4, 3], [2, 1]], dtype=dtype),
          expected=np.array([[0, 0], [0, 0]], dtype=dtype))
      self._assertOpOutputMatchesExpected(
          array_ops.ones_like,
          np.array([[4, 3], [2, 1]], dtype=dtype),
          expected=np.array([[1, 1], [1, 1]], dtype=dtype))
  # TODO(phawkins): these tests fail unless fastmath optimizations
  # are disabled. Use more robust IsInf/IsNaN detection and enable these
  # tests.
  @unittest.skip("test case fails in fast-math mode")
  def testIsInfAndIsNan(self):
    """Tests is_inf/is_nan classification over the float types (skipped)."""
    for dtype in self.float_types:
      self._assertOpOutputMatchesExpected(
          math_ops.is_inf,
          np.array([[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]],
                   dtype=dtype),
          expected=np.array([[1, 0, 0, 0, 0, 0, 0, 1, 0]], dtype=np.bool))
      self._assertOpOutputMatchesExpected(
          math_ops.is_nan,
          np.array([[np.NINF, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]],
                   dtype=dtype),
          expected=np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.bool))
def testLogicalOps(self):
self._assertOpOutputMatchesExpected(
math_ops.logical_not,
np.array([[True, False], [False, True]], dtype=np.bool),
expected=np.array([[False, True], [True, False]], dtype=np.bool))
  def testBiasAddGrad(self):
    """Tests bias_add_grad: a sum-reduction over all non-bias dimensions."""
    # NHWC (default layout): the bias dimension is the last one.
    self._assertOpOutputMatchesExpected(
        gen_nn_ops.bias_add_grad,
        np.array([[1., 2.], [3., 4.]], dtype=np.float32),
        expected=np.array([4., 6.], dtype=np.float32))
    # NCHW layout: the bias dimension is dimension 1.
    self._assertOpOutputMatchesExpected(
        lambda x: gen_nn_ops.bias_add_grad(x, data_format="NCHW"),
        np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]],
                 dtype=np.float32),
        expected=np.array([10., 26.], dtype=np.float32))
  def testCast(self):
    """Tests math_ops.cast over every (shape, src, dst) type combination."""
    shapes = [[], [4], [2, 3], [2, 0, 4]]
    types = [dtypes.bool, dtypes.int32, dtypes.float32]
    for shape in shapes:
      for src_type in types:
        for dst_type in types:
          src = np.arange(np.prod(shape)).astype(src_type.as_numpy_dtype)
          src = src.reshape(shape)
          # numpy's own astype defines the expected result.
          dst = src.astype(dst_type.as_numpy_dtype)
          # dst_type bound as a lambda default to avoid the late-binding
          # closure pitfall inside the loop.
          self._assertOpOutputMatchesExpected(
              lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
              src,
              expected=dst)
def testInvertPermutation(self):
self._assertOpOutputMatchesExpected(
array_ops.invert_permutation,
np.array([1, 2, 0], np.int32),
expected=np.array([2, 0, 1], dtype=np.int32))
def testRank(self):
rank_op = lambda x: array_ops.rank_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
rank_op, dtype(7), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
rank_op, np.array(
[[], []], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op, np.array(
[-1, 1], dtype=dtype), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
rank_op, np.array(
[[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(2))
def testShape(self):
shape_op = lambda x: array_ops.shape_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
shape_op, dtype(7), expected=np.array([], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[], []], dtype=dtype),
expected=np.array([2, 0], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([-1, 1], dtype=dtype),
expected=np.array([2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([1, 2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.array([3, 1], dtype=np.int32))
def testSize(self):
size_op = lambda x: array_ops.size_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op, dtype(7), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
size_op, np.array([[], []], dtype=dtype), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
size_op, np.array([-1, 1], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(3))
  def testUnpack(self):
    """unstack splits a matrix into row (axis 0) or column (axis 1) slices."""
    self._assertOpOutputMatchesExpected(
        array_ops.unstack,
        np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
        expected=[
            np.array([1., 2.], dtype=np.float32),
            np.array([3., 4.], dtype=np.float32),
            np.array([5., 6.], dtype=np.float32),
        ],
        equality_test=self.ListsAreClose)
    # axis=1 yields one slice per column.
    self._assertOpOutputMatchesExpected(
        lambda x: array_ops.unstack(x, axis=1),
        np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
        expected=[
            np.array([1., 3., 5.], dtype=np.float32),
            np.array([2., 4., 6.], dtype=np.float32),
        ],
        equality_test=self.ListsAreClose)
  def testDepthToSpace(self):
    """depth_to_space moves depth groups into 2x2 spatial blocks (NHWC)."""
    for dtype in self.numeric_types:
      # 1x1x4 depth -> 2x2x1 spatial.
      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.depth_to_space(x, block_size=2),
          np.array([[[[1, 2, 3, 4]]]], dtype=dtype),
          expected=np.array([[[[1], [2]],
                              [[3], [4]]]], dtype=dtype))
      # 1x1x12 -> 2x2x3: depth splits into block_size**2 groups of 3.
      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.depth_to_space(x, block_size=2),
          np.array([[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]], dtype=dtype),
          expected=np.array([[[[1, 2, 3], [4, 5, 6]],
                              [[7, 8, 9], [10, 11, 12]]]], dtype=dtype))
      # 2x2x4 -> 4x4x1: each input pixel expands to a 2x2 block.
      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.depth_to_space(x, block_size=2),
          np.array([[[[1, 2, 3, 4],
                      [5, 6, 7, 8]],
                     [[9, 10, 11, 12],
                      [13, 14, 15, 16]]]], dtype=dtype),
          expected=np.array([[[[1], [2], [5], [6]],
                              [[3], [4], [7], [8]],
                              [[9], [10], [13], [14]],
                              [[11], [12], [15], [16]]]], dtype=dtype))
  def testSpaceToDepth(self):
    """space_to_depth folds 2x2 spatial blocks into depth (inverse of above)."""
    for dtype in self.numeric_types:
      # 2x2x1 -> 1x1x4.
      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.space_to_depth(x, block_size=2),
          np.array([[[[1], [2]],
                     [[3], [4]]]], dtype=dtype),
          expected=np.array([[[[1, 2, 3, 4]]]], dtype=dtype))
      # 2x2x3 -> 1x1x12.
      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.space_to_depth(x, block_size=2),
          np.array([[[[1, 2, 3], [4, 5, 6]],
                     [[7, 8, 9], [10, 11, 12]]]], dtype=dtype),
          expected=np.array([[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]],
                            dtype=dtype))
      # 4x4x1 -> 2x2x4: each 2x2 block collapses into one deep pixel.
      self._assertOpOutputMatchesExpected(
          lambda x: array_ops.space_to_depth(x, block_size=2),
          np.array([[[[1], [2], [5], [6]],
                     [[3], [4], [7], [8]],
                     [[9], [10], [13], [14]],
                     [[11], [12], [15], [16]]]], dtype=dtype),
          expected=np.array([[[[1, 2, 3, 4],
                               [5, 6, 7, 8]],
                              [[9, 10, 11, 12],
                               [13, 14, 15, 16]]]], dtype=dtype))
  def _assertSoftplusMatchesExpected(self, features, dtype):
    """Checks nn_ops.softplus against a numerically stable reference.

    softplus(x) == log(1 + exp(x)) == logaddexp(0, x).
    """
    features = np.array(features, dtype=dtype)
    zero = np.asarray(0).astype(dtype)
    expected = np.logaddexp(zero, features)
    self._assertOpOutputMatchesExpected(
        nn_ops.softplus, features, expected=expected)
  def testSoftplus(self):
    """Exercises softplus on moderate values and near +/- log(eps) extremes."""
    for dtype in self.float_types:
      self._assertSoftplusMatchesExpected([[-2, 0, 8]], dtype)
      self._assertSoftplusMatchesExpected(
          [[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]], dtype)
      # Probe numerical stability around the log of machine epsilon, where
      # naive log(1 + exp(x)) loses precision.
      log_eps = np.log(np.finfo(dtype).eps)
      one = dtype(1)
      ten = dtype(10)
      self._assertSoftplusMatchesExpected([
          log_eps, log_eps - one, log_eps + one, log_eps - ten,
          log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
          -log_eps - ten, -log_eps + ten], dtype)
# Allows running this test file directly.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
ruibarreira/linuxtrail | usr/lib/python3.4/posixpath.py | 92 | 13448 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime","islink","exists","lexists","isdir","isfile",
           "ismount", "expanduser","expandvars","normpath","abspath",
           "samefile","sameopenfile","samestat",
           "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
           "devnull","realpath","supports_unicode_filenames","relpath"]

# Strings representing various path-related bits and pieces.
# These are primarily for export; internally, they are hardcoded.
curdir = '.'                # current-directory token
pardir = '..'               # parent-directory token
extsep = '.'                # filename extension separator
sep = '/'                   # path component separator
pathsep = ':'               # separator in search-path strings (e.g. $PATH)
defpath = ':/bin:/usr/bin'  # default search path when $PATH is unset
altsep = None               # POSIX has no alternative separator
devnull = '/dev/null'
def _get_sep(path):
if isinstance(path, bytes):
return b'/'
else:
return '/'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
    """Normalize case of pathname.  Has no effect under Posix."""
    # POSIX filesystems are case-sensitive, so only the type is validated.
    if isinstance(s, (bytes, str)):
        return s
    raise TypeError("normcase() argument must be str or bytes, "
                    "not '{}'".format(s.__class__.__name__))
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
    """Test whether a path is absolute (i.e. begins with a slash)."""
    return s.startswith(_get_sep(s))
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed.

    If any component is an absolute path, all previous path components
    will be discarded.  An empty last part will result in a path that
    ends with a separator."""
    sep = _get_sep(a)
    path = a
    try:
        for part in p:
            if part.startswith(sep):
                # Absolute component: restart the result from here.
                path = part
            elif not path or path.endswith(sep):
                path += part
            else:
                path += sep + part
    except TypeError:
        if all(isinstance(s, (str, bytes)) for s in (a,) + p):
            # Must have a mixture of text and binary data
            raise TypeError("Can't mix strings and bytes in path "
                            "components") from None
        raise
    return path
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
    everything after the final slash.  Either part may be empty."""
    sep = _get_sep(p)
    cut = p.rfind(sep) + 1
    head = p[:cut]
    tail = p[cut:]
    # Strip trailing slashes from head unless head is all slashes (the root).
    if head and head != sep * len(head):
        head = head.rstrip(sep)
    return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared implementation with POSIX separators; there is
    # no alternative separator on POSIX (third argument is None).
    if isinstance(p, bytes):
        return genericpath._splitext(p, b'/', None, b'.')
    return genericpath._splitext(p, '/', None, '.')
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
    """Split a pathname into drive and path.  On Posix, drive is always
    empty."""
    # p[:0] yields '' or b'' matching the argument's type.
    drive = p[:0]
    return drive, p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
    """Returns the final component of a pathname"""
    sep = _get_sep(p)
    # Everything after the last separator (the whole string if none).
    return p[p.rfind(sep) + 1:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
    """Returns the directory component of a pathname"""
    sep = _get_sep(p)
    head = p[:p.rfind(sep) + 1]
    # Keep a run of leading slashes intact (it is the root); otherwise drop
    # the trailing separator(s).
    if head and head.strip(sep):
        head = head.rstrip(sep)
    return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
    """Test whether a path is a symbolic link"""
    try:
        mode = os.lstat(path).st_mode
    except (OSError, AttributeError):
        # Missing path, or a platform whose os module lacks lstat.
        return False
    return stat.S_ISLNK(mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links"""
    # lstat does not follow symlinks, so a dangling link still succeeds.
    try:
        os.lstat(path)
        return True
    except OSError:
        return False
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
    """Test whether a path is a mount point"""
    try:
        s1 = os.lstat(path)
    except OSError:
        # It doesn't exist -- so not a mount point. :-)
        return False
    else:
        # A symlink can never be a mount point
        if stat.S_ISLNK(s1.st_mode):
            return False
    if isinstance(path, bytes):
        parent = join(path, b'..')
    else:
        parent = join(path, '..')
    try:
        s2 = os.lstat(parent)
    except OSError:
        return False
    # A mount point is detected when path and path/.. differ in device,
    # or are the same i-node (as happens for the filesystem root).
    dev1 = s1.st_dev
    dev2 = s2.st_dev
    if dev1 != dev2:
        return True     # path/.. on a different device as path
    ino1 = s1.st_ino
    ino2 = s2.st_ino
    if ino1 == ino2:
        return True     # path/.. is the same i-node as path
    return False
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
    do nothing."""
    if isinstance(path, bytes):
        tilde = b'~'
    else:
        tilde = '~'
    if not path.startswith(tilde):
        return path
    sep = _get_sep(path)
    # Find the end of the '~user' prefix (first separator, or end of string).
    i = path.find(sep, 1)
    if i < 0:
        i = len(path)
    if i == 1:
        # Bare '~': prefer $HOME, fall back to the password database.
        if 'HOME' not in os.environ:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
        else:
            userhome = os.environ['HOME']
    else:
        # '~user': look the named user up in the password database.
        import pwd
        name = path[1:i]
        if isinstance(name, bytes):
            name = str(name, 'ASCII')
        try:
            pwent = pwd.getpwnam(name)
        except KeyError:
            # Unknown user: return the path unchanged.
            return path
        userhome = pwent.pw_dir
    if isinstance(path, bytes):
        userhome = os.fsencode(userhome)
        root = b'/'
    else:
        root = '/'
    # Avoid a double slash when the home directory ends with '/'.
    userhome = userhome.rstrip(root)
    return (userhome + path[i:]) or root
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
# Compiled regexes for expandvars, built lazily on first use
# (str and bytes variants respectively).
_varprog = None
_varprogb = None

def expandvars(path):
    """Expand shell variables of form $var and ${var}.  Unknown variables
    are left unchanged."""
    global _varprog, _varprogb
    if isinstance(path, bytes):
        if b'$' not in path:
            return path
        if not _varprogb:
            import re
            _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprogb.search
        start = b'{'
        end = b'}'
        # os.environb may be absent on some platforms; handled below.
        environ = getattr(os, 'environb', None)
    else:
        if '$' not in path:
            return path
        if not _varprog:
            import re
            _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprog.search
        start = '{'
        end = '}'
        environ = os.environ
    i = 0
    while True:
        m = search(path, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        if name.startswith(start) and name.endswith(end):
            # Strip the braces of the ${name} form.
            name = name[1:-1]
        try:
            if environ is None:
                # bytes path without os.environb: round-trip through str env.
                value = os.fsencode(os.environ[os.fsdecode(name)])
            else:
                value = environ[name]
        except KeyError:
            # Unknown variable: leave the reference in place, skip past it.
            i = j
        else:
            # Splice in the value and continue scanning after it.
            tail = path[j:]
            path = path[:i] + value
            i = len(path)
            path += tail
    return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    if isinstance(path, bytes):
        sep, empty, dot, dotdot = b'/', b'', b'.', b'..'
    else:
        sep, empty, dot, dotdot = '/', '', '.', '..'
    if path == empty:
        return dot
    initial_slashes = path.startswith(sep)
    # POSIX allows one or two initial slashes, but treats three or more
    # as single slash.
    if (initial_slashes and
        path.startswith(sep*2) and not path.startswith(sep*3)):
        initial_slashes = 2
    resolved = []
    for comp in path.split(sep):
        if comp in (empty, dot):
            continue
        if comp != dotdot:
            resolved.append(comp)
        elif not resolved or resolved[-1] == dotdot:
            # '..' can only survive at the start of a relative path.
            if not initial_slashes:
                resolved.append(comp)
        else:
            resolved.pop()
    path = sep.join(resolved)
    if initial_slashes:
        path = sep * initial_slashes + path
    return path or dot
def abspath(path):
    """Return an absolute path."""
    if not isabs(path):
        # Anchor relative paths at the current working directory, using the
        # bytes variant of getcwd for bytes paths.
        cwd = os.getcwdb() if isinstance(path, bytes) else os.getcwd()
        path = join(cwd, path)
    return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
    symbolic links encountered in the path."""
    # The boolean flag reports whether a symlink loop was hit; the partially
    # resolved path is still returned (made absolute) in that case.
    path, ok = _joinrealpath(filename[:0], filename, {})
    return abspath(path)
# Join two paths, normalizing ang eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
    """Join *path* and *rest*, resolving symlinks found in *rest*.

    *seen* maps visited symlink paths to their resolution, with None meaning
    "resolution in progress" (used to detect loops).  Returns (path, ok)
    where ok is False if a symlink loop was detected.
    """
    if isinstance(path, bytes):
        sep = b'/'
        curdir = b'.'
        pardir = b'..'
    else:
        sep = '/'
        curdir = '.'
        pardir = '..'
    if isabs(rest):
        rest = rest[1:]
        path = sep
    while rest:
        name, _, rest = rest.partition(sep)
        if not name or name == curdir:
            # current dir
            continue
        if name == pardir:
            # parent dir
            if path:
                path, name = split(path)
                if name == pardir:
                    # Already accumulating unresolved '..' components.
                    path = join(path, pardir, pardir)
            else:
                path = pardir
            continue
        newpath = join(path, name)
        if not islink(newpath):
            path = newpath
            continue
        # Resolve the symbolic link
        if newpath in seen:
            # Already seen this path
            path = seen[newpath]
            if path is not None:
                # use cached value
                continue
            # The symlink is not resolved, so we must have a symlink loop.
            # Return already resolved part + rest of the path unchanged.
            return join(newpath, rest), False
        seen[newpath] = None # not resolved symlink
        path, ok = _joinrealpath(path, os.readlink(newpath), seen)
        if not ok:
            return join(path, rest), False
        seen[newpath] = path # resolved symlink
    return path, True
# Only Darwin guarantees Unicode-normalized filenames at the OS level.
supports_unicode_filenames = (sys.platform == 'darwin')

def relpath(path, start=None):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    if isinstance(path, bytes):
        curdir = b'.'
        sep = b'/'
        pardir = b'..'
    else:
        curdir = '.'
        sep = '/'
        pardir = '..'
    if start is None:
        start = curdir
    # Compare component lists of the absolute forms of both paths.
    start_list = [x for x in abspath(start).split(sep) if x]
    path_list = [x for x in abspath(path).split(sep) if x]
    # Work out how much of the filepath is shared by start and path.
    i = len(commonprefix([start_list, path_list]))
    # Climb out of the unshared tail of start, then descend into path.
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
| gpl-3.0 |
afdnlw/dnf | tests/test_cli_format.py | 13 | 2245 | # Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.cli.format import format_time, format_number
import dnf.cli.format
import tests.support
class FormatTest(tests.support.TestCase):
    """Tests for dnf.cli.format's time/number formatting helpers."""

    def test_format_time(self):
        """format_time renders seconds as MM:SS, or HH:MM:SS with use_hours."""
        # assertEquals is a long-deprecated alias (removed in Python 3.12);
        # assertEqual is the supported spelling.
        self.assertEqual(format_time(None), '--:--')
        self.assertEqual(format_time(-1), '--:--')
        self.assertEqual(format_time(12*60+34), '12:34')
        self.assertEqual(format_time(12*3600+34*60+56), '754:56')
        self.assertEqual(format_time(12*3600+34*60+56, use_hours=True), '12:34:56')

    def test_format_number(self):
        """format_number scales values with binary (or SI, with SI=1) prefixes."""
        self.assertEqual(format_number(None), '0.0 ')
        self.assertEqual(format_number(-1), '-1 ')
        self.assertEqual(format_number(1.0), '1.0 ')
        self.assertEqual(format_number(999.0), '999 ')
        self.assertEqual(format_number(1000.0), '1.0 k')
        self.assertEqual(format_number(1 << 20), '1.0 M')
        self.assertEqual(format_number(1 << 30), '1.0 G')
        self.assertEqual(format_number(1e6, SI=1), '1.0 M')
        self.assertEqual(format_number(1e9, SI=1), '1.0 G')

    def test_indent_block(self):
        """indent_block prefixes every line with two spaces."""
        s = 'big\nbrown\nbag'
        out = dnf.cli.format.indent_block(s)
        self.assertEqual(out, '  big\n  brown\n  bag')
| gpl-2.0 |
chenyyx/scikit-learn-doc-zh | examples/en/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier

# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
                           n_features=10,
                           n_informative=3,
                           n_redundant=0,
                           n_repeated=0,
                           n_classes=2,
                           random_state=0,
                           shuffle=False)

# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
                              random_state=0)
forest.fit(X, y)

importances = forest.feature_importances_
# Inter-tree variability: std-dev of each feature's importance across trees.
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
# Feature indices sorted by decreasing importance.
indices = np.argsort(importances)[::-1]

# Print the feature ranking
print("Feature ranking:")

for f in range(X.shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))

# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
# Red bars show mean importance; error bars show inter-tree std-dev.
plt.bar(range(X.shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
hexlism/xx_net | python27/1.0/lib/stringprep.py | 30 | 13794 | # This file is generated by mkstringprep.py. DO NOT EDIT.
"""Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
"""
from unicodedata import ucd_3_2_0 as unicodedata
assert unicodedata.unidata_version == '3.2.0'
def in_table_a1(code):
    """Table A.1: unassigned code points in Unicode 3.2 (RFC 3454)."""
    if unicodedata.category(code) != 'Cn':
        return False
    point = ord(code)
    # Noncharacters (U+FDD0..U+FDEF and U+xxFFFE/U+xxFFFF) are excluded.
    if 0xFDD0 <= point < 0xFDF0:
        return False
    return (point & 0xFFFF) not in (0xFFFE, 0xFFFF)
# Table B.1: characters commonly mapped to nothing (soft hyphen, ZWNJ/ZWJ,
# variation selectors U+FE00..U+FE0F, BOM, ...).
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + range(65024,65040))
def in_table_b1(code):
    # Membership in the precomputed set of code points above.
    return ord(code) in b1_set
b3_exceptions = {
0xb5:u'\u03bc', 0xdf:u'ss', 0x130:u'i\u0307', 0x149:u'\u02bcn',
0x17f:u's', 0x1f0:u'j\u030c', 0x345:u'\u03b9', 0x37a:u' \u03b9',
0x390:u'\u03b9\u0308\u0301', 0x3b0:u'\u03c5\u0308\u0301', 0x3c2:u'\u03c3', 0x3d0:u'\u03b2',
0x3d1:u'\u03b8', 0x3d2:u'\u03c5', 0x3d3:u'\u03cd', 0x3d4:u'\u03cb',
0x3d5:u'\u03c6', 0x3d6:u'\u03c0', 0x3f0:u'\u03ba', 0x3f1:u'\u03c1',
0x3f2:u'\u03c3', 0x3f5:u'\u03b5', 0x587:u'\u0565\u0582', 0x1e96:u'h\u0331',
0x1e97:u't\u0308', 0x1e98:u'w\u030a', 0x1e99:u'y\u030a', 0x1e9a:u'a\u02be',
0x1e9b:u'\u1e61', 0x1f50:u'\u03c5\u0313', 0x1f52:u'\u03c5\u0313\u0300', 0x1f54:u'\u03c5\u0313\u0301',
0x1f56:u'\u03c5\u0313\u0342', 0x1f80:u'\u1f00\u03b9', 0x1f81:u'\u1f01\u03b9', 0x1f82:u'\u1f02\u03b9',
0x1f83:u'\u1f03\u03b9', 0x1f84:u'\u1f04\u03b9', 0x1f85:u'\u1f05\u03b9', 0x1f86:u'\u1f06\u03b9',
0x1f87:u'\u1f07\u03b9', 0x1f88:u'\u1f00\u03b9', 0x1f89:u'\u1f01\u03b9', 0x1f8a:u'\u1f02\u03b9',
0x1f8b:u'\u1f03\u03b9', 0x1f8c:u'\u1f04\u03b9', 0x1f8d:u'\u1f05\u03b9', 0x1f8e:u'\u1f06\u03b9',
0x1f8f:u'\u1f07\u03b9', 0x1f90:u'\u1f20\u03b9', 0x1f91:u'\u1f21\u03b9', 0x1f92:u'\u1f22\u03b9',
0x1f93:u'\u1f23\u03b9', 0x1f94:u'\u1f24\u03b9', 0x1f95:u'\u1f25\u03b9', 0x1f96:u'\u1f26\u03b9',
0x1f97:u'\u1f27\u03b9', 0x1f98:u'\u1f20\u03b9', 0x1f99:u'\u1f21\u03b9', 0x1f9a:u'\u1f22\u03b9',
0x1f9b:u'\u1f23\u03b9', 0x1f9c:u'\u1f24\u03b9', 0x1f9d:u'\u1f25\u03b9', 0x1f9e:u'\u1f26\u03b9',
0x1f9f:u'\u1f27\u03b9', 0x1fa0:u'\u1f60\u03b9', 0x1fa1:u'\u1f61\u03b9', 0x1fa2:u'\u1f62\u03b9',
0x1fa3:u'\u1f63\u03b9', 0x1fa4:u'\u1f64\u03b9', 0x1fa5:u'\u1f65\u03b9', 0x1fa6:u'\u1f66\u03b9',
0x1fa7:u'\u1f67\u03b9', 0x1fa8:u'\u1f60\u03b9', 0x1fa9:u'\u1f61\u03b9', 0x1faa:u'\u1f62\u03b9',
0x1fab:u'\u1f63\u03b9', 0x1fac:u'\u1f64\u03b9', 0x1fad:u'\u1f65\u03b9', 0x1fae:u'\u1f66\u03b9',
0x1faf:u'\u1f67\u03b9', 0x1fb2:u'\u1f70\u03b9', 0x1fb3:u'\u03b1\u03b9', 0x1fb4:u'\u03ac\u03b9',
0x1fb6:u'\u03b1\u0342', 0x1fb7:u'\u03b1\u0342\u03b9', 0x1fbc:u'\u03b1\u03b9', 0x1fbe:u'\u03b9',
0x1fc2:u'\u1f74\u03b9', 0x1fc3:u'\u03b7\u03b9', 0x1fc4:u'\u03ae\u03b9', 0x1fc6:u'\u03b7\u0342',
0x1fc7:u'\u03b7\u0342\u03b9', 0x1fcc:u'\u03b7\u03b9', 0x1fd2:u'\u03b9\u0308\u0300', 0x1fd3:u'\u03b9\u0308\u0301',
0x1fd6:u'\u03b9\u0342', 0x1fd7:u'\u03b9\u0308\u0342', 0x1fe2:u'\u03c5\u0308\u0300', 0x1fe3:u'\u03c5\u0308\u0301',
0x1fe4:u'\u03c1\u0313', 0x1fe6:u'\u03c5\u0342', 0x1fe7:u'\u03c5\u0308\u0342', 0x1ff2:u'\u1f7c\u03b9',
0x1ff3:u'\u03c9\u03b9', 0x1ff4:u'\u03ce\u03b9', 0x1ff6:u'\u03c9\u0342', 0x1ff7:u'\u03c9\u0342\u03b9',
0x1ffc:u'\u03c9\u03b9', 0x20a8:u'rs', 0x2102:u'c', 0x2103:u'\xb0c',
0x2107:u'\u025b', 0x2109:u'\xb0f', 0x210b:u'h', 0x210c:u'h',
0x210d:u'h', 0x2110:u'i', 0x2111:u'i', 0x2112:u'l',
0x2115:u'n', 0x2116:u'no', 0x2119:u'p', 0x211a:u'q',
0x211b:u'r', 0x211c:u'r', 0x211d:u'r', 0x2120:u'sm',
0x2121:u'tel', 0x2122:u'tm', 0x2124:u'z', 0x2128:u'z',
0x212c:u'b', 0x212d:u'c', 0x2130:u'e', 0x2131:u'f',
0x2133:u'm', 0x213e:u'\u03b3', 0x213f:u'\u03c0', 0x2145:u'd',
0x3371:u'hpa', 0x3373:u'au', 0x3375:u'ov', 0x3380:u'pa',
0x3381:u'na', 0x3382:u'\u03bca', 0x3383:u'ma', 0x3384:u'ka',
0x3385:u'kb', 0x3386:u'mb', 0x3387:u'gb', 0x338a:u'pf',
0x338b:u'nf', 0x338c:u'\u03bcf', 0x3390:u'hz', 0x3391:u'khz',
0x3392:u'mhz', 0x3393:u'ghz', 0x3394:u'thz', 0x33a9:u'pa',
0x33aa:u'kpa', 0x33ab:u'mpa', 0x33ac:u'gpa', 0x33b4:u'pv',
0x33b5:u'nv', 0x33b6:u'\u03bcv', 0x33b7:u'mv', 0x33b8:u'kv',
0x33b9:u'mv', 0x33ba:u'pw', 0x33bb:u'nw', 0x33bc:u'\u03bcw',
0x33bd:u'mw', 0x33be:u'kw', 0x33bf:u'mw', 0x33c0:u'k\u03c9',
0x33c1:u'm\u03c9', 0x33c3:u'bq', 0x33c6:u'c\u2215kg', 0x33c7:u'co.',
0x33c8:u'db', 0x33c9:u'gy', 0x33cb:u'hp', 0x33cd:u'kk',
0x33ce:u'km', 0x33d7:u'ph', 0x33d9:u'ppm', 0x33da:u'pr',
0x33dc:u'sv', 0x33dd:u'wb', 0xfb00:u'ff', 0xfb01:u'fi',
0xfb02:u'fl', 0xfb03:u'ffi', 0xfb04:u'ffl', 0xfb05:u'st',
0xfb06:u'st', 0xfb13:u'\u0574\u0576', 0xfb14:u'\u0574\u0565', 0xfb15:u'\u0574\u056b',
0xfb16:u'\u057e\u0576', 0xfb17:u'\u0574\u056d', 0x1d400:u'a', 0x1d401:u'b',
0x1d402:u'c', 0x1d403:u'd', 0x1d404:u'e', 0x1d405:u'f',
0x1d406:u'g', 0x1d407:u'h', 0x1d408:u'i', 0x1d409:u'j',
0x1d40a:u'k', 0x1d40b:u'l', 0x1d40c:u'm', 0x1d40d:u'n',
0x1d40e:u'o', 0x1d40f:u'p', 0x1d410:u'q', 0x1d411:u'r',
0x1d412:u's', 0x1d413:u't', 0x1d414:u'u', 0x1d415:u'v',
0x1d416:u'w', 0x1d417:u'x', 0x1d418:u'y', 0x1d419:u'z',
0x1d434:u'a', 0x1d435:u'b', 0x1d436:u'c', 0x1d437:u'd',
0x1d438:u'e', 0x1d439:u'f', 0x1d43a:u'g', 0x1d43b:u'h',
0x1d43c:u'i', 0x1d43d:u'j', 0x1d43e:u'k', 0x1d43f:u'l',
0x1d440:u'm', 0x1d441:u'n', 0x1d442:u'o', 0x1d443:u'p',
0x1d444:u'q', 0x1d445:u'r', 0x1d446:u's', 0x1d447:u't',
0x1d448:u'u', 0x1d449:u'v', 0x1d44a:u'w', 0x1d44b:u'x',
0x1d44c:u'y', 0x1d44d:u'z', 0x1d468:u'a', 0x1d469:u'b',
0x1d46a:u'c', 0x1d46b:u'd', 0x1d46c:u'e', 0x1d46d:u'f',
0x1d46e:u'g', 0x1d46f:u'h', 0x1d470:u'i', 0x1d471:u'j',
0x1d472:u'k', 0x1d473:u'l', 0x1d474:u'm', 0x1d475:u'n',
0x1d476:u'o', 0x1d477:u'p', 0x1d478:u'q', 0x1d479:u'r',
0x1d47a:u's', 0x1d47b:u't', 0x1d47c:u'u', 0x1d47d:u'v',
0x1d47e:u'w', 0x1d47f:u'x', 0x1d480:u'y', 0x1d481:u'z',
0x1d49c:u'a', 0x1d49e:u'c', 0x1d49f:u'd', 0x1d4a2:u'g',
0x1d4a5:u'j', 0x1d4a6:u'k', 0x1d4a9:u'n', 0x1d4aa:u'o',
0x1d4ab:u'p', 0x1d4ac:u'q', 0x1d4ae:u's', 0x1d4af:u't',
0x1d4b0:u'u', 0x1d4b1:u'v', 0x1d4b2:u'w', 0x1d4b3:u'x',
0x1d4b4:u'y', 0x1d4b5:u'z', 0x1d4d0:u'a', 0x1d4d1:u'b',
0x1d4d2:u'c', 0x1d4d3:u'd', 0x1d4d4:u'e', 0x1d4d5:u'f',
0x1d4d6:u'g', 0x1d4d7:u'h', 0x1d4d8:u'i', 0x1d4d9:u'j',
0x1d4da:u'k', 0x1d4db:u'l', 0x1d4dc:u'm', 0x1d4dd:u'n',
0x1d4de:u'o', 0x1d4df:u'p', 0x1d4e0:u'q', 0x1d4e1:u'r',
0x1d4e2:u's', 0x1d4e3:u't', 0x1d4e4:u'u', 0x1d4e5:u'v',
0x1d4e6:u'w', 0x1d4e7:u'x', 0x1d4e8:u'y', 0x1d4e9:u'z',
0x1d504:u'a', 0x1d505:u'b', 0x1d507:u'd', 0x1d508:u'e',
0x1d509:u'f', 0x1d50a:u'g', 0x1d50d:u'j', 0x1d50e:u'k',
0x1d50f:u'l', 0x1d510:u'm', 0x1d511:u'n', 0x1d512:u'o',
0x1d513:u'p', 0x1d514:u'q', 0x1d516:u's', 0x1d517:u't',
0x1d518:u'u', 0x1d519:u'v', 0x1d51a:u'w', 0x1d51b:u'x',
0x1d51c:u'y', 0x1d538:u'a', 0x1d539:u'b', 0x1d53b:u'd',
0x1d53c:u'e', 0x1d53d:u'f', 0x1d53e:u'g', 0x1d540:u'i',
0x1d541:u'j', 0x1d542:u'k', 0x1d543:u'l', 0x1d544:u'm',
0x1d546:u'o', 0x1d54a:u's', 0x1d54b:u't', 0x1d54c:u'u',
0x1d54d:u'v', 0x1d54e:u'w', 0x1d54f:u'x', 0x1d550:u'y',
0x1d56c:u'a', 0x1d56d:u'b', 0x1d56e:u'c', 0x1d56f:u'd',
0x1d570:u'e', 0x1d571:u'f', 0x1d572:u'g', 0x1d573:u'h',
0x1d574:u'i', 0x1d575:u'j', 0x1d576:u'k', 0x1d577:u'l',
0x1d578:u'm', 0x1d579:u'n', 0x1d57a:u'o', 0x1d57b:u'p',
0x1d57c:u'q', 0x1d57d:u'r', 0x1d57e:u's', 0x1d57f:u't',
0x1d580:u'u', 0x1d581:u'v', 0x1d582:u'w', 0x1d583:u'x',
0x1d584:u'y', 0x1d585:u'z', 0x1d5a0:u'a', 0x1d5a1:u'b',
0x1d5a2:u'c', 0x1d5a3:u'd', 0x1d5a4:u'e', 0x1d5a5:u'f',
0x1d5a6:u'g', 0x1d5a7:u'h', 0x1d5a8:u'i', 0x1d5a9:u'j',
0x1d5aa:u'k', 0x1d5ab:u'l', 0x1d5ac:u'm', 0x1d5ad:u'n',
0x1d5ae:u'o', 0x1d5af:u'p', 0x1d5b0:u'q', 0x1d5b1:u'r',
0x1d5b2:u's', 0x1d5b3:u't', 0x1d5b4:u'u', 0x1d5b5:u'v',
0x1d5b6:u'w', 0x1d5b7:u'x', 0x1d5b8:u'y', 0x1d5b9:u'z',
0x1d5d4:u'a', 0x1d5d5:u'b', 0x1d5d6:u'c', 0x1d5d7:u'd',
0x1d5d8:u'e', 0x1d5d9:u'f', 0x1d5da:u'g', 0x1d5db:u'h',
0x1d5dc:u'i', 0x1d5dd:u'j', 0x1d5de:u'k', 0x1d5df:u'l',
0x1d5e0:u'm', 0x1d5e1:u'n', 0x1d5e2:u'o', 0x1d5e3:u'p',
0x1d5e4:u'q', 0x1d5e5:u'r', 0x1d5e6:u's', 0x1d5e7:u't',
0x1d5e8:u'u', 0x1d5e9:u'v', 0x1d5ea:u'w', 0x1d5eb:u'x',
0x1d5ec:u'y', 0x1d5ed:u'z', 0x1d608:u'a', 0x1d609:u'b',
0x1d60a:u'c', 0x1d60b:u'd', 0x1d60c:u'e', 0x1d60d:u'f',
0x1d60e:u'g', 0x1d60f:u'h', 0x1d610:u'i', 0x1d611:u'j',
0x1d612:u'k', 0x1d613:u'l', 0x1d614:u'm', 0x1d615:u'n',
0x1d616:u'o', 0x1d617:u'p', 0x1d618:u'q', 0x1d619:u'r',
0x1d61a:u's', 0x1d61b:u't', 0x1d61c:u'u', 0x1d61d:u'v',
0x1d61e:u'w', 0x1d61f:u'x', 0x1d620:u'y', 0x1d621:u'z',
0x1d63c:u'a', 0x1d63d:u'b', 0x1d63e:u'c', 0x1d63f:u'd',
0x1d640:u'e', 0x1d641:u'f', 0x1d642:u'g', 0x1d643:u'h',
0x1d644:u'i', 0x1d645:u'j', 0x1d646:u'k', 0x1d647:u'l',
0x1d648:u'm', 0x1d649:u'n', 0x1d64a:u'o', 0x1d64b:u'p',
0x1d64c:u'q', 0x1d64d:u'r', 0x1d64e:u's', 0x1d64f:u't',
0x1d650:u'u', 0x1d651:u'v', 0x1d652:u'w', 0x1d653:u'x',
0x1d654:u'y', 0x1d655:u'z', 0x1d670:u'a', 0x1d671:u'b',
0x1d672:u'c', 0x1d673:u'd', 0x1d674:u'e', 0x1d675:u'f',
0x1d676:u'g', 0x1d677:u'h', 0x1d678:u'i', 0x1d679:u'j',
0x1d67a:u'k', 0x1d67b:u'l', 0x1d67c:u'm', 0x1d67d:u'n',
0x1d67e:u'o', 0x1d67f:u'p', 0x1d680:u'q', 0x1d681:u'r',
0x1d682:u's', 0x1d683:u't', 0x1d684:u'u', 0x1d685:u'v',
0x1d686:u'w', 0x1d687:u'x', 0x1d688:u'y', 0x1d689:u'z',
0x1d6a8:u'\u03b1', 0x1d6a9:u'\u03b2', 0x1d6aa:u'\u03b3', 0x1d6ab:u'\u03b4',
0x1d6ac:u'\u03b5', 0x1d6ad:u'\u03b6', 0x1d6ae:u'\u03b7', 0x1d6af:u'\u03b8',
0x1d6b0:u'\u03b9', 0x1d6b1:u'\u03ba', 0x1d6b2:u'\u03bb', 0x1d6b3:u'\u03bc',
0x1d6b4:u'\u03bd', 0x1d6b5:u'\u03be', 0x1d6b6:u'\u03bf', 0x1d6b7:u'\u03c0',
0x1d6b8:u'\u03c1', 0x1d6b9:u'\u03b8', 0x1d6ba:u'\u03c3', 0x1d6bb:u'\u03c4',
0x1d6bc:u'\u03c5', 0x1d6bd:u'\u03c6', 0x1d6be:u'\u03c7', 0x1d6bf:u'\u03c8',
0x1d6c0:u'\u03c9', 0x1d6d3:u'\u03c3', 0x1d6e2:u'\u03b1', 0x1d6e3:u'\u03b2',
0x1d6e4:u'\u03b3', 0x1d6e5:u'\u03b4', 0x1d6e6:u'\u03b5', 0x1d6e7:u'\u03b6',
0x1d6e8:u'\u03b7', 0x1d6e9:u'\u03b8', 0x1d6ea:u'\u03b9', 0x1d6eb:u'\u03ba',
0x1d6ec:u'\u03bb', 0x1d6ed:u'\u03bc', 0x1d6ee:u'\u03bd', 0x1d6ef:u'\u03be',
0x1d6f0:u'\u03bf', 0x1d6f1:u'\u03c0', 0x1d6f2:u'\u03c1', 0x1d6f3:u'\u03b8',
0x1d6f4:u'\u03c3', 0x1d6f5:u'\u03c4', 0x1d6f6:u'\u03c5', 0x1d6f7:u'\u03c6',
0x1d6f8:u'\u03c7', 0x1d6f9:u'\u03c8', 0x1d6fa:u'\u03c9', 0x1d70d:u'\u03c3',
0x1d71c:u'\u03b1', 0x1d71d:u'\u03b2', 0x1d71e:u'\u03b3', 0x1d71f:u'\u03b4',
0x1d720:u'\u03b5', 0x1d721:u'\u03b6', 0x1d722:u'\u03b7', 0x1d723:u'\u03b8',
0x1d724:u'\u03b9', 0x1d725:u'\u03ba', 0x1d726:u'\u03bb', 0x1d727:u'\u03bc',
0x1d728:u'\u03bd', 0x1d729:u'\u03be', 0x1d72a:u'\u03bf', 0x1d72b:u'\u03c0',
0x1d72c:u'\u03c1', 0x1d72d:u'\u03b8', 0x1d72e:u'\u03c3', 0x1d72f:u'\u03c4',
0x1d730:u'\u03c5', 0x1d731:u'\u03c6', 0x1d732:u'\u03c7', 0x1d733:u'\u03c8',
0x1d734:u'\u03c9', 0x1d747:u'\u03c3', 0x1d756:u'\u03b1', 0x1d757:u'\u03b2',
0x1d758:u'\u03b3', 0x1d759:u'\u03b4', 0x1d75a:u'\u03b5', 0x1d75b:u'\u03b6',
0x1d75c:u'\u03b7', 0x1d75d:u'\u03b8', 0x1d75e:u'\u03b9', 0x1d75f:u'\u03ba',
0x1d760:u'\u03bb', 0x1d761:u'\u03bc', 0x1d762:u'\u03bd', 0x1d763:u'\u03be',
0x1d764:u'\u03bf', 0x1d765:u'\u03c0', 0x1d766:u'\u03c1', 0x1d767:u'\u03b8',
0x1d768:u'\u03c3', 0x1d769:u'\u03c4', 0x1d76a:u'\u03c5', 0x1d76b:u'\u03c6',
0x1d76c:u'\u03c7', 0x1d76d:u'\u03c8', 0x1d76e:u'\u03c9', 0x1d781:u'\u03c3',
0x1d790:u'\u03b1', 0x1d791:u'\u03b2', 0x1d792:u'\u03b3', 0x1d793:u'\u03b4',
0x1d794:u'\u03b5', 0x1d795:u'\u03b6', 0x1d796:u'\u03b7', 0x1d797:u'\u03b8',
0x1d798:u'\u03b9', 0x1d799:u'\u03ba', 0x1d79a:u'\u03bb', 0x1d79b:u'\u03bc',
0x1d79c:u'\u03bd', 0x1d79d:u'\u03be', 0x1d79e:u'\u03bf', 0x1d79f:u'\u03c0',
0x1d7a0:u'\u03c1', 0x1d7a1:u'\u03b8', 0x1d7a2:u'\u03c3', 0x1d7a3:u'\u03c4',
0x1d7a4:u'\u03c5', 0x1d7a5:u'\u03c6', 0x1d7a6:u'\u03c7', 0x1d7a7:u'\u03c8',
0x1d7a8:u'\u03c9', 0x1d7bb:u'\u03c3', }
def map_table_b3(code):
    """Table B.3: case-fold a single code point (RFC 3454, B.3)."""
    special = b3_exceptions.get(ord(code))
    if special is not None:
        return special
    # No special mapping: plain lowercasing suffices.
    return code.lower()
def map_table_b2(a):
    # Table B.2: case-fold for use with NFKC.  Fold, normalize, fold again,
    # and re-normalize; keep the extra pass only if normalization changed it.
    al = map_table_b3(a)
    b = unicodedata.normalize("NFKC", al)
    bl = u"".join([map_table_b3(ch) for ch in b])
    c = unicodedata.normalize("NFKC", bl)
    if b != c:
        return c
    else:
        return al
def in_table_c11(code):
    """Table C.1.1: the ASCII space character."""
    return code == u"\u0020"
def in_table_c12(code):
    """Table C.1.2: non-ASCII space characters (category Zs except U+0020)."""
    return code != u" " and unicodedata.category(code) == "Zs"
def in_table_c11_c12(code):
    """Tables C.1.1 + C.1.2: any Unicode space separator (category Zs)."""
    return "Zs" == unicodedata.category(code)
def in_table_c21(code):
    """RFC 3454 table C.2.1: ASCII control characters."""
    return unicodedata.category(code) == "Cc" and ord(code) < 128
# RFC 3454 table C.2.2: non-ASCII control characters, plus the format and
# special code points the RFC calls out explicitly.  The original built this
# with ``[...] + range(...)``, which only works on Python 2 where range()
# returns a list; ``list(range(...))`` is identical there and also valid on
# Python 3.
c22_specials = set(
    [1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279]
    + list(range(8288, 8292))
    + list(range(8298, 8304))
    + list(range(65529, 65533))
    + list(range(119155, 119163))
)
def in_table_c22(code):
    """Return True if *code* is in table C.2.2 (non-ASCII control/format)."""
    c = ord(code)
    # ASCII controls belong to C.2.1, never C.2.2.
    if c < 128: return False
    if unicodedata.category(code) == "Cc": return True
    return c in c22_specials
def in_table_c21_c22(code):
    """Union of tables C.2.1 and C.2.2: all control characters."""
    return unicodedata.category(code) == "Cc" or \
        ord(code) in c22_specials
def in_table_c3(code):
    """RFC 3454 table C.3: private use characters (category Co)."""
    return "Co" == unicodedata.category(code)
def in_table_c4(code):
    """RFC 3454 table C.4: non-character code points.

    Covers U+FDD0..U+FDEF and the last two code points of every plane
    (U+xxFFFE / U+xxFFFF).
    """
    c = ord(code)
    if c < 0xFDD0: return False
    if c < 0xFDF0: return True
    # Reuse the already-computed ordinal instead of calling ord() again.
    return (c & 0xFFFF) in (0xFFFE, 0xFFFF)
def in_table_c5(code):
    """RFC 3454 table C.5: surrogate code points (category Cs)."""
    return "Cs" == unicodedata.category(code)
# RFC 3454 table C.6: inappropriate for plain text (U+FFF9..U+FFFD).
c6_set = set(range(0xFFF9, 0xFFFE))
def in_table_c6(code):
    """Return True if *code* is in table C.6."""
    return ord(code) in c6_set
# RFC 3454 table C.7: inappropriate for canonical representation
# (ideographic description characters U+2FF0..U+2FFB).
c7_set = set(range(0x2FF0, 0x2FFC))
def in_table_c7(code):
    """Return True if *code* is in table C.7."""
    return ord(code) in c7_set
# RFC 3454 table C.8: change-display properties / deprecated characters.
# ``list(range(...))`` replaces the Python-2-only ``+ range(...)`` list
# concatenation; the resulting set is identical.
c8_set = set(
    [832, 833, 8206, 8207]
    + list(range(8234, 8239))
    + list(range(8298, 8304))
)
def in_table_c8(code):
    """Return True if *code* is in table C.8."""
    return ord(code) in c8_set
# RFC 3454 table C.9: tagging characters (U+E0001 and U+E0020..U+E007F).
# ``list(range(...))`` replaces the Python-2-only ``+ range(...)`` list
# concatenation; the resulting set is identical.
c9_set = set([917505] + list(range(917536, 917632)))
def in_table_c9(code):
    """Return True if *code* is in table C.9."""
    return ord(code) in c9_set
def in_table_d1(code):
    """RFC 3454 table D.1: characters with bidirectional category R or AL."""
    return unicodedata.bidirectional(code) in ("AL", "R")
def in_table_d2(code):
    """RFC 3454 table D.2: characters with bidirectional category L."""
    return "L" == unicodedata.bidirectional(code)
# (dataset boundary metadata: license bsd-2-clause;
#  next file: sankhesh/VTK — ThirdParty/Twisted/twisted/web/test/test_agent.py)
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.client.Agent} and related new client APIs.
"""
import cookielib
import zlib
from StringIO import StringIO
from zope.interface.verify import verifyObject
from twisted.trial.unittest import TestCase
from twisted.web import client, error, http_headers
from twisted.web._newclient import RequestNotSent, RequestTransmissionFailed
from twisted.web._newclient import ResponseNeverReceived, ResponseFailed
from twisted.web._newclient import PotentialDataLoss
from twisted.internet import defer, task
from twisted.python.failure import Failure
from twisted.python.components import proxyForInterface
from twisted.test.proto_helpers import StringTransport, MemoryReactorClock
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionRefusedError, ConnectionDone
from twisted.internet.error import ConnectionLost
from twisted.internet.protocol import Protocol, Factory
from twisted.internet.defer import Deferred, succeed, CancelledError
from twisted.internet.endpoints import TCP4ClientEndpoint, SSL4ClientEndpoint
from twisted.web.client import (FileBodyProducer, Request, HTTPConnectionPool,
ResponseDone, _HTTP11ClientFactory)
from twisted.web.iweb import UNKNOWN_LENGTH, IAgent, IBodyProducer, IResponse
from twisted.web.http_headers import Headers
from twisted.web._newclient import HTTP11ClientProtocol, Response
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from zope.interface.declarations import implementer
from twisted.web.iweb import IPolicyForHTTPS
from twisted.python.deprecate import getDeprecationWarningString
from twisted.python.versions import Version
from twisted.web.client import BrowserLikePolicyForHTTPS
from twisted.web.error import SchemeNotSupported
try:
from twisted.internet import ssl
from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol
except ImportError:
ssl = None
else:
from twisted.internet._sslverify import ClientTLSOptions
class StubHTTPProtocol(Protocol):
    """
    A protocol like L{HTTP11ClientProtocol} but which does not actually know
    HTTP/1.1 and only collects requests in a list.

    @ivar requests: A C{list} of two-tuples.  Each time a request is made, a
        tuple consisting of the request and the L{Deferred} returned from the
        request method is appended to this list.
    """
    def __init__(self):
        # Mirror the attributes HTTP11ClientProtocol exposes so pool code
        # can inspect them.
        self.requests = []
        self.state = 'QUIESCENT'

    def request(self, request):
        """
        Capture the given request for later inspection.

        @return: A L{Deferred} which this code will never fire.
        """
        pending = Deferred()
        self.requests.append((request, pending))
        return pending
class FileConsumer(object):
    """
    A minimal consumer which forwards every written chunk to a wrapped
    file-like object.
    """
    def __init__(self, outputFile):
        self.outputFile = outputFile

    def write(self, bytes):
        # Delegate directly; no buffering of our own.
        self.outputFile.write(bytes)
class FileBodyProducerTests(TestCase):
    """
    Tests for the L{FileBodyProducer} which reads bytes from a file and writes
    them to an L{IConsumer}.
    """
    def _termination(self):
        """
        This method can be used as the C{terminationPredicateFactory} for a
        L{Cooperator}.  It returns a predicate which immediately returns
        C{True}, indicating that no more work should be done this iteration.
        This has the result of only allowing one iteration of a cooperative
        task to be run per L{Cooperator} iteration.
        """
        return lambda: True
    def setUp(self):
        """
        Create a L{Cooperator} hooked up to an easily controlled, deterministic
        scheduler to use with L{FileBodyProducer}.
        """
        # Scheduled work accumulates here; tests fire one unit at a time with
        # self._scheduled.pop(0)().
        self._scheduled = []
        self.cooperator = task.Cooperator(
            self._termination, self._scheduled.append)
    def test_interface(self):
        """
        L{FileBodyProducer} instances provide L{IBodyProducer}.
        """
        self.assertTrue(verifyObject(
            IBodyProducer, FileBodyProducer(StringIO(""))))
    def test_unknownLength(self):
        """
        If the L{FileBodyProducer} is constructed with a file-like object
        without either a C{seek} or C{tell} method, its C{length} attribute is
        set to C{UNKNOWN_LENGTH}.
        """
        # Each stub is missing one of the two methods required to compute a
        # length, so neither supports length determination.
        class HasSeek(object):
            def seek(self, offset, whence):
                pass
        class HasTell(object):
            def tell(self):
                pass
        producer = FileBodyProducer(HasSeek())
        self.assertEqual(UNKNOWN_LENGTH, producer.length)
        producer = FileBodyProducer(HasTell())
        self.assertEqual(UNKNOWN_LENGTH, producer.length)
    def test_knownLength(self):
        """
        If the L{FileBodyProducer} is constructed with a file-like object with
        both C{seek} and C{tell} methods, its C{length} attribute is set to the
        size of the file as determined by those methods.
        """
        inputBytes = "here are some bytes"
        inputFile = StringIO(inputBytes)
        inputFile.seek(5)
        producer = FileBodyProducer(inputFile)
        # Length is measured from the current position, and the position is
        # restored afterwards.
        self.assertEqual(len(inputBytes) - 5, producer.length)
        self.assertEqual(inputFile.tell(), 5)
    def test_defaultCooperator(self):
        """
        If no L{Cooperator} instance is passed to L{FileBodyProducer}, the
        global cooperator is used.
        """
        producer = FileBodyProducer(StringIO(""))
        self.assertEqual(task.cooperate, producer._cooperate)
    def test_startProducing(self):
        """
        L{FileBodyProducer.startProducing} starts writing bytes from the input
        file to the given L{IConsumer} and returns a L{Deferred} which fires
        when they have all been written.
        """
        expectedResult = "hello, world"
        readSize = 3
        output = StringIO()
        consumer = FileConsumer(output)
        producer = FileBodyProducer(
            StringIO(expectedResult), self.cooperator, readSize)
        complete = producer.startProducing(consumer)
        # One extra iteration is needed to observe EOF after the last read.
        for i in range(len(expectedResult) // readSize + 1):
            self._scheduled.pop(0)()
        self.assertEqual([], self._scheduled)
        self.assertEqual(expectedResult, output.getvalue())
        self.assertEqual(None, self.successResultOf(complete))
    def test_inputClosedAtEOF(self):
        """
        When L{FileBodyProducer} reaches end-of-file on the input file given to
        it, the input file is closed.
        """
        readSize = 4
        inputBytes = "some friendly bytes"
        inputFile = StringIO(inputBytes)
        producer = FileBodyProducer(inputFile, self.cooperator, readSize)
        consumer = FileConsumer(StringIO())
        producer.startProducing(consumer)
        for i in range(len(inputBytes) // readSize + 2):
            self._scheduled.pop(0)()
        self.assertTrue(inputFile.closed)
    def test_failedReadWhileProducing(self):
        """
        If a read from the input file fails while producing bytes to the
        consumer, the L{Deferred} returned by
        L{FileBodyProducer.startProducing} fires with a L{Failure} wrapping
        that exception.
        """
        class BrokenFile(object):
            def read(self, count):
                raise IOError("Simulated bad thing")
        producer = FileBodyProducer(BrokenFile(), self.cooperator)
        complete = producer.startProducing(FileConsumer(StringIO()))
        self._scheduled.pop(0)()
        self.failureResultOf(complete).trap(IOError)
    def test_stopProducing(self):
        """
        L{FileBodyProducer.stopProducing} stops the underlying L{IPullProducer}
        and the cooperative task responsible for calling C{resumeProducing} and
        closes the input file but does not cause the L{Deferred} returned by
        C{startProducing} to fire.
        """
        expectedResult = "hello, world"
        readSize = 3
        output = StringIO()
        consumer = FileConsumer(output)
        inputFile = StringIO(expectedResult)
        producer = FileBodyProducer(
            inputFile, self.cooperator, readSize)
        complete = producer.startProducing(consumer)
        producer.stopProducing()
        self.assertTrue(inputFile.closed)
        # Running the already-scheduled iteration after the stop must not
        # produce any data.
        self._scheduled.pop(0)()
        self.assertEqual("", output.getvalue())
        self.assertNoResult(complete)
    def test_pauseProducing(self):
        """
        L{FileBodyProducer.pauseProducing} temporarily suspends writing bytes
        from the input file to the given L{IConsumer}.
        """
        expectedResult = "hello, world"
        readSize = 5
        output = StringIO()
        consumer = FileConsumer(output)
        producer = FileBodyProducer(
            StringIO(expectedResult), self.cooperator, readSize)
        complete = producer.startProducing(consumer)
        self._scheduled.pop(0)()
        self.assertEqual(output.getvalue(), expectedResult[:5])
        producer.pauseProducing()
        # Sort of depends on an implementation detail of Cooperator: even
        # though the only task is paused, there's still a scheduled call. If
        # this were to go away because Cooperator became smart enough to cancel
        # this call in this case, that would be fine.
        self._scheduled.pop(0)()
        # Since the producer is paused, no new data should be here.
        self.assertEqual(output.getvalue(), expectedResult[:5])
        self.assertEqual([], self._scheduled)
        self.assertNoResult(complete)
    def test_resumeProducing(self):
        """
        L{FileBodyProducer.resumeProducing} re-commences writing bytes from the
        input file to the given L{IConsumer} after it was previously paused
        with L{FileBodyProducer.pauseProducing}.
        """
        expectedResult = "hello, world"
        readSize = 5
        output = StringIO()
        consumer = FileConsumer(output)
        producer = FileBodyProducer(
            StringIO(expectedResult), self.cooperator, readSize)
        producer.startProducing(consumer)
        self._scheduled.pop(0)()
        self.assertEqual(expectedResult[:readSize], output.getvalue())
        producer.pauseProducing()
        producer.resumeProducing()
        self._scheduled.pop(0)()
        self.assertEqual(expectedResult[:readSize * 2], output.getvalue())
class FakeReactorAndConnectMixin:
    """
    A test mixin providing a testable C{Reactor} class and a dummy C{connect}
    method which allows instances to pretend to be endpoints.
    """
    # Memory-backed reactor with a manually advanced clock, so tests control
    # both connection attempts and the passage of time deterministically.
    Reactor = MemoryReactorClock
    @implementer(IPolicyForHTTPS)
    class StubPolicy(object):
        """
        A stub policy for HTTPS URIs which allows HTTPS tests to run even if
        pyOpenSSL isn't installed.
        """
        def creatorForNetloc(self, hostname, port):
            """
            Don't actually do anything.

            @param hostname: ignored
            @param port: ignored
            """
    class StubEndpoint(object):
        """
        Endpoint that wraps existing endpoint, substitutes StubHTTPProtocol, and
        resulting protocol instances are attached to the given test case.
        """
        def __init__(self, endpoint, testCase):
            self.endpoint = endpoint
            self.testCase = testCase
            # The quiescent callback is a no-op; these stubs never go idle.
            self.factory = _HTTP11ClientFactory(lambda p: None)
            self.protocol = StubHTTPProtocol()
            self.factory.buildProtocol = lambda addr: self.protocol
        def connect(self, ignoredFactory):
            # Expose the protocol on the test case for later inspection and
            # fire synchronously with it.
            self.testCase.protocol = self.protocol
            self.endpoint.connect(self.factory)
            return succeed(self.protocol)
    def buildAgentForWrapperTest(self, reactor):
        """
        Return an Agent suitable for use in tests that wrap the Agent and want
        both a fake reactor and StubHTTPProtocol.
        """
        agent = client.Agent(reactor, self.StubPolicy())
        # Wrap the real endpoint factory so every connection goes through a
        # StubEndpoint bound to this test case.
        _oldGetEndpoint = agent._getEndpoint
        agent._getEndpoint = lambda *args: (
            self.StubEndpoint(_oldGetEndpoint(*args), self))
        return agent
    def connect(self, factory):
        """
        Fake implementation of an endpoint which synchronously
        succeeds with an instance of L{StubHTTPProtocol} for ease of
        testing.
        """
        protocol = StubHTTPProtocol()
        protocol.makeConnection(None)
        self.protocol = protocol
        return succeed(protocol)
class DummyEndpoint(object):
    """
    An endpoint that uses a fake transport.
    """
    def connect(self, factory):
        # Build the protocol, attach it to an in-memory transport, and fire
        # synchronously.
        proto = factory.buildProtocol(None)
        proto.makeConnection(StringTransport())
        return succeed(proto)
class BadEndpoint(object):
    """
    An endpoint that shouldn't be called.
    """
    def connect(self, factory):
        # Any connection attempt through this endpoint is a test failure.
        raise RuntimeError("This endpoint should not have been used.")
class DummyFactory(Factory):
    """
    Create C{StubHTTPProtocol} instances.
    """
    protocol = StubHTTPProtocol

    def __init__(self, quiescentCallback):
        # The quiescent callback is irrelevant for these stub protocols.
        pass
class HTTPConnectionPoolTests(TestCase, FakeReactorAndConnectMixin):
    """
    Tests for the L{HTTPConnectionPool} class.
    """
    def setUp(self):
        # Substitute DummyFactory so the pool builds StubHTTPProtocol
        # instances instead of real HTTP/1.1 client protocols.
        self.fakeReactor = self.Reactor()
        self.pool = HTTPConnectionPool(self.fakeReactor)
        self.pool._factory = DummyFactory
        # The retry code path is tested in HTTPConnectionPoolRetryTests:
        self.pool.retryAutomatically = False
    def test_getReturnsNewIfCacheEmpty(self):
        """
        If there are no cached connections,
        L{HTTPConnectionPool.getConnection} returns a new connection.
        """
        self.assertEqual(self.pool._connections, {})
        def gotConnection(conn):
            self.assertIsInstance(conn, StubHTTPProtocol)
            # The new connection is not stored in the pool:
            self.assertNotIn(conn, self.pool._connections.values())
        unknownKey = 12245
        d = self.pool.getConnection(unknownKey, DummyEndpoint())
        return d.addCallback(gotConnection)
    def test_putStartsTimeout(self):
        """
        If a connection is put back to the pool, a 240-sec timeout is started.
        When the timeout hits, the connection is closed and removed from the
        pool.
        """
        # We start out with one cached connection:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        self.pool._putConnection(("http", "example.com", 80), protocol)
        # Connection is in pool, still not closed:
        self.assertEqual(protocol.transport.disconnecting, False)
        self.assertIn(protocol,
                      self.pool._connections[("http", "example.com", 80)])
        # Advance 239 seconds, still not closed:
        self.fakeReactor.advance(239)
        self.assertEqual(protocol.transport.disconnecting, False)
        self.assertIn(protocol,
                      self.pool._connections[("http", "example.com", 80)])
        self.assertIn(protocol, self.pool._timeouts)
        # Advance past 240 seconds, connection will be closed:
        self.fakeReactor.advance(1.1)
        self.assertEqual(protocol.transport.disconnecting, True)
        self.assertNotIn(protocol,
                         self.pool._connections[("http", "example.com", 80)])
        self.assertNotIn(protocol, self.pool._timeouts)
    def test_putExceedsMaxPersistent(self):
        """
        If an idle connection is put back in the cache and the max number of
        persistent connections has been exceeded, one of the connections is
        closed and removed from the cache.
        """
        pool = self.pool
        # We start out with two cached connection, the max:
        origCached = [StubHTTPProtocol(), StubHTTPProtocol()]
        for p in origCached:
            p.makeConnection(StringTransport())
            pool._putConnection(("http", "example.com", 80), p)
        self.assertEqual(pool._connections[("http", "example.com", 80)],
                         origCached)
        timeouts = pool._timeouts.copy()
        # Now we add another one:
        newProtocol = StubHTTPProtocol()
        newProtocol.makeConnection(StringTransport())
        pool._putConnection(("http", "example.com", 80), newProtocol)
        # The oldest cached connections will be removed and disconnected:
        newCached = pool._connections[("http", "example.com", 80)]
        self.assertEqual(len(newCached), 2)
        self.assertEqual(newCached, [origCached[1], newProtocol])
        self.assertEqual([p.transport.disconnecting for p in newCached],
                         [False, False])
        self.assertEqual(origCached[0].transport.disconnecting, True)
        self.assertTrue(timeouts[origCached[0]].cancelled)
        self.assertNotIn(origCached[0], pool._timeouts)
    def test_maxPersistentPerHost(self):
        """
        C{maxPersistentPerHost} is enforced per C{(scheme, host, port)}:
        different keys have different max connections.
        """
        def addProtocol(scheme, host, port):
            p = StubHTTPProtocol()
            p.makeConnection(StringTransport())
            self.pool._putConnection((scheme, host, port), p)
            return p
        persistent = []
        persistent.append(addProtocol("http", "example.com", 80))
        persistent.append(addProtocol("http", "example.com", 80))
        addProtocol("https", "example.com", 443)
        addProtocol("http", "www2.example.com", 80)
        self.assertEqual(
            self.pool._connections[("http", "example.com", 80)], persistent)
        self.assertEqual(
            len(self.pool._connections[("https", "example.com", 443)]), 1)
        self.assertEqual(
            len(self.pool._connections[("http", "www2.example.com", 80)]), 1)
    def test_getCachedConnection(self):
        """
        Getting an address which has a cached connection returns the cached
        connection, removes it from the cache and cancels its timeout.
        """
        # We start out with one cached connection:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        self.pool._putConnection(("http", "example.com", 80), protocol)
        def gotConnection(conn):
            # We got the cached connection:
            self.assertIdentical(protocol, conn)
            self.assertNotIn(
                conn, self.pool._connections[("http", "example.com", 80)])
            # And the timeout was cancelled:
            self.fakeReactor.advance(241)
            self.assertEqual(conn.transport.disconnecting, False)
            self.assertNotIn(conn, self.pool._timeouts)
        # BadEndpoint would raise if used, proving the cache satisfied the
        # request without opening a new connection.
        return self.pool.getConnection(("http", "example.com", 80),
                                       BadEndpoint(),
                                       ).addCallback(gotConnection)
    def test_newConnection(self):
        """
        The pool's C{_newConnection} method constructs a new connection.
        """
        # We start out with one cached connection:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        key = 12245
        self.pool._putConnection(key, protocol)
        def gotConnection(newConnection):
            # We got a new connection:
            self.assertNotIdentical(protocol, newConnection)
            # And the old connection is still there:
            self.assertIn(protocol, self.pool._connections[key])
            # While the new connection is not:
            self.assertNotIn(newConnection, self.pool._connections.values())
        d = self.pool._newConnection(key, DummyEndpoint())
        return d.addCallback(gotConnection)
    def test_getSkipsDisconnected(self):
        """
        When getting connections out of the cache, disconnected connections
        are removed and not returned.
        """
        pool = self.pool
        key = ("http", "example.com", 80)
        # We start out with two cached connection, the max:
        origCached = [StubHTTPProtocol(), StubHTTPProtocol()]
        for p in origCached:
            p.makeConnection(StringTransport())
            pool._putConnection(key, p)
        self.assertEqual(pool._connections[key], origCached)
        # We close the first one:
        origCached[0].state = "DISCONNECTED"
        # Now, when we retrive connections we should get the *second* one:
        result = []
        self.pool.getConnection(key,
                                BadEndpoint()).addCallback(result.append)
        self.assertIdentical(result[0], origCached[1])
        # And both the disconnected and removed connections should be out of
        # the cache:
        self.assertEqual(pool._connections[key], [])
        self.assertEqual(pool._timeouts, {})
    def test_putNotQuiescent(self):
        """
        If a non-quiescent connection is put back in the cache, an error is
        logged.
        """
        protocol = StubHTTPProtocol()
        # By default state is QUIESCENT
        self.assertEqual(protocol.state, "QUIESCENT")
        protocol.state = "NOTQUIESCENT"
        self.pool._putConnection(("http", "example.com", 80), protocol)
        exc, = self.flushLoggedErrors(RuntimeError)
        self.assertEqual(
            exc.value.args[0],
            "BUG: Non-quiescent protocol added to connection pool.")
        self.assertIdentical(None, self.pool._connections.get(
                ("http", "example.com", 80)))
    def test_getUsesQuiescentCallback(self):
        """
        When L{HTTPConnectionPool.getConnection} connects, it returns a
        C{Deferred} that fires with an instance of L{HTTP11ClientProtocol}
        that has the correct quiescent callback attached. When this callback
        is called the protocol is returned to the cache correctly, using the
        right key.
        """
        class StringEndpoint(object):
            def connect(self, factory):
                p = factory.buildProtocol(None)
                p.makeConnection(StringTransport())
                return succeed(p)
        # Use a real (persistent) pool here rather than the DummyFactory one,
        # because the quiescent callback belongs to HTTP11ClientProtocol.
        pool = HTTPConnectionPool(self.fakeReactor, True)
        pool.retryAutomatically = False
        result = []
        key = "a key"
        pool.getConnection(
            key, StringEndpoint()).addCallback(
            result.append)
        protocol = result[0]
        self.assertIsInstance(protocol, HTTP11ClientProtocol)
        # Now that we have protocol instance, lets try to put it back in the
        # pool:
        protocol._state = "QUIESCENT"
        protocol._quiescentCallback(protocol)
        # If we try to retrive a connection to same destination again, we
        # should get the same protocol, because it should've been added back
        # to the pool:
        result2 = []
        pool.getConnection(
            key, StringEndpoint()).addCallback(
            result2.append)
        self.assertIdentical(result2[0], protocol)
    def test_closeCachedConnections(self):
        """
        L{HTTPConnectionPool.closeCachedConnections} closes all cached
        connections and removes them from the cache. It returns a Deferred
        that fires when they have all lost their connections.
        """
        persistent = []
        def addProtocol(scheme, host, port):
            p = HTTP11ClientProtocol()
            p.makeConnection(StringTransport())
            self.pool._putConnection((scheme, host, port), p)
            persistent.append(p)
        addProtocol("http", "example.com", 80)
        addProtocol("http", "www2.example.com", 80)
        doneDeferred = self.pool.closeCachedConnections()
        # Connections have begun disconnecting:
        for p in persistent:
            self.assertEqual(p.transport.disconnecting, True)
        self.assertEqual(self.pool._connections, {})
        # All timeouts were cancelled and removed:
        for dc in self.fakeReactor.getDelayedCalls():
            self.assertEqual(dc.cancelled, True)
        self.assertEqual(self.pool._timeouts, {})
        # Returned Deferred fires when all connections have been closed:
        result = []
        doneDeferred.addCallback(result.append)
        self.assertEqual(result, [])
        persistent[0].connectionLost(Failure(ConnectionDone()))
        self.assertEqual(result, [])
        persistent[1].connectionLost(Failure(ConnectionDone()))
        self.assertEqual(result, [None])
    def test_cancelGetConnectionCancelsEndpointConnect(self):
        """
        Cancelling the C{Deferred} returned from
        L{HTTPConnectionPool.getConnection} cancels the C{Deferred} returned
        by opening a new connection with the given endpoint.
        """
        self.assertEqual(self.pool._connections, {})
        connectionResult = Deferred()
        class Endpoint:
            def connect(self, factory):
                return connectionResult
        d = self.pool.getConnection(12345, Endpoint())
        d.cancel()
        self.assertEqual(self.failureResultOf(connectionResult).type,
                         CancelledError)
class AgentTestsMixin(object):
    """
    Tests for any L{IAgent} implementation.
    """
    def test_interface(self):
        """
        The agent object provides L{IAgent}.
        """
        agent = self.makeAgent()
        self.assertTrue(verifyObject(IAgent, agent))
class AgentTests(TestCase, FakeReactorAndConnectMixin, AgentTestsMixin):
"""
Tests for the new HTTP client API provided by L{Agent}.
"""
def makeAgent(self):
"""
@return: a new L{twisted.web.client.Agent} instance
"""
return client.Agent(self.reactor)
def setUp(self):
"""
Create an L{Agent} wrapped around a fake reactor.
"""
self.reactor = self.Reactor()
self.agent = self.makeAgent()
def test_defaultPool(self):
"""
If no pool is passed in, the L{Agent} creates a non-persistent pool.
"""
agent = client.Agent(self.reactor)
self.assertIsInstance(agent._pool, HTTPConnectionPool)
self.assertEqual(agent._pool.persistent, False)
self.assertIdentical(agent._reactor, agent._pool._reactor)
def test_persistent(self):
"""
If C{persistent} is set to C{True} on the L{HTTPConnectionPool} (the
default), C{Request}s are created with their C{persistent} flag set to
C{True}.
"""
pool = HTTPConnectionPool(self.reactor)
agent = client.Agent(self.reactor, pool=pool)
agent._getEndpoint = lambda *args: self
agent.request("GET", "http://127.0.0.1")
self.assertEqual(self.protocol.requests[0][0].persistent, True)
def test_nonPersistent(self):
"""
If C{persistent} is set to C{False} when creating the
L{HTTPConnectionPool}, C{Request}s are created with their
C{persistent} flag set to C{False}.
Elsewhere in the tests for the underlying HTTP code we ensure that
this will result in the disconnection of the HTTP protocol once the
request is done, so that the connection will not be returned to the
pool.
"""
pool = HTTPConnectionPool(self.reactor, persistent=False)
agent = client.Agent(self.reactor, pool=pool)
agent._getEndpoint = lambda *args: self
agent.request("GET", "http://127.0.0.1")
self.assertEqual(self.protocol.requests[0][0].persistent, False)
def test_connectUsesConnectionPool(self):
"""
When a connection is made by the Agent, it uses its pool's
C{getConnection} method to do so, with the endpoint returned by
C{self._getEndpoint}. The key used is C{(scheme, host, port)}.
"""
endpoint = DummyEndpoint()
class MyAgent(client.Agent):
def _getEndpoint(this, scheme, host, port):
self.assertEqual((scheme, host, port),
("http", "foo", 80))
return endpoint
class DummyPool(object):
connected = False
persistent = False
def getConnection(this, key, ep):
this.connected = True
self.assertEqual(ep, endpoint)
# This is the key the default Agent uses, others will have
# different keys:
self.assertEqual(key, ("http", "foo", 80))
return defer.succeed(StubHTTPProtocol())
pool = DummyPool()
agent = MyAgent(self.reactor, pool=pool)
self.assertIdentical(pool, agent._pool)
headers = http_headers.Headers()
headers.addRawHeader("host", "foo")
bodyProducer = object()
agent.request('GET', 'http://foo/',
bodyProducer=bodyProducer, headers=headers)
self.assertEqual(agent._pool.connected, True)
def test_unsupportedScheme(self):
"""
L{Agent.request} returns a L{Deferred} which fails with
L{SchemeNotSupported} if the scheme of the URI passed to it is not
C{'http'}.
"""
return self.assertFailure(
self.agent.request('GET', 'mailto:alice@example.com'),
SchemeNotSupported)
def test_connectionFailed(self):
"""
The L{Deferred} returned by L{Agent.request} fires with a L{Failure} if
the TCP connection attempt fails.
"""
result = self.agent.request('GET', 'http://foo/')
# Cause the connection to be refused
host, port, factory = self.reactor.tcpClients.pop()[:3]
factory.clientConnectionFailed(None, Failure(ConnectionRefusedError()))
return self.assertFailure(result, ConnectionRefusedError)
def test_connectHTTP(self):
"""
L{Agent._getEndpoint} return a C{TCP4ClientEndpoint} when passed a
scheme of C{'http'}.
"""
expectedHost = 'example.com'
expectedPort = 1234
endpoint = self.agent._getEndpoint('http', expectedHost, expectedPort)
self.assertEqual(endpoint._host, expectedHost)
self.assertEqual(endpoint._port, expectedPort)
self.assertIsInstance(endpoint, TCP4ClientEndpoint)
def test_connectHTTPSCustomContextFactory(self):
"""
If a context factory is passed to L{Agent.__init__} it will be used to
determine the SSL parameters for HTTPS requests. When an HTTPS request
is made, the hostname and port number of the request URL will be passed
to the context factory's C{getContext} method. The resulting context
object will be used to establish the SSL connection.
"""
expectedHost = 'example.org'
expectedPort = 20443
expectedContext = object()
contextArgs = []
class StubWebContextFactory(object):
def getContext(self, hostname, port):
contextArgs.append((hostname, port))
return expectedContext
agent = client.Agent(self.reactor, StubWebContextFactory())
endpoint = agent._getEndpoint('https', expectedHost, expectedPort)
contextFactory = endpoint._sslContextFactory
context = contextFactory.getContext()
self.assertEqual(context, expectedContext)
self.assertEqual(contextArgs, [(expectedHost, expectedPort)])
def test_hostProvided(self):
"""
If C{None} is passed to L{Agent.request} for the C{headers} parameter,
a L{Headers} instance is created for the request and a I{Host} header
added to it.
"""
self.agent._getEndpoint = lambda *args: self
self.agent.request(
'GET', 'http://example.com/foo?bar')
req, res = self.protocol.requests.pop()
self.assertEqual(req.headers.getRawHeaders('host'), ['example.com'])
def test_hostOverride(self):
"""
If the headers passed to L{Agent.request} includes a value for the
I{Host} header, that value takes precedence over the one which would
otherwise be automatically provided.
"""
headers = http_headers.Headers({'foo': ['bar'], 'host': ['quux']})
self.agent._getEndpoint = lambda *args: self
self.agent.request(
'GET', 'http://example.com/foo?bar', headers)
req, res = self.protocol.requests.pop()
self.assertEqual(req.headers.getRawHeaders('host'), ['quux'])
def test_headersUnmodified(self):
"""
If a I{Host} header must be added to the request, the L{Headers}
instance passed to L{Agent.request} is not modified.
"""
headers = http_headers.Headers()
self.agent._getEndpoint = lambda *args: self
self.agent.request(
'GET', 'http://example.com/foo', headers)
protocol = self.protocol
# The request should have been issued.
self.assertEqual(len(protocol.requests), 1)
# And the headers object passed in should not have changed.
self.assertEqual(headers, http_headers.Headers())
def test_hostValueStandardHTTP(self):
"""
When passed a scheme of C{'http'} and a port of C{80},
L{Agent._computeHostValue} returns a string giving just
the host name passed to it.
"""
self.assertEqual(
self.agent._computeHostValue('http', 'example.com', 80),
'example.com')
def test_hostValueNonStandardHTTP(self):
"""
When passed a scheme of C{'http'} and a port other than C{80},
L{Agent._computeHostValue} returns a string giving the
host passed to it joined together with the port number by C{":"}.
"""
self.assertEqual(
self.agent._computeHostValue('http', 'example.com', 54321),
'example.com:54321')
def test_hostValueStandardHTTPS(self):
"""
When passed a scheme of C{'https'} and a port of C{443},
L{Agent._computeHostValue} returns a string giving just
the host name passed to it.
"""
self.assertEqual(
self.agent._computeHostValue('https', 'example.com', 443),
'example.com')
def test_hostValueNonStandardHTTPS(self):
"""
When passed a scheme of C{'https'} and a port other than C{443},
L{Agent._computeHostValue} returns a string giving the
host passed to it joined together with the port number by C{":"}.
"""
self.assertEqual(
self.agent._computeHostValue('https', 'example.com', 54321),
'example.com:54321')
def test_request(self):
"""
L{Agent.request} establishes a new connection to the host indicated by
the host part of the URI passed to it and issues a request using the
method, the path portion of the URI, the headers, and the body producer
passed to it. It returns a L{Deferred} which fires with an
L{IResponse} from the server.
"""
self.agent._getEndpoint = lambda *args: self
headers = http_headers.Headers({'foo': ['bar']})
# Just going to check the body for identity, so it doesn't need to be
# real.
body = object()
self.agent.request(
'GET', 'http://example.com:1234/foo?bar', headers, body)
protocol = self.protocol
# The request should be issued.
self.assertEqual(len(protocol.requests), 1)
req, res = protocol.requests.pop()
self.assertIsInstance(req, Request)
self.assertEqual(req.method, 'GET')
self.assertEqual(req.uri, '/foo?bar')
self.assertEqual(
req.headers,
http_headers.Headers({'foo': ['bar'],
'host': ['example.com:1234']}))
self.assertIdentical(req.bodyProducer, body)
def test_connectTimeout(self):
"""
L{Agent} takes a C{connectTimeout} argument which is forwarded to the
following C{connectTCP} agent.
"""
agent = client.Agent(self.reactor, connectTimeout=5)
agent.request('GET', 'http://foo/')
timeout = self.reactor.tcpClients.pop()[3]
self.assertEqual(5, timeout)
def test_connectSSLTimeout(self):
"""
L{Agent} takes a C{connectTimeout} argument which is forwarded to the
following C{connectSSL} call.
"""
agent = client.Agent(self.reactor, self.StubPolicy(), connectTimeout=5)
agent.request('GET', 'https://foo/')
timeout = self.reactor.sslClients.pop()[4]
self.assertEqual(5, timeout)
def test_bindAddress(self):
"""
L{Agent} takes a C{bindAddress} argument which is forwarded to the
following C{connectTCP} call.
"""
agent = client.Agent(self.reactor, bindAddress='192.168.0.1')
agent.request('GET', 'http://foo/')
address = self.reactor.tcpClients.pop()[4]
self.assertEqual('192.168.0.1', address)
def test_bindAddressSSL(self):
"""
L{Agent} takes a C{bindAddress} argument which is forwarded to the
following C{connectSSL} call.
"""
agent = client.Agent(self.reactor, self.StubPolicy(),
bindAddress='192.168.0.1')
agent.request('GET', 'https://foo/')
address = self.reactor.sslClients.pop()[5]
self.assertEqual('192.168.0.1', address)
    def test_responseIncludesRequest(self):
        """
        L{Response}s returned by L{Agent.request} have a reference to the
        L{Request} that was originally issued.
        """
        uri = b'http://example.com/'
        agent = self.buildAgentForWrapperTest(self.reactor)
        d = agent.request('GET', uri)
        # The request should be issued.
        self.assertEqual(len(self.protocol.requests), 1)
        req, res = self.protocol.requests.pop()
        self.assertIsInstance(req, Request)
        # Build a response via the private constructor that accepts the
        # originating request, then fire it into the pending Deferred.
        resp = client.Response._construct(
            ('HTTP', 1, 1),
            200,
            'OK',
            client.Headers({}),
            None,
            req)
        res.callback(resp)
        response = self.successResultOf(d)
        # The delivered response must expose the same request attributes as
        # the request that was actually issued.
        self.assertEqual(
            (response.request.method, response.request.absoluteURI,
             response.request.headers),
            (req.method, req.absoluteURI, req.headers))
def test_requestAbsoluteURI(self):
"""
L{Request.absoluteURI} is the absolute URI of the request.
"""
uri = b'http://example.com/foo;1234?bar#frag'
agent = self.buildAgentForWrapperTest(self.reactor)
agent.request(b'GET', uri)
# The request should be issued.
self.assertEqual(len(self.protocol.requests), 1)
req, res = self.protocol.requests.pop()
self.assertIsInstance(req, Request)
self.assertEquals(req.absoluteURI, uri)
def test_requestMissingAbsoluteURI(self):
"""
L{Request.absoluteURI} is C{None} if L{Request._parsedURI} is C{None}.
"""
request = client.Request(b'FOO', b'/', client.Headers(), None)
self.assertIdentical(request.absoluteURI, None)
class AgentHTTPSTests(TestCase, FakeReactorAndConnectMixin):
    """
    Tests for the new HTTP client API that depends on SSL.
    """
    # Skip the whole class when pyOpenSSL/twisted SSL support is absent.
    if ssl is None:
        skip = "SSL not present, cannot run SSL tests"
    def makeEndpoint(self, host='example.com', port=443):
        """
        Create an L{Agent} with an https scheme and return its endpoint
        created according to the arguments.
        @param host: The host for the endpoint.
        @type host: L{bytes}
        @param port: The port for the endpoint.
        @type port: L{int}
        @return: An endpoint of an L{Agent} constructed according to args.
        @rtype: L{SSL4ClientEndpoint}
        """
        return client.Agent(self.Reactor())._getEndpoint(b'https', host, port)
    def test_endpointType(self):
        """
        L{Agent._getEndpoint} returns a L{SSL4ClientEndpoint} when passed a
        scheme of C{'https'}.
        """
        self.assertIsInstance(self.makeEndpoint(), SSL4ClientEndpoint)
    def test_hostArgumentIsRespected(self):
        """
        If a host is passed, the endpoint respects it.
        """
        expectedHost = 'example.com'
        endpoint = self.makeEndpoint(host=expectedHost)
        self.assertEqual(endpoint._host, expectedHost)
    def test_portArgumentIsRespected(self):
        """
        If a port is passed, the endpoint respects it.
        """
        expectedPort = 4321
        endpoint = self.makeEndpoint(port=expectedPort)
        self.assertEqual(endpoint._port, expectedPort)
    def test_contextFactoryType(self):
        """
        L{Agent} wraps its connection creator creator and uses modern TLS APIs.
        """
        endpoint = self.makeEndpoint()
        contextFactory = endpoint._sslContextFactory
        self.assertIsInstance(contextFactory, ClientTLSOptions)
        self.assertEqual(contextFactory._hostname, u"example.com")
    def test_connectHTTPSCustomConnectionCreator(self):
        """
        If a custom L{WebClientConnectionCreator}-like object is passed to
        L{Agent.__init__} it will be used to determine the SSL parameters for
        HTTPS requests.  When an HTTPS request is made, the hostname and port
        number of the request URL will be passed to the connection creator's
        C{creatorForNetloc} method.  The resulting context object will be used
        to establish the SSL connection.
        """
        expectedHost = 'example.org'
        expectedPort = 20443
        # Minimal stand-in for an OpenSSL connection object: it only records
        # that the handshake / connect-state calls were made.
        class JustEnoughConnection(object):
            handshakeStarted = False
            connectState = False
            def do_handshake(self):
                """
                The handshake started.  Record that fact.
                """
                self.handshakeStarted = True
            def set_connect_state(self):
                """
                The connection started.  Record that fact.
                """
                self.connectState = True
        contextArgs = []
        @implementer(IOpenSSLClientConnectionCreator)
        class JustEnoughCreator(object):
            def __init__(self, hostname, port):
                self.hostname = hostname
                self.port = port
            def clientConnectionForTLS(self, tlsProtocol):
                """
                Implement L{IOpenSSLClientConnectionCreator}.
                @param tlsProtocol: The TLS protocol.
                @type tlsProtocol: L{TLSMemoryBIOProtocol}
                @return: C{expectedConnection}
                """
                contextArgs.append((tlsProtocol, self.hostname, self.port))
                return expectedConnection
        expectedConnection = JustEnoughConnection()
        @implementer(IPolicyForHTTPS)
        class StubBrowserLikePolicyForHTTPS(object):
            def creatorForNetloc(self, hostname, port):
                """
                Emulate L{BrowserLikePolicyForHTTPS}.
                @param hostname: The hostname to verify.
                @type hostname: L{unicode}
                @param port: The port number.
                @type port: L{int}
                @return: a stub L{IOpenSSLClientConnectionCreator}
                @rtype: L{JustEnoughCreator}
                """
                return JustEnoughCreator(hostname, port)
        expectedCreatorCreator = StubBrowserLikePolicyForHTTPS()
        reactor = self.Reactor()
        agent = client.Agent(reactor, expectedCreatorCreator)
        endpoint = agent._getEndpoint('https', expectedHost, expectedPort)
        endpoint.connect(Factory.forProtocol(Protocol))
        # Recreate the TLS wrapping the endpoint performed so the stub
        # connection creator actually gets consulted.
        passedFactory = reactor.sslClients[-1][2]
        passedContextFactory = reactor.sslClients[-1][3]
        tlsFactory = TLSMemoryBIOFactory(
            passedContextFactory, True, passedFactory
        )
        tlsProtocol = tlsFactory.buildProtocol(None)
        tlsProtocol.makeConnection(StringTransport())
        tls = contextArgs[0][0]
        self.assertIsInstance(tls, TLSMemoryBIOProtocol)
        # The creator must have been asked about the request's netloc, and
        # the stub connection must have gone through the handshake dance.
        self.assertEqual(contextArgs[0][1:], (expectedHost, expectedPort))
        self.assertTrue(expectedConnection.handshakeStarted)
        self.assertTrue(expectedConnection.connectState)
    def test_deprecatedDuckPolicy(self):
        """
        Passing something that duck-types I{like} a L{web client context
        factory <twisted.web.client.WebClientContextFactory>} - something that
        does not provide L{IPolicyForHTTPS} - to L{Agent} emits a
        L{DeprecationWarning} even if you don't actually C{import
        WebClientContextFactory} to do it.
        """
        def warnMe():
            client.Agent(MemoryReactorClock(),
                         "does-not-provide-IPolicyForHTTPS")
        warnMe()
        # flushWarnings is scoped to warnMe so unrelated warnings from other
        # code are not picked up.
        warnings = self.flushWarnings([warnMe])
        self.assertEqual(len(warnings), 1)
        [warning] = warnings
        self.assertEqual(warning['category'], DeprecationWarning)
        self.assertEqual(
            warning['message'],
            "'does-not-provide-IPolicyForHTTPS' was passed as the HTTPS "
            "policy for an Agent, but it does not provide IPolicyForHTTPS.  "
            "Since Twisted 14.0, you must pass a provider of IPolicyForHTTPS."
        )
class WebClientContextFactoryTests(TestCase):
    """
    Tests for the context factory wrapper for web clients
    L{twisted.web.client.WebClientContextFactory}.
    """
    def setUp(self):
        """
        Get WebClientContextFactory while quashing its deprecation warning.
        """
        # The import itself triggers the deprecation warning, so it is done
        # here (not at module scope) and immediately flushed.
        from twisted.web.client import WebClientContextFactory
        self.warned = self.flushWarnings([WebClientContextFactoryTests.setUp])
        self.webClientContextFactory = WebClientContextFactory
    def test_deprecated(self):
        """
        L{twisted.web.client.WebClientContextFactory} is deprecated.  Importing
        it displays a warning.
        """
        self.assertEqual(len(self.warned), 1)
        [warning] = self.warned
        self.assertEqual(warning['category'], DeprecationWarning)
        self.assertEqual(
            warning['message'],
            getDeprecationWarningString(
                self.webClientContextFactory, Version("Twisted", 14, 0, 0),
                replacement=BrowserLikePolicyForHTTPS,
            )
            # See https://twistedmatrix.com/trac/ticket/7242
            .replace(";", ":")
        )
    def test_missingSSL(self):
        """
        If C{getContext} is called and SSL is not available, raise
        L{NotImplementedError}.
        """
        self.assertRaises(
            NotImplementedError,
            self.webClientContextFactory().getContext,
            'example.com', 443,
        )
    def test_returnsContext(self):
        """
        If SSL is present, C{getContext} returns a L{SSL.Context}.
        """
        ctx = self.webClientContextFactory().getContext('example.com', 443)
        self.assertIsInstance(ctx, ssl.SSL.Context)
    def test_setsTrustRootOnContextToDefaultTrustRoot(self):
        """
        The L{CertificateOptions} has C{trustRoot} set to the default trust
        roots.
        """
        ctx = self.webClientContextFactory()
        certificateOptions = ctx._getCertificateOptions('example.com', 443)
        self.assertIsInstance(
            certificateOptions.trustRoot, ssl.OpenSSLDefaultPaths)
    # The SSL-dependent and SSL-absent tests are mutually exclusive: skip
    # whichever group does not apply to the current environment.
    if ssl is None:
        test_returnsContext.skip = "SSL not present, cannot run SSL tests."
        test_setsTrustRootOnContextToDefaultTrustRoot.skip = (
            "SSL not present, cannot run SSL tests.")
    else:
        test_missingSSL.skip = "SSL present."
class HTTPConnectionPoolRetryTests(TestCase, FakeReactorAndConnectMixin):
    """
    L{client.HTTPConnectionPool}, by using
    L{client._RetryingHTTP11ClientProtocol}, supports retrying requests done
    against previously cached connections.
    """
    def test_onlyRetryIdempotentMethods(self):
        """
        Only GET, HEAD, OPTIONS, TRACE, DELETE methods cause a retry.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        self.assertTrue(connection._shouldRetry("GET", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry("HEAD", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry(
            "OPTIONS", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry(
            "TRACE", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry(
            "DELETE", RequestNotSent(), None))
        self.assertFalse(connection._shouldRetry(
            "POST", RequestNotSent(), None))
        self.assertFalse(connection._shouldRetry(
            "MYMETHOD", RequestNotSent(), None))
        # This will be covered by a different ticket, since we need support
        # for resettable body producers:
        # self.assertTrue(connection._doRetry("PUT", RequestNotSent(), None))
    def test_onlyRetryIfNoResponseReceived(self):
        """
        Only L{RequestNotSent}, L{RequestTransmissionFailed} and
        L{ResponseNeverReceived} exceptions cause a retry.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        self.assertTrue(connection._shouldRetry("GET", RequestNotSent(), None))
        self.assertTrue(connection._shouldRetry(
            "GET", RequestTransmissionFailed([]), None))
        self.assertTrue(connection._shouldRetry(
            "GET", ResponseNeverReceived([]),None))
        # ResponseFailed means some response data arrived, so a retry could
        # cause duplicated server-side effects; don't retry.
        self.assertFalse(connection._shouldRetry(
            "GET", ResponseFailed([]), None))
        self.assertFalse(connection._shouldRetry(
            "GET", ConnectionRefusedError(), None))
    def test_dontRetryIfFailedDueToCancel(self):
        """
        If a request failed due to the operation being cancelled,
        C{_shouldRetry} returns C{False} to indicate the request should not be
        retried.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        exception = ResponseNeverReceived([Failure(defer.CancelledError())])
        self.assertFalse(connection._shouldRetry(
            "GET", exception, None))
    def test_retryIfFailedDueToNonCancelException(self):
        """
        If a request failed with L{ResponseNeverReceived} due to some
        arbitrary exception, C{_shouldRetry} returns C{True} to indicate the
        request should be retried.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        self.assertTrue(connection._shouldRetry(
            "GET", ResponseNeverReceived([Failure(Exception())]), None))
    def test_wrappedOnPersistentReturned(self):
        """
        If L{client.HTTPConnectionPool.getConnection} returns a previously
        cached connection, it will get wrapped in a
        L{client._RetryingHTTP11ClientProtocol}.
        """
        pool = client.HTTPConnectionPool(Clock())
        # Add a connection to the cache:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        pool._putConnection(123, protocol)
        # Retrieve it, it should come back wrapped in a
        # _RetryingHTTP11ClientProtocol:
        d = pool.getConnection(123, DummyEndpoint())
        def gotConnection(connection):
            self.assertIsInstance(connection,
                                  client._RetryingHTTP11ClientProtocol)
            self.assertIdentical(connection._clientProtocol, protocol)
        return d.addCallback(gotConnection)
    def test_notWrappedOnNewReturned(self):
        """
        If L{client.HTTPConnectionPool.getConnection} returns a new
        connection, it will be returned as is.
        """
        pool = client.HTTPConnectionPool(None)
        d = pool.getConnection(123, DummyEndpoint())
        def gotConnection(connection):
            # Don't want to use isinstance since potentially the wrapper might
            # subclass it at some point:
            self.assertIdentical(connection.__class__, HTTP11ClientProtocol)
        return d.addCallback(gotConnection)
    def retryAttempt(self, willWeRetry):
        """
        Fail a first request, possibly retrying depending on argument.
        @param willWeRetry: the value the stubbed C{_shouldRetry} returns.
        @return: a 2-tuple of the request L{Deferred} and the list of
            protocols that were created (one per connection attempt).
        """
        protocols = []
        def newProtocol():
            protocol = StubHTTPProtocol()
            protocols.append(protocol)
            return defer.succeed(protocol)
        bodyProducer = object()
        request = client.Request("FOO", "/", client.Headers(), bodyProducer,
                                 persistent=True)
        newProtocol()
        protocol = protocols[0]
        retrier = client._RetryingHTTP11ClientProtocol(protocol, newProtocol)
        # Replace _shouldRetry with a stub that also verifies the arguments
        # it is handed before returning the canned answer.
        def _shouldRetry(m, e, bp):
            self.assertEqual(m, "FOO")
            self.assertIdentical(bp, bodyProducer)
            self.assertIsInstance(e, (RequestNotSent, ResponseNeverReceived))
            return willWeRetry
        retrier._shouldRetry = _shouldRetry
        d = retrier.request(request)
        # So far, one request made:
        self.assertEqual(len(protocols), 1)
        self.assertEqual(len(protocols[0].requests), 1)
        # Fail the first request:
        protocol.requests[0][1].errback(RequestNotSent())
        return d, protocols
    def test_retryIfShouldRetryReturnsTrue(self):
        """
        L{client._RetryingHTTP11ClientProtocol} retries when
        L{client._RetryingHTTP11ClientProtocol._shouldRetry} returns C{True}.
        """
        d, protocols = self.retryAttempt(True)
        # We retried!
        self.assertEqual(len(protocols), 2)
        response = object()
        protocols[1].requests[0][1].callback(response)
        return d.addCallback(self.assertIdentical, response)
    def test_dontRetryIfShouldRetryReturnsFalse(self):
        """
        L{client._RetryingHTTP11ClientProtocol} does not retry when
        L{client._RetryingHTTP11ClientProtocol._shouldRetry} returns C{False}.
        """
        d, protocols = self.retryAttempt(False)
        # We did not retry:
        self.assertEqual(len(protocols), 1)
        return self.assertFailure(d, RequestNotSent)
    def test_onlyRetryWithoutBody(self):
        """
        L{_RetryingHTTP11ClientProtocol} only retries queries that don't have
        a body.
        This is an implementation restriction; if the restriction is fixed,
        this test should be removed and PUT added to list of methods that
        support retries.
        """
        pool = client.HTTPConnectionPool(None)
        connection = client._RetryingHTTP11ClientProtocol(None, pool)
        self.assertTrue(connection._shouldRetry("GET", RequestNotSent(), None))
        self.assertFalse(connection._shouldRetry("GET", RequestNotSent(), object()))
    def test_onlyRetryOnce(self):
        """
        If a L{client._RetryingHTTP11ClientProtocol} fails more than once on
        an idempotent query before a response is received, it will not retry.
        """
        d, protocols = self.retryAttempt(True)
        self.assertEqual(len(protocols), 2)
        # Fail the second request too:
        protocols[1].requests[0][1].errback(ResponseNeverReceived([]))
        # We didn't retry again:
        self.assertEqual(len(protocols), 2)
        return self.assertFailure(d, ResponseNeverReceived)
    def test_dontRetryIfRetryAutomaticallyFalse(self):
        """
        If L{HTTPConnectionPool.retryAutomatically} is set to C{False}, don't
        wrap connections with retrying logic.
        """
        pool = client.HTTPConnectionPool(Clock())
        pool.retryAutomatically = False
        # Add a connection to the cache:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        pool._putConnection(123, protocol)
        # Retrieve it, it should come back unwrapped:
        d = pool.getConnection(123, DummyEndpoint())
        def gotConnection(connection):
            self.assertIdentical(connection, protocol)
        return d.addCallback(gotConnection)
    def test_retryWithNewConnection(self):
        """
        L{client.HTTPConnectionPool} creates
        {client._RetryingHTTP11ClientProtocol} with a new connection factory
        method that creates a new connection using the same key and endpoint
        as the wrapped connection.
        """
        pool = client.HTTPConnectionPool(Clock())
        key = 123
        endpoint = DummyEndpoint()
        newConnections = []
        # Override the pool's _newConnection:
        def newConnection(k, e):
            newConnections.append((k, e))
        pool._newConnection = newConnection
        # Add a connection to the cache:
        protocol = StubHTTPProtocol()
        protocol.makeConnection(StringTransport())
        pool._putConnection(key, protocol)
        # Retrieve it, it should come back wrapped in a
        # _RetryingHTTP11ClientProtocol:
        d = pool.getConnection(key, endpoint)
        def gotConnection(connection):
            self.assertIsInstance(connection,
                                  client._RetryingHTTP11ClientProtocol)
            self.assertIdentical(connection._clientProtocol, protocol)
            # Verify that the _newConnection method on retrying connection
            # calls _newConnection on the pool:
            self.assertEqual(newConnections, [])
            connection._newConnection()
            self.assertEqual(len(newConnections), 1)
            self.assertEqual(newConnections[0][0], key)
            self.assertIdentical(newConnections[0][1], endpoint)
        return d.addCallback(gotConnection)
class CookieTestsMixin(object):
    """
    Mixin providing cookie-related helpers for unit tests.
    """
    def addCookies(self, cookieJar, uri, cookies):
        """
        Store the given C{Set-Cookie} header values in C{cookieJar} as if
        they arrived in a response to a request for C{uri}.

        @return: a 2-tuple of the fake urllib2-style request and response
            objects that were fed to the cookie jar.
        """
        headers = client.Headers({'Set-Cookie': cookies})
        realResponse = client.Response(('HTTP', 1, 1), 200, 'OK', headers,
                                       None)
        fakeResponse = client._FakeUrllib2Response(realResponse)
        fakeRequest = client._FakeUrllib2Request(uri)
        cookieJar.extract_cookies(fakeResponse, fakeRequest)
        return fakeRequest, fakeResponse
class CookieJarTests(TestCase, CookieTestsMixin):
    """
    Tests for L{twisted.web.client._FakeUrllib2Response} and
    L{twisted.web.client._FakeUrllib2Request}'s interactions with
    C{cookielib.CookieJar} instances.
    """
    def makeCookieJar(self):
        """
        @return: a 2-tuple of a C{cookielib.CookieJar} with some sample
            cookies and the (request, response) pair used to populate it
        """
        cookieJar = cookielib.CookieJar()
        reqres = self.addCookies(
            cookieJar,
            'http://example.com:1234/foo?bar',
            ['foo=1; cow=moo; Path=/foo; Comment=hello',
             'bar=2; Comment=goodbye'])
        return cookieJar, reqres
    def test_extractCookies(self):
        """
        L{cookielib.CookieJar.extract_cookies} extracts cookie information from
        fake urllib2 response instances.
        """
        jar = self.makeCookieJar()[0]
        cookies = dict([(c.name, c) for c in jar])
        # 'foo' carried explicit Path/Comment and a nonstandard 'cow' attr.
        cookie = cookies['foo']
        self.assertEqual(cookie.version, 0)
        self.assertEqual(cookie.name, 'foo')
        self.assertEqual(cookie.value, '1')
        self.assertEqual(cookie.path, '/foo')
        self.assertEqual(cookie.comment, 'hello')
        self.assertEqual(cookie.get_nonstandard_attr('cow'), 'moo')
        # 'bar' had no Path, so the default '/' applies and 'cow' is absent.
        cookie = cookies['bar']
        self.assertEqual(cookie.version, 0)
        self.assertEqual(cookie.name, 'bar')
        self.assertEqual(cookie.value, '2')
        self.assertEqual(cookie.path, '/')
        self.assertEqual(cookie.comment, 'goodbye')
        self.assertIdentical(cookie.get_nonstandard_attr('cow'), None)
    def test_sendCookie(self):
        """
        L{cookielib.CookieJar.add_cookie_header} adds a cookie header to a fake
        urllib2 request instance.
        """
        jar, (request, response) = self.makeCookieJar()
        # No Cookie header until the jar adds one.
        self.assertIdentical(
            request.get_header('Cookie', None),
            None)
        jar.add_cookie_header(request)
        self.assertEqual(
            request.get_header('Cookie', None),
            'foo=1; bar=2')
class CookieAgentTests(TestCase, CookieTestsMixin, FakeReactorAndConnectMixin,
                       AgentTestsMixin):
    """
    Tests for L{twisted.web.client.CookieAgent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.CookieAgent}
        """
        return client.CookieAgent(
            self.buildAgentForWrapperTest(self.reactor),
            cookielib.CookieJar())
    def setUp(self):
        # Fake reactor provided by FakeReactorAndConnectMixin.
        self.reactor = self.Reactor()
    def test_emptyCookieJarRequest(self):
        """
        L{CookieAgent.request} does not insert any C{'Cookie'} header into the
        L{Request} object if there is no cookie in the cookie jar for the URI
        being requested. Cookies are extracted from the response and stored in
        the cookie jar.
        """
        cookieJar = cookielib.CookieJar()
        self.assertEqual(list(cookieJar), [])
        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        d = cookieAgent.request(
            'GET', 'http://example.com:1234/foo?bar')
        def _checkCookie(ignored):
            cookies = list(cookieJar)
            self.assertEqual(len(cookies), 1)
            self.assertEqual(cookies[0].name, 'foo')
            self.assertEqual(cookies[0].value, '1')
        d.addCallback(_checkCookie)
        req, res = self.protocol.requests.pop()
        # The outgoing request carried no Cookie header.
        self.assertIdentical(req.headers.getRawHeaders('cookie'), None)
        # Deliver a response with a Set-Cookie header; firing `res` runs
        # _checkCookie, which verifies the jar was populated.
        resp = client.Response(
            ('HTTP', 1, 1),
            200,
            'OK',
            client.Headers({'Set-Cookie': ['foo=1',]}),
            None)
        res.callback(resp)
        return d
    def test_requestWithCookie(self):
        """
        L{CookieAgent.request} inserts a C{'Cookie'} header into the L{Request}
        object when there is a cookie matching the request URI in the cookie
        jar.
        """
        uri = 'http://example.com:1234/foo?bar'
        cookie = 'foo=1'
        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        self.assertEqual(len(list(cookieJar)), 1)
        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        cookieAgent.request('GET', uri)
        req, res = self.protocol.requests.pop()
        self.assertEqual(req.headers.getRawHeaders('cookie'), [cookie])
    def test_secureCookie(self):
        """
        L{CookieAgent} is able to handle secure cookies, ie cookies which
        should only be handled over https.
        """
        uri = 'https://example.com:1234/foo?bar'
        cookie = 'foo=1;secure'
        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        self.assertEqual(len(list(cookieJar)), 1)
        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        cookieAgent.request('GET', uri)
        req, res = self.protocol.requests.pop()
        # The 'secure' attribute is not sent back to the server.
        self.assertEqual(req.headers.getRawHeaders('cookie'), ['foo=1'])
    def test_secureCookieOnInsecureConnection(self):
        """
        If a cookie is setup as secure, it won't be sent with the request if
        it's not over HTTPS.
        """
        uri = 'http://example.com/foo?bar'
        cookie = 'foo=1;secure'
        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        self.assertEqual(len(list(cookieJar)), 1)
        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        cookieAgent.request('GET', uri)
        req, res = self.protocol.requests.pop()
        self.assertIdentical(None, req.headers.getRawHeaders('cookie'))
    def test_portCookie(self):
        """
        L{CookieAgent} supports cookies which enforces the port number they
        need to be transferred upon.
        """
        uri = 'https://example.com:1234/foo?bar'
        cookie = 'foo=1;port=1234'
        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        self.assertEqual(len(list(cookieJar)), 1)
        agent = self.buildAgentForWrapperTest(self.reactor)
        cookieAgent = client.CookieAgent(agent, cookieJar)
        cookieAgent.request('GET', uri)
        req, res = self.protocol.requests.pop()
        self.assertEqual(req.headers.getRawHeaders('cookie'), ['foo=1'])
    def test_portCookieOnWrongPort(self):
        """
        When creating a cookie with a port directive, it won't be added to the
        L{cookie.CookieJar} if the URI is on a different port.
        """
        uri = 'https://example.com:4567/foo?bar'
        cookie = 'foo=1;port=1234'
        cookieJar = cookielib.CookieJar()
        self.addCookies(cookieJar, uri, [cookie])
        # Port mismatch: the jar rejects the cookie entirely.
        self.assertEqual(len(list(cookieJar)), 0)
class Decoder1(proxyForInterface(IResponse)):
    """
    A test decoder to be used by L{client.ContentDecoderAgent} tests.

    It simply proxies the wrapped L{IResponse} without transforming it.
    """
class Decoder2(Decoder1):
    """
    A test decoder to be used by L{client.ContentDecoderAgent} tests.

    Distinct from L{Decoder1} only so tests can tell which decoder wrapped
    a response.
    """
class ContentDecoderAgentTests(TestCase, FakeReactorAndConnectMixin,
                               AgentTestsMixin):
    """
    Tests for L{client.ContentDecoderAgent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.ContentDecoderAgent}
        """
        return client.ContentDecoderAgent(self.agent, [])
    def setUp(self):
        """
        Create an L{Agent} wrapped around a fake reactor.
        """
        self.reactor = self.Reactor()
        self.agent = self.buildAgentForWrapperTest(self.reactor)
    def test_acceptHeaders(self):
        """
        L{client.ContentDecoderAgent} sets the I{Accept-Encoding} header to the
        names of the available decoder objects.
        """
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])
        agent.request('GET', 'http://example.com/foo')
        protocol = self.protocol
        self.assertEqual(len(protocol.requests), 1)
        req, res = protocol.requests.pop()
        self.assertEqual(req.headers.getRawHeaders('accept-encoding'),
                         ['decoder1,decoder2'])
    def test_existingHeaders(self):
        """
        If there are existing I{Accept-Encoding} fields,
        L{client.ContentDecoderAgent} creates a new field for the decoders it
        knows about.
        """
        headers = http_headers.Headers({'foo': ['bar'],
                                        'accept-encoding': ['fizz']})
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])
        agent.request('GET', 'http://example.com/foo', headers=headers)
        protocol = self.protocol
        self.assertEqual(len(protocol.requests), 1)
        req, res = protocol.requests.pop()
        # The pre-existing 'fizz' value is preserved alongside the new one.
        self.assertEqual(
            list(req.headers.getAllRawHeaders()),
            [('Host', ['example.com']),
             ('Foo', ['bar']),
             ('Accept-Encoding', ['fizz', 'decoder1,decoder2'])])
    def test_plainEncodingResponse(self):
        """
        If the response is not encoded despite the request I{Accept-Encoding}
        headers, L{client.ContentDecoderAgent} simply forwards the response.
        """
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])
        deferred = agent.request('GET', 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        response = Response(('HTTP', 1, 1), 200, 'OK', http_headers.Headers(),
                            None)
        res.callback(response)
        # Identity: no wrapping when there is no Content-Encoding.
        return deferred.addCallback(self.assertIdentical, response)
    def test_unsupportedEncoding(self):
        """
        If an encoding unknown to the L{client.ContentDecoderAgent} is found,
        the response is unchanged.
        """
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])
        deferred = agent.request('GET', 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers({'foo': ['bar'],
                                        'content-encoding': ['fizz']})
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, None)
        res.callback(response)
        return deferred.addCallback(self.assertIdentical, response)
    def test_unknownEncoding(self):
        """
        When L{client.ContentDecoderAgent} encounters a decoder it doesn't know
        about, it stops decoding even if another encoding is known afterwards.
        """
        agent = client.ContentDecoderAgent(
            self.agent, [('decoder1', Decoder1), ('decoder2', Decoder2)])
        deferred = agent.request('GET', 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers({'foo': ['bar'],
                                        'content-encoding':
                                        ['decoder1,fizz,decoder2']})
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, None)
        res.callback(response)
        def check(result):
            # decoder2 (last applied encoding) was unwrapped, but decoding
            # stopped at unknown 'fizz', so 'decoder1,fizz' remains.
            self.assertNotIdentical(response, result)
            self.assertIsInstance(result, Decoder2)
            self.assertEqual(['decoder1,fizz'],
                             result.headers.getRawHeaders('content-encoding'))
        return deferred.addCallback(check)
class SimpleAgentProtocol(Protocol):
    """
    A L{Protocol} to be used with an L{client.Agent} to receive data.
    @ivar finished: L{Deferred} firing when C{connectionLost} is called.
    @ivar made: L{Deferred} firing when C{connectionMade} is called.
    @ivar received: C{list} of received data.
    """
    def __init__(self):
        self.made = Deferred()
        self.finished = Deferred()
        self.received = []
    def connectionMade(self):
        # Signal that the body delivery has started.
        self.made.callback(None)
    def connectionLost(self, reason):
        # Signal that the body delivery has completed; the reason is ignored.
        self.finished.callback(None)
    def dataReceived(self, data):
        # Accumulate body chunks for later inspection by tests.
        self.received.append(data)
class ContentDecoderAgentWithGzipTests(TestCase,
                                       FakeReactorAndConnectMixin):
    """
    Tests for L{client.ContentDecoderAgent} configured with the C{gzip}
    decoder, L{client.GzipDecoder}.
    """
    def setUp(self):
        """
        Create an L{Agent} wrapped around a fake reactor.
        """
        self.reactor = self.Reactor()
        agent = self.buildAgentForWrapperTest(self.reactor)
        self.agent = client.ContentDecoderAgent(
            agent, [("gzip", client.GzipDecoder)])
    def test_gzipEncodingResponse(self):
        """
        If the response has a C{gzip} I{Content-Encoding} header,
        L{GzipDecoder} wraps the response to return uncompressed data to the
        user.
        """
        deferred = self.agent.request('GET', 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers({'foo': ['bar'],
                                        'content-encoding': ['gzip']})
        transport = StringTransport()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, transport)
        response.length = 12
        res.callback(response)
        # wbits of 16+MAX_WBITS makes zlib emit a gzip-framed stream.
        compressor = zlib.compressobj(2, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
        data = (compressor.compress('x' * 6) + compressor.compress('y' * 4) +
                compressor.flush())
        def checkResponse(result):
            self.assertNotIdentical(result, response)
            self.assertEqual(result.version, ('HTTP', 1, 1))
            self.assertEqual(result.code, 200)
            self.assertEqual(result.phrase, 'OK')
            # Content-Encoding is consumed by the decoder and removed.
            self.assertEqual(list(result.headers.getAllRawHeaders()),
                             [('Foo', ['bar'])])
            # The decompressed length can't be known up front.
            self.assertEqual(result.length, UNKNOWN_LENGTH)
            self.assertRaises(AttributeError, getattr, result, 'unknown')
            # Deliver the compressed body split across two chunks.
            response._bodyDataReceived(data[:5])
            response._bodyDataReceived(data[5:])
            response._bodyDataFinished()
            protocol = SimpleAgentProtocol()
            result.deliverBody(protocol)
            self.assertEqual(protocol.received, ['x' * 6 + 'y' * 4])
            return defer.gatherResults([protocol.made, protocol.finished])
        deferred.addCallback(checkResponse)
        return deferred
    def test_brokenContent(self):
        """
        If the data received by the L{GzipDecoder} isn't valid gzip-compressed
        data, the call to C{deliverBody} fails with a C{zlib.error}.
        """
        deferred = self.agent.request('GET', 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers({'foo': ['bar'],
                                        'content-encoding': ['gzip']})
        transport = StringTransport()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, transport)
        response.length = 12
        res.callback(response)
        data = "not gzipped content"
        def checkResponse(result):
            response._bodyDataReceived(data)
            result.deliverBody(Protocol())
        deferred.addCallback(checkResponse)
        self.assertFailure(deferred, client.ResponseFailed)
        def checkFailure(error):
            # The underlying cause is the zlib decompression error.
            error.reasons[0].trap(zlib.error)
            self.assertIsInstance(error.response, Response)
        return deferred.addCallback(checkFailure)
    def test_flushData(self):
        """
        When the connection with the server is lost, the gzip protocol calls
        C{flush} on the zlib decompressor object to get uncompressed data which
        may have been buffered.
        """
        # Stub decompressor: decompress() yields 'x', flush() yields 'y', so
        # the test can observe that flush() output is delivered too.
        class decompressobj(object):
            def __init__(self, wbits):
                pass
            def decompress(self, data):
                return 'x'
            def flush(self):
                return 'y'
        oldDecompressObj = zlib.decompressobj
        zlib.decompressobj = decompressobj
        self.addCleanup(setattr, zlib, 'decompressobj', oldDecompressObj)
        deferred = self.agent.request('GET', 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers({'content-encoding': ['gzip']})
        transport = StringTransport()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, transport)
        res.callback(response)
        def checkResponse(result):
            response._bodyDataReceived('data')
            response._bodyDataFinished()
            protocol = SimpleAgentProtocol()
            result.deliverBody(protocol)
            self.assertEqual(protocol.received, ['x', 'y'])
            return defer.gatherResults([protocol.made, protocol.finished])
        deferred.addCallback(checkResponse)
        return deferred
    def test_flushError(self):
        """
        If the C{flush} call in C{connectionLost} fails, the C{zlib.error}
        exception is caught and turned into a L{ResponseFailed}.
        """
        # Stub decompressor whose flush() raises, exercising the error path.
        class decompressobj(object):
            def __init__(self, wbits):
                pass
            def decompress(self, data):
                return 'x'
            def flush(self):
                raise zlib.error()
        oldDecompressObj = zlib.decompressobj
        zlib.decompressobj = decompressobj
        self.addCleanup(setattr, zlib, 'decompressobj', oldDecompressObj)
        deferred = self.agent.request('GET', 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers({'content-encoding': ['gzip']})
        transport = StringTransport()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, transport)
        res.callback(response)
        def checkResponse(result):
            response._bodyDataReceived('data')
            response._bodyDataFinished()
            protocol = SimpleAgentProtocol()
            result.deliverBody(protocol)
            self.assertEqual(protocol.received, ['x', 'y'])
            return defer.gatherResults([protocol.made, protocol.finished])
        deferred.addCallback(checkResponse)
        self.assertFailure(deferred, client.ResponseFailed)
        def checkFailure(error):
            # reasons[0] is the connection-lost reason; reasons[1] is the
            # zlib.error raised by flush().
            error.reasons[1].trap(zlib.error)
            self.assertIsInstance(error.response, Response)
        return deferred.addCallback(checkFailure)
class ProxyAgentTests(TestCase, FakeReactorAndConnectMixin, AgentTestsMixin):
    """
    Tests for L{client.ProxyAgent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.ProxyAgent}
        """
        return client.ProxyAgent(
            TCP4ClientEndpoint(self.reactor, "127.0.0.1", 1234),
            self.reactor)
    def setUp(self):
        self.reactor = self.Reactor()
        self.agent = client.ProxyAgent(
            TCP4ClientEndpoint(self.reactor, "bar", 5678), self.reactor)
        # Wrap the proxy endpoint so the mixin captures the protocol used
        # for each connection attempt (exposed as self.protocol).
        oldEndpoint = self.agent._proxyEndpoint
        self.agent._proxyEndpoint = self.StubEndpoint(oldEndpoint, self)
    def test_proxyRequest(self):
        """
        L{client.ProxyAgent} issues an HTTP request against the proxy, with the
        full URI as path, when C{request} is called.
        """
        headers = http_headers.Headers({'foo': ['bar']})
        # Just going to check the body for identity, so it doesn't need to be
        # real.
        body = object()
        self.agent.request(
            'GET', 'http://example.com:1234/foo?bar', headers, body)
        # The TCP connection goes to the proxy, not to example.com.
        host, port, factory = self.reactor.tcpClients.pop()[:3]
        self.assertEqual(host, "bar")
        self.assertEqual(port, 5678)
        self.assertIsInstance(factory._wrappedFactory,
                              client._HTTP11ClientFactory)
        protocol = self.protocol
        # The request should be issued.
        self.assertEqual(len(protocol.requests), 1)
        req, res = protocol.requests.pop()
        self.assertIsInstance(req, Request)
        self.assertEqual(req.method, 'GET')
        # Proxied requests carry the absolute URI in the request line.
        self.assertEqual(req.uri, 'http://example.com:1234/foo?bar')
        self.assertEqual(
            req.headers,
            http_headers.Headers({'foo': ['bar'],
                                  'host': ['example.com:1234']}))
        self.assertIdentical(req.bodyProducer, body)
    def test_nonPersistent(self):
        """
        C{ProxyAgent} connections are not persistent by default.
        """
        self.assertEqual(self.agent._pool.persistent, False)
    def test_connectUsesConnectionPool(self):
        """
        When a connection is made by the C{ProxyAgent}, it uses its pool's
        C{getConnection} method to do so, with the endpoint it was constructed
        with and a key of C{("http-proxy", endpoint)}.
        """
        endpoint = DummyEndpoint()
        # 'this' is used as the method receiver so 'self' still refers to
        # the test case inside getConnection.
        class DummyPool(object):
            connected = False
            persistent = False
            def getConnection(this, key, ep):
                this.connected = True
                self.assertIdentical(ep, endpoint)
                # The key is *not* tied to the final destination, but only to
                # the address of the proxy, since that's where *we* are
                # connecting:
                self.assertEqual(key, ("http-proxy", endpoint))
                return defer.succeed(StubHTTPProtocol())
        pool = DummyPool()
        agent = client.ProxyAgent(endpoint, self.reactor, pool=pool)
        self.assertIdentical(pool, agent._pool)
        agent.request('GET', 'http://foo/')
        self.assertEqual(agent._pool.connected, True)
class _RedirectAgentTestsMixin(object):
    """
    Test cases mixin for L{RedirectAgentTests} and
    L{BrowserLikeRedirectAgentTests}.
    """
    def test_noRedirect(self):
        """
        L{client.RedirectAgent} behaves like L{client.Agent} if the response
        doesn't contain a redirect.
        """
        deferred = self.agent.request('GET', 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers()
        response = Response(('HTTP', 1, 1), 200, 'OK', headers, None)
        res.callback(response)
        # No follow-up request was made and the original response comes back.
        self.assertEqual(0, len(self.protocol.requests))
        result = self.successResultOf(deferred)
        self.assertIdentical(response, result)
        self.assertIdentical(result.previousResponse, None)

    def _testRedirectDefault(self, code):
        """
        When getting a redirect, L{client.RedirectAgent} follows the URL
        specified in the L{Location} header field and make a new request.

        @param code: HTTP status code.
        """
        self.agent.request('GET', 'http://example.com/foo')
        host, port = self.reactor.tcpClients.pop()[:2]
        self.assertEqual("example.com", host)
        self.assertEqual(80, port)
        req, res = self.protocol.requests.pop()
        # Redirect to an https:// URI so the follow-up connection is visibly
        # different (sslClients, port 443).
        headers = http_headers.Headers(
            {'location': ['https://example.com/bar']})
        response = Response(('HTTP', 1, 1), code, 'OK', headers, None)
        res.callback(response)
        req2, res2 = self.protocol.requests.pop()
        self.assertEqual('GET', req2.method)
        self.assertEqual('/bar', req2.uri)
        host, port = self.reactor.sslClients.pop()[:2]
        self.assertEqual("example.com", host)
        self.assertEqual(443, port)

    def test_redirect301(self):
        """
        L{client.RedirectAgent} follows redirects on status code 301.
        """
        self._testRedirectDefault(301)

    def test_redirect302(self):
        """
        L{client.RedirectAgent} follows redirects on status code 302.
        """
        self._testRedirectDefault(302)

    def test_redirect307(self):
        """
        L{client.RedirectAgent} follows redirects on status code 307.
        """
        self._testRedirectDefault(307)

    def _testRedirectToGet(self, code, method):
        """
        L{client.RedirectAgent} changes the method to I{GET} when getting
        a redirect on a non-I{GET} request.

        @param code: HTTP status code.
        @param method: HTTP request method.
        """
        self.agent.request(method, 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers(
            {'location': ['http://example.com/bar']})
        response = Response(('HTTP', 1, 1), code, 'OK', headers, None)
        res.callback(response)
        req2, res2 = self.protocol.requests.pop()
        self.assertEqual('GET', req2.method)
        self.assertEqual('/bar', req2.uri)

    def test_redirect303(self):
        """
        L{client.RedirectAgent} changes the method to I{GET} when getting a 303
        redirect on a I{POST} request.
        """
        self._testRedirectToGet(303, 'POST')

    def test_noLocationField(self):
        """
        If no L{Location} header field is found when getting a redirect,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping a
        L{error.RedirectWithNoLocation} exception.
        """
        deferred = self.agent.request('GET', 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers()
        response = Response(('HTTP', 1, 1), 301, 'OK', headers, None)
        res.callback(response)
        fail = self.failureResultOf(deferred, client.ResponseFailed)
        fail.value.reasons[0].trap(error.RedirectWithNoLocation)
        self.assertEqual('http://example.com/foo',
                         fail.value.reasons[0].value.uri)
        self.assertEqual(301, fail.value.response.code)

    def _testPageRedirectFailure(self, code, method):
        """
        When getting a redirect on an unsupported request method,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping
        a L{error.PageRedirect} exception.

        @param code: HTTP status code.
        @param method: HTTP request method.
        """
        deferred = self.agent.request(method, 'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers()
        response = Response(('HTTP', 1, 1), code, 'OK', headers, None)
        res.callback(response)
        fail = self.failureResultOf(deferred, client.ResponseFailed)
        fail.value.reasons[0].trap(error.PageRedirect)
        self.assertEqual('http://example.com/foo',
                         fail.value.reasons[0].value.location)
        self.assertEqual(code, fail.value.response.code)

    def test_307OnPost(self):
        """
        When getting a 307 redirect on a I{POST} request,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping
        a L{error.PageRedirect} exception.
        """
        self._testPageRedirectFailure(307, 'POST')

    def test_redirectLimit(self):
        """
        If the limit of redirects specified to L{client.RedirectAgent} is
        reached, the deferred fires with L{ResponseFailed} error wrapping
        a L{InfiniteRedirection} exception.
        """
        agent = self.buildAgentForWrapperTest(self.reactor)
        # Limit of one redirect: the second 302 must trip the error.
        redirectAgent = client.RedirectAgent(agent, 1)
        deferred = redirectAgent.request(b'GET', b'http://example.com/foo')
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers(
            {b'location': [b'http://example.com/bar']})
        response = Response((b'HTTP', 1, 1), 302, b'OK', headers, None)
        res.callback(response)
        req2, res2 = self.protocol.requests.pop()
        response2 = Response((b'HTTP', 1, 1), 302, b'OK', headers, None)
        res2.callback(response2)
        fail = self.failureResultOf(deferred, client.ResponseFailed)
        fail.value.reasons[0].trap(error.InfiniteRedirection)
        self.assertEqual('http://example.com/foo',
                         fail.value.reasons[0].value.location)
        self.assertEqual(302, fail.value.response.code)

    def _testRedirectURI(self, uri, location, finalURI):
        """
        When L{client.RedirectAgent} encounters a relative redirect I{URI}, it
        is resolved against the request I{URI} before following the redirect.

        @param uri: Request URI.
        @param location: I{Location} header redirect URI.
        @param finalURI: Expected final URI.
        """
        self.agent.request('GET', uri)
        req, res = self.protocol.requests.pop()
        headers = http_headers.Headers(
            {'location': [location]})
        response = Response(('HTTP', 1, 1), 302, 'OK', headers, None)
        res.callback(response)
        req2, res2 = self.protocol.requests.pop()
        self.assertEqual('GET', req2.method)
        self.assertEqual(finalURI, req2.absoluteURI)

    def test_relativeURI(self):
        """
        L{client.RedirectAgent} resolves and follows relative I{URI}s in
        redirects, preserving query strings.
        """
        self._testRedirectURI(
            'http://example.com/foo/bar', 'baz',
            'http://example.com/foo/baz')
        self._testRedirectURI(
            'http://example.com/foo/bar', '/baz',
            'http://example.com/baz')
        self._testRedirectURI(
            'http://example.com/foo/bar', '/baz?a',
            'http://example.com/baz?a')

    def test_relativeURIPreserveFragments(self):
        """
        L{client.RedirectAgent} resolves and follows relative I{URI}s in
        redirects, preserving fragments in way that complies with the HTTP 1.1
        bis draft.

        @see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-22#section-7.1.2}
        """
        self._testRedirectURI(
            'http://example.com/foo/bar#frag', '/baz?a',
            'http://example.com/baz?a#frag')
        self._testRedirectURI(
            'http://example.com/foo/bar', '/baz?a#frag2',
            'http://example.com/baz?a#frag2')

    def test_relativeURISchemeRelative(self):
        """
        L{client.RedirectAgent} resolves and follows scheme relative I{URI}s in
        redirects, replacing the hostname and port when required.
        """
        self._testRedirectURI(
            'http://example.com/foo/bar', '//foo.com/baz',
            'http://foo.com/baz')
        self._testRedirectURI(
            'http://example.com/foo/bar', '//foo.com:81/baz',
            'http://foo.com:81/baz')

    def test_responseHistory(self):
        """
        L{Response.previousResponse} references the previous L{Response} from
        a redirect, or C{None} if there was no previous response.
        """
        agent = self.buildAgentForWrapperTest(self.reactor)
        redirectAgent = client.RedirectAgent(agent)
        deferred = redirectAgent.request(b'GET', b'http://example.com/foo')
        redirectReq, redirectRes = self.protocol.requests.pop()
        headers = http_headers.Headers(
            {b'location': [b'http://example.com/bar']})
        redirectResponse = Response((b'HTTP', 1, 1), 302, b'OK', headers, None)
        redirectRes.callback(redirectResponse)
        req, res = self.protocol.requests.pop()
        response = Response((b'HTTP', 1, 1), 200, b'OK', headers, None)
        res.callback(response)
        finalResponse = self.successResultOf(deferred)
        # The history chain: final -> redirect -> None.
        self.assertIdentical(finalResponse.previousResponse, redirectResponse)
        self.assertIdentical(redirectResponse.previousResponse, None)
class RedirectAgentTests(TestCase, FakeReactorAndConnectMixin,
                         _RedirectAgentTestsMixin, AgentTestsMixin):
    """
    Tests for L{client.RedirectAgent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.RedirectAgent}
        """
        return client.RedirectAgent(
            self.buildAgentForWrapperTest(self.reactor))

    def setUp(self):
        self.reactor = self.Reactor()
        self.agent = self.makeAgent()

    def test_301OnPost(self):
        """
        When getting a 301 redirect on a I{POST} request,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping
        a L{error.PageRedirect} exception.
        """
        self._testPageRedirectFailure(301, 'POST')

    def test_302OnPost(self):
        """
        When getting a 302 redirect on a I{POST} request,
        L{client.RedirectAgent} fails with a L{ResponseFailed} error wrapping
        a L{error.PageRedirect} exception.
        """
        self._testPageRedirectFailure(302, 'POST')
class BrowserLikeRedirectAgentTests(TestCase,
                                    FakeReactorAndConnectMixin,
                                    _RedirectAgentTestsMixin,
                                    AgentTestsMixin):
    """
    Tests for L{client.BrowserLikeRedirectAgent}.
    """
    def makeAgent(self):
        """
        @return: a new L{twisted.web.client.BrowserLikeRedirectAgent}
        """
        return client.BrowserLikeRedirectAgent(
            self.buildAgentForWrapperTest(self.reactor))

    def setUp(self):
        self.reactor = self.Reactor()
        self.agent = self.makeAgent()

    def test_redirectToGet301(self):
        """
        L{client.BrowserLikeRedirectAgent} changes the method to I{GET} when
        getting a 301 redirect on a I{POST} request.
        """
        self._testRedirectToGet(301, 'POST')

    def test_redirectToGet302(self):
        """
        L{client.BrowserLikeRedirectAgent} changes the method to I{GET} when
        getting a 302 redirect on a I{POST} request.
        """
        self._testRedirectToGet(302, 'POST')
class DummyResponse(object):
    """
    Minimal stand-in for L{IResponse} used by the readBody tests; it only
    remembers the protocol handed to C{deliverBody} and never writes any
    body data to it.

    @ivar protocol: After C{deliverBody} is called, the protocol it was
        called with.
    """
    code = 200
    phrase = "OK"

    def __init__(self, headers=None):
        """
        @param headers: Optional headers for this response; an empty
            L{Headers} instance is created when omitted.
        @type headers: L{Headers}
        """
        self.headers = Headers() if headers is None else headers

    def deliverBody(self, protocol):
        """
        Capture C{protocol} without delivering anything to it.
        """
        self.protocol = protocol
class ReadBodyTests(TestCase):
    """
    Tests for L{client.readBody}
    """
    def test_success(self):
        """
        L{client.readBody} returns a L{Deferred} which fires with the complete
        body of the L{IResponse} provider passed to it.
        """
        response = DummyResponse()
        d = client.readBody(response)
        response.protocol.dataReceived("first")
        response.protocol.dataReceived("second")
        response.protocol.connectionLost(Failure(ResponseDone()))
        self.assertEqual(self.successResultOf(d), "firstsecond")

    def test_withPotentialDataLoss(self):
        """
        If the full body of the L{IResponse} passed to L{client.readBody} is
        not definitely received, the L{Deferred} returned by L{client.readBody}
        fires with a L{Failure} wrapping L{client.PartialDownloadError} with
        the content that was received.
        """
        response = DummyResponse()
        d = client.readBody(response)
        response.protocol.dataReceived("first")
        response.protocol.dataReceived("second")
        response.protocol.connectionLost(Failure(PotentialDataLoss()))
        failure = self.failureResultOf(d)
        failure.trap(client.PartialDownloadError)
        # Compare all attributes in one assertion for a single, readable
        # failure message.
        self.assertEqual({
            "status": failure.value.status,
            "message": failure.value.message,
            "body": failure.value.response,
        }, {
            "status": 200,
            "message": "OK",
            "body": "firstsecond",
        })

    def test_otherErrors(self):
        """
        If there is an exception other than L{client.PotentialDataLoss} while
        L{client.readBody} is collecting the response body, the L{Deferred}
        returned by L{client.readBody} fires with that exception.
        """
        response = DummyResponse()
        d = client.readBody(response)
        response.protocol.dataReceived("first")
        response.protocol.connectionLost(
            Failure(ConnectionLost("mystery problem")))
        reason = self.failureResultOf(d)
        reason.trap(ConnectionLost)
        self.assertEqual(reason.value.args, ("mystery problem",))
| bsd-3-clause |
avorio/rdflib | rdflib/plugins/sparql/aggregates.py | 21 | 3932 | from rdflib import Literal, XSD
from rdflib.plugins.sparql.evalutils import _eval
from rdflib.plugins.sparql.operators import numeric
from rdflib.plugins.sparql.datatypes import type_promotion
from rdflib.plugins.sparql.compat import num_max, num_min
from decimal import Decimal
"""
Aggregation functions
"""
def _eval_rows(expr, group):
    """
    Yield the value of ``expr`` evaluated against each solution row of
    ``group``, silently skipping rows where evaluation raises.
    """
    for solution in group:
        try:
            value = _eval(expr, solution)
        except:
            continue
        yield value
def agg_Sum(a, group, bindings):
    """
    SPARQL SUM aggregate: bind the sum of ``a.vars`` over ``group`` to
    ``a.res`` in ``bindings``.

    Rows whose expression does not evaluate to a numeric literal are
    skipped; the result datatype is the type-promotion of all datatypes
    seen.
    """
    c = 0
    dt = None
    for x in group:
        try:
            e = _eval(a.vars, x)
            n = numeric(e)
            # Track a common datatype for the result literal.
            # (was `dt == None`; identity comparison is the correct idiom)
            if dt is None:
                dt = e.datatype
            else:
                dt = type_promotion(dt, e.datatype)
            # Decimal and float don't mix in arithmetic; coerce to float
            # when both kinds have been seen.
            if type(c) == float and type(n) == Decimal:
                c += float(n)
            elif type(n) == float and type(c) == Decimal:
                c = float(c) + n
            else:
                c += n
        except:
            pass  # simply dont count
    bindings[a.res] = Literal(c, datatype=dt)
# Perhaps TODO: keep datatype for max/min?
def agg_Min(a, group, bindings):
    """
    SPARQL MIN aggregate: bind the smallest numeric value of ``a.vars``
    over ``group`` to ``a.res``; any evaluation error leaves no binding.
    """
    best = None
    for row in group:
        try:
            value = numeric(_eval(a.vars, row))
            best = value if best is None else num_min(value, best)
        except:
            return  # error in aggregate => no binding
    if best is not None:
        bindings[a.res] = Literal(best)
def agg_Max(a, group, bindings):
    """
    SPARQL MAX aggregate: bind the largest numeric value of ``a.vars``
    over ``group`` to ``a.res``; any evaluation error leaves no binding.
    """
    best = None
    for row in group:
        try:
            value = numeric(_eval(a.vars, row))
            best = value if best is None else num_max(value, best)
        except:
            return  # error in aggregate => no binding
    if best is not None:
        bindings[a.res] = Literal(best)
def agg_Count(a, group, bindings):
    """
    SPARQL COUNT aggregate: bind the number of rows in ``group`` to
    ``a.res``.  For ``COUNT(expr)`` (i.e. not ``*``) the expression must
    evaluate; an evaluation error aborts without binding.
    """
    total = 0
    for row in group:
        try:
            if a.vars != '*':
                _eval(a.vars, row)  # only count rows where the expr evaluates
            total += 1
        except:
            return  # error in aggregate => no binding
    bindings[a.res] = Literal(total)
def agg_Sample(a, group, bindings):
    """
    SPARQL SAMPLE aggregate: bind the value of ``a.vars`` for an
    arbitrary (here: first) row of ``group`` to ``a.res``; an empty
    group produces no binding.
    """
    try:
        # next(iter(...)) instead of iter(...).next(): the .next() method
        # is Python-2-only, the next() builtin works on 2.6+ and 3.x.
        bindings[a.res] = _eval(a.vars, next(iter(group)))
    except StopIteration:
        pass  # no res
def agg_GroupConcat(a, group, bindings):
    # SPARQL GROUP_CONCAT aggregate: join the stringified expression values
    # with the given separator (default: a single space).
    sep = a.separator or " "
    bindings[a.res] = Literal(
        sep.join(unicode(x) for x in _eval_rows(a.vars, group)))
def agg_Avg(a, group, bindings):
    """
    SPARQL AVG aggregate: bind the average of ``a.vars`` over ``group``
    to ``a.res``.

    Any evaluation error aborts without binding; an empty group averages
    to 0 (per SPARQL 1.1's definition of Avg over the empty multiset).
    """
    c = 0
    s = 0
    dt = None
    for x in group:
        try:
            e = _eval(a.vars, x)
            n = numeric(e)
            if dt is None:
                dt = e.datatype
            else:
                dt = type_promotion(dt, e.datatype)
            # Decimal and float don't mix in arithmetic; coerce to float
            # when both kinds have been seen.
            if type(s) == float and type(n) == Decimal:
                s += float(n)
            elif type(n) == float and type(s) == Decimal:
                s = float(s) + n
            else:
                s += n
            c += 1
        except:
            return  # error in aggregate => no binding
    if c == 0:
        # BUG FIX: previously execution fell through to the division below
        # and raised ZeroDivisionError for empty groups.
        bindings[a.res] = Literal(0)
        return
    if dt == XSD.float or dt == XSD.double:
        bindings[a.res] = Literal(s / c)
    else:
        bindings[a.res] = Literal(Decimal(s) / Decimal(c))
def evalAgg(a, group, bindings):
    """
    Dispatch the aggregate node ``a`` to the matching agg_* implementation,
    raising for aggregate names we do not implement.
    """
    dispatch = {
        'Aggregate_Count': agg_Count,
        'Aggregate_Sum': agg_Sum,
        'Aggregate_Sample': agg_Sample,
        'Aggregate_GroupConcat': agg_GroupConcat,
        'Aggregate_Avg': agg_Avg,
        'Aggregate_Min': agg_Min,
        'Aggregate_Max': agg_Max,
    }
    try:
        fn = dispatch[a.name]
    except KeyError:
        raise Exception("Unknown aggregate function " + a.name)
    return fn(a, group, bindings)
| bsd-3-clause |
slonik-az/cython | Cython/Build/Dependencies.py | 13 | 37765 | from __future__ import absolute_import, print_function
import cython
from .. import __version__
import re, os, sys, time
from glob import iglob
try:
import gzip
gzip_open = gzip.open
gzip_ext = '.gz'
except ImportError:
gzip_open = open
gzip_ext = ''
import shutil
import subprocess
try:
import hashlib
except ImportError:
import md5 as hashlib
try:
from io import open as io_open
except ImportError:
from codecs import open as io_open
try:
    from os.path import relpath as _relpath
except ImportError:
    # Py<2.6
    def _relpath(path, start=os.path.curdir):
        """
        Pure-Python fallback for os.path.relpath on Python < 2.6:
        compute ``path`` relative to ``start``.
        """
        if not path:
            raise ValueError("no path specified")
        start_list = os.path.abspath(start).split(os.path.sep)
        path_list = os.path.abspath(path).split(os.path.sep)
        # Walk up from the common prefix, then down into path.
        i = len(os.path.commonprefix([start_list, path_list]))
        rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return os.path.curdir
        return os.path.join(*rel_list)
from distutils.extension import Extension
from .. import Utils
from ..Utils import cached_function, cached_method, path_exists, find_root_package_dir, is_package_dir
from ..Compiler.Main import Context, CompilationOptions, default_options
# Memoised os.path.join -- the dependency scanner joins the same paths a lot.
join_path = cached_function(os.path.join)
if sys.version_info[0] < 3:
    # stupid Py2 distutils enforces str type in list of sources
    _fs_encoding = sys.getfilesystemencoding()
    if _fs_encoding is None:
        _fs_encoding = sys.getdefaultencoding()
    def encode_filename_in_py2(filename):
        # Py2: convert unicode filenames to byte strings for distutils.
        if not isinstance(filename, bytes):
            return filename.encode(_fs_encoding)
        return filename
else:
    def encode_filename_in_py2(filename):
        # Py3: distutils accepts str; pass filenames through untouched.
        return filename
    # Py3 compatibility alias used by isinstance() checks below.
    basestring = str
def extended_iglob(pattern):
    """
    Yield the paths matching ``pattern``, a glob extended with brace
    alternation (``{a,b}``) and recursive directory matching (``**/``).
    """
    # Expand the first {a,b,...} alternation by recursing on each choice.
    if '{' in pattern:
        brace = re.match('(.*){([^}]+)}(.*)', pattern)
        if brace:
            head, alternatives, tail = brace.groups()
            for choice in alternatives.split(','):
                for match in extended_iglob(head + choice + tail):
                    yield match
            return
    if '**/' in pattern:
        # '**/' matches zero or more directory levels; deduplicate since
        # the two recursions below can produce the same path.
        emitted = set()
        prefix, suffix = pattern.split('**/', 1)
        roots = iglob(prefix + '/') if prefix else ['']
        for root in roots:
            # Match with '**/' consuming zero levels...
            for match in extended_iglob(join_path(root, suffix)):
                if match not in emitted:
                    emitted.add(match)
                    yield match
            # ...and with it consuming one level, recursively.
            for match in extended_iglob(join_path(root, '*', '**/' + suffix)):
                if match not in emitted:
                    emitted.add(match)
                    yield match
    else:
        for match in iglob(pattern):
            yield match
def nonempty(it, error_msg="expected non-empty iterator"):
    """
    Re-yield every item of ``it``; if the iterator produced nothing at
    all, raise ValueError(error_msg) once it is exhausted.
    """
    produced_anything = False
    for item in it:
        produced_anything = True
        yield item
    if not produced_anything:
        raise ValueError(error_msg)
@cached_function
def file_hash(filename):
    """
    Return a hex MD5 digest covering the file's (normalized, UTF-8
    encoded) path and its full contents.

    NOTE(review): relies on Py2 str/bytes semantics -- the md5() seed
    concatenates a str with the encoded path; verify before running on Py3.
    """
    path = os.path.normpath(filename.encode("UTF-8"))
    # Length-prefix the path so distinct paths can't collide by
    # concatenation.
    m = hashlib.md5(str(len(path)) + ":")
    m.update(path)
    # Stream the file in chunks so large files are not read whole.
    f = open(filename, 'rb')
    try:
        data = f.read(65000)
        while data:
            m.update(data)
            data = f.read(65000)
    finally:
        f.close()
    return m.hexdigest()
def parse_list(s):
    """
    Split an option string into a list of items, honouring quoted
    strings.  A bracketed string is split on commas, otherwise on
    whitespace.

    >>> parse_list("a b c")
    ['a', 'b', 'c']
    >>> parse_list("[a, b, c]")
    ['a', 'b', 'c']
    >>> parse_list('a " " b')
    ['a', ' ', 'b']
    >>> parse_list('[a, ",a", "a,", ",", ]')
    ['a', ',a', 'a,', ',']
    """
    if s[0] == '[' and s[-1] == ']':
        s, delimiter = s[1:-1], ','
    else:
        delimiter = ' '
    # Replace string literals by placeholder labels so the delimiter
    # split cannot break inside quotes.
    s, literals = strip_string_literals(s)

    def expand(token):
        token = token.strip()
        if token[0] in "'\"":
            # Quoted item: map the placeholder label back to its literal.
            return literals[token[1:-1]]
        return token

    return [expand(piece) for piece in s.split(delimiter) if piece.strip()]
# Sentinel types marking distutils options that propagate transitively
# from cimported modules into the modules that cimport them.
transitive_str = object()
transitive_list = object()

# The "# distutils: <key>=<value>" directives we recognise, mapped to the
# type used when parsing and merging each setting.
distutils_settings = {
    'name':                 str,
    'sources':              list,
    'define_macros':        list,
    'undef_macros':         list,
    'libraries':            transitive_list,
    'library_dirs':         transitive_list,
    'runtime_library_dirs': transitive_list,
    'include_dirs':         transitive_list,
    'extra_objects':        list,
    'extra_compile_args':   transitive_list,
    'extra_link_args':      transitive_list,
    'export_symbols':       list,
    'depends':              transitive_list,
    'language':             transitive_str,
}
@cython.locals(start=cython.Py_ssize_t, end=cython.Py_ssize_t)
def line_iter(source):
    """
    Iterate over the lines of ``source``, which is either a single string
    (split on newlines without building the whole list up front) or any
    iterable of lines.
    """
    if isinstance(source, basestring):
        start = 0
        while True:
            end = source.find('\n', start)
            if end == -1:
                yield source[start:]  # last (possibly empty) line
                return
            yield source[start:end]
            start = end+1
    else:
        for line in source:
            yield line
class DistutilsInfo(object):
    """
    Distutils settings for one extension module, parsed from leading
    "# distutils: key=value" comment directives in a source file, or
    copied off an existing Extension instance.
    """

    def __init__(self, source=None, exn=None):
        self.values = {}
        if source is not None:
            for line in line_iter(source):
                line = line.strip()
                if line != '' and line[0] != '#':
                    break  # directives must precede any non-comment code
                line = line[1:].strip()
                if line[:10] == 'distutils:':
                    line = line[10:]
                    ix = line.index('=')
                    key = str(line[:ix].strip())
                    value = line[ix+1:].strip()
                    type = distutils_settings[key]
                    if type in (list, transitive_list):
                        value = parse_list(value)
                    if key == 'define_macros':
                        # distutils expects (name, value) tuples
                        value = [tuple(macro.split('=')) for macro in value]
                    self.values[key] = value
        elif exn is not None:
            # Copy settings from an Extension; name/sources are per-module
            # and must not propagate.
            for key in distutils_settings:
                if key in ('name', 'sources'):
                    continue
                value = getattr(exn, key, None)
                if value:
                    self.values[key] = value

    def merge(self, other):
        """
        Fold the transitive settings of ``other`` into self (in place)
        and return self.
        """
        if other is None:
            return self
        for key, value in other.values.items():
            type = distutils_settings[key]
            if type is transitive_str and key not in self.values:
                self.values[key] = value
            elif type is transitive_list:
                if key in self.values:
                    # Change a *copy* of the list (Trac #845)
                    all = self.values[key][:]
                    for v in value:
                        if v not in all:
                            all.append(v)
                    value = all
                self.values[key] = value
        return self

    def subs(self, aliases):
        """
        Return a new DistutilsInfo with every value (or list element)
        found in the ``aliases`` mapping replaced by its substitution.
        """
        if aliases is None:
            return self
        resolved = DistutilsInfo()
        for key, value in self.values.items():
            type = distutils_settings[key]
            if type in [list, transitive_list]:
                new_value_list = []
                for v in value:
                    if v in aliases:
                        v = aliases[v]
                    if isinstance(v, list):
                        # A single alias may expand to several entries.
                        new_value_list += v
                    else:
                        new_value_list.append(v)
                value = new_value_list
            else:
                if value in aliases:
                    value = aliases[value]
            resolved.values[key] = value
        return resolved

    def apply(self, extension):
        """
        Write these settings onto ``extension``, appending to its
        existing list-valued attributes.
        """
        for key, value in self.values.items():
            type = distutils_settings[key]
            if type in [list, transitive_list]:
                value = getattr(extension, key) + list(value)
            setattr(extension, key, value)
@cython.locals(start=cython.Py_ssize_t, q=cython.Py_ssize_t,
               single_q=cython.Py_ssize_t, double_q=cython.Py_ssize_t,
               hash_mark=cython.Py_ssize_t, end=cython.Py_ssize_t,
               k=cython.Py_ssize_t, counter=cython.Py_ssize_t, quote_len=cython.Py_ssize_t)
def strip_string_literals(code, prefix='__Pyx_L'):
    """
    Normalizes every string literal to be of the form '__Pyx_Lxxx',
    returning the normalized code and a mapping of labels to
    string literals.

    Comments are replaced by labels as well.  The scanner tracks the next
    occurrence of '#', "'" and '"' and advances one event at a time.
    """
    new_code = []
    literals = {}
    counter = 0
    start = q = 0
    in_quote = False
    hash_mark = single_q = double_q = -1
    code_len = len(code)
    quote_type = quote_len = None
    while True:
        # Refresh the cached positions of the next '#', "'" and '"'.
        if hash_mark < q:
            hash_mark = code.find('#', q)
        if single_q < q:
            single_q = code.find("'", q)
        if double_q < q:
            double_q = code.find('"', q)
        # q = position of the nearest quote of either kind (or -1).
        q = min(single_q, double_q)
        if q == -1:
            q = max(single_q, double_q)
        # We're done.
        if q == -1 and hash_mark == -1:
            new_code.append(code[start:])
            break
        # Try to close the quote.
        elif in_quote:
            if code[q-1] == u'\\':
                # Count preceding backslashes: an odd number escapes the
                # quote character, so it does not terminate the literal.
                k = 2
                while q >= k and code[q-k] == u'\\':
                    k += 1
                if k % 2 == 0:
                    q += 1
                    continue
            if code[q] == quote_type and (
                    quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])):
                counter += 1
                label = "%s%s_" % (prefix, counter)
                literals[label] = code[start+quote_len:q]
                full_quote = code[q:q+quote_len]
                # Keep the quote characters and replace the contents by
                # the label.
                new_code.append(full_quote)
                new_code.append(label)
                new_code.append(full_quote)
                q += quote_len
                in_quote = False
                start = q
            else:
                q += 1
        # Process comment.
        elif -1 != hash_mark and (hash_mark < q or q == -1):
            new_code.append(code[start:hash_mark+1])
            end = code.find('\n', hash_mark)
            counter += 1
            label = "%s%s_" % (prefix, counter)
            if end == -1:
                end_or_none = None
            else:
                end_or_none = end
            # Everything between '#' and end of line becomes the literal.
            literals[label] = code[hash_mark+1:end_or_none]
            new_code.append(label)
            if end == -1:
                break
            start = q = end
        # Open the quote.
        else:
            # Triple quote if the next three characters are identical.
            if code_len >= q+3 and (code[q] == code[q+1] == code[q+2]):
                quote_len = 3
            else:
                quote_len = 1
            in_quote = True
            quote_type = code[q]
            new_code.append(code[start:q])
            start = q
            q += quote_len
    return "".join(new_code), literals
# Matches the four dependency-introducing constructs in Cython sources:
# "from X cimport ...", "cimport X[, Y...]", 'cdef extern from "file"' and
# 'include "file"'.  Applied per line (re.M) to comment/literal-stripped code.
dependency_regex = re.compile(r"(?:^from +([0-9a-zA-Z_.]+) +cimport)|"
                              r"(?:^cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|"
                              r"(?:^cdef +extern +from +['\"]([^'\"]+)['\"])|"
                              r"(?:^include +['\"]([^'\"]+)['\"])", re.M)
def normalize_existing(base_path, rel_paths):
    # Delegate to the cached helper with hashable, deduplicated arguments.
    return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths)))
@cached_function
def normalize_existing0(base_dir, rel_paths):
    """
    Resolve each of ``rel_paths`` against ``base_dir``: entries that
    exist on disk are returned as normalized paths, the rest are passed
    through unchanged.
    """
    result = []
    for rel in rel_paths:
        candidate = join_path(base_dir, rel)
        if path_exists(candidate):
            result.append(os.path.normpath(candidate))
        else:
            result.append(rel)
    return result
def resolve_depends(depends, include_dirs):
    """
    Resolve each entry of ``depends`` against ``include_dirs``, dropping
    entries that cannot be located (e.g. <system> headers).
    """
    dirs = tuple(include_dirs)
    candidates = (resolve_depend(entry, dirs) for entry in depends)
    return [path for path in candidates if path is not None]
@cached_function
def resolve_depend(depend, include_dirs):
    """
    Locate ``depend`` in one of ``include_dirs`` and return its
    normalized path; return None for angle-bracketed (<...>) system
    dependencies and for files that cannot be found.
    """
    if depend[0] == '<' and depend[-1] == '>':
        return None
    for directory in include_dirs:
        candidate = join_path(directory, depend)
        if path_exists(candidate):
            return os.path.normpath(candidate)
    return None
@cached_function
def package(filename):
    """
    Return the package path containing ``filename`` as a tuple of package
    names, walking upwards while directories qualify as packages; () for
    top-level files.
    """
    dir = os.path.dirname(os.path.abspath(str(filename)))
    # dir == filename happens at the filesystem root; stop recursing there.
    if dir != filename and is_package_dir(dir):
        return package(dir) + (os.path.basename(dir),)
    else:
        return ()
@cached_function
def fully_qualified_name(filename):
    """
    Return the dotted module name for ``filename``, including its package
    path (e.g. 'pkg.sub.mod' for pkg/sub/mod.pyx).
    """
    stem = os.path.splitext(os.path.basename(filename))[0]
    return '.'.join(package(filename) + (stem,))
@cached_function
def parse_dependencies(source_filename):
    """
    Extract (cimports, includes, externs, DistutilsInfo) from a Cython
    source file.
    """
    # Actual parsing is way to slow, so we use regular expressions.
    # The only catch is that we must strip comments and string
    # literals ahead of time.
    fh = Utils.open_source_file(source_filename, error_handling='ignore')
    try:
        source = fh.read()
    finally:
        fh.close()
    distutils_info = DistutilsInfo(source)
    source, literals = strip_string_literals(source)
    # Join continuation lines and normalize tabs so the line-anchored
    # dependency regex can match.
    source = source.replace('\\\n', ' ').replace('\t', ' ')
    # TODO: pure mode
    cimports = []
    includes = []
    externs = []
    for m in dependency_regex.finditer(source):
        cimport_from, cimport_list, extern, include = m.groups()
        if cimport_from:
            cimports.append(cimport_from)
        elif cimport_list:
            cimports.extend(x.strip() for x in cimport_list.split(","))
        elif extern:
            # Filenames were replaced by labels above; map them back.
            externs.append(literals[extern])
        else:
            includes.append(literals[include])
    return cimports, includes, externs, distutils_info
class DependencyTree(object):
    """
    Resolves and caches the (transitive) dependencies of Cython source
    files: included files, cimported .pxd files, extern declarations and
    per-file distutils settings.
    """

    def __init__(self, context, quiet=False):
        self.context = context
        self.quiet = quiet
        # Keyed by (extract, merge) function pair; see transitive_merge().
        self._transitive_cache = {}

    def parse_dependencies(self, source_filename):
        # Thin wrapper over the module-level cached function.
        return parse_dependencies(source_filename)

    @cached_method
    def included_files(self, filename):
        # This is messy because included files are textually included, resolving
        # cimports (but not includes) relative to the including file.
        all = set()
        for include in self.parse_dependencies(filename)[1]:
            include_path = join_path(os.path.dirname(filename), include)
            if not path_exists(include_path):
                include_path = self.context.find_include_file(include, None)
            if include_path:
                if '.' + os.path.sep in include_path:
                    include_path = os.path.normpath(include_path)
                all.add(include_path)
                # Includes are recursive.
                all.update(self.included_files(include_path))
            elif not self.quiet:
                print("Unable to locate '%s' referenced from '%s'" % (filename, include))
        return all

    @cached_method
    def cimports_and_externs(self, filename):
        # This is really ugly. Nested cimports are resolved with respect to the
        # includer, but includes are resolved with respect to the includee.
        cimports, includes, externs = self.parse_dependencies(filename)[:3]
        cimports = set(cimports)
        externs = set(externs)
        for include in self.included_files(filename):
            included_cimports, included_externs = self.cimports_and_externs(include)
            cimports.update(included_cimports)
            externs.update(included_externs)
        return tuple(cimports), normalize_existing(filename, externs)

    def cimports(self, filename):
        return self.cimports_and_externs(filename)[0]

    def package(self, filename):
        return package(filename)

    def fully_qualified_name(self, filename):
        return fully_qualified_name(filename)

    @cached_method
    def find_pxd(self, module, filename=None):
        """
        Find the .pxd file for ``module``, trying a package-relative
        lookup first when ``filename`` provides the importing context.
        """
        is_relative = module[0] == '.'
        if is_relative and not filename:
            raise NotImplementedError("New relative imports.")
        if filename is not None:
            module_path = module.split('.')
            if is_relative:
                module_path.pop(0)  # just explicitly relative
            package_path = list(self.package(filename))
            # Each leading empty component means "go one package up".
            while module_path and not module_path[0]:
                try:
                    package_path.pop()
                except IndexError:
                    return None  # FIXME: error?
                module_path.pop(0)
            relative = '.'.join(package_path + module_path)
            pxd = self.context.find_pxd_file(relative, None)
            if pxd:
                return pxd
        if is_relative:
            return None  # FIXME: error?
        return self.context.find_pxd_file(module, None)

    @cached_method
    def cimported_files(self, filename):
        # A .pyx file implicitly depends on its own sibling .pxd, if any.
        if filename[-4:] == '.pyx' and path_exists(filename[:-4] + '.pxd'):
            pxd_list = [filename[:-4] + '.pxd']
        else:
            pxd_list = []
        for module in self.cimports(filename):
            # The cython module and its submodules are built in.
            if module[:7] == 'cython.' or module == 'cython':
                continue
            pxd_file = self.find_pxd(module, filename)
            if pxd_file is not None:
                pxd_list.append(pxd_file)
            elif not self.quiet:
                print("missing cimport in module '%s': %s" % (module, filename))
        return tuple(pxd_list)

    @cached_method
    def immediate_dependencies(self, filename):
        all = set([filename])
        all.update(self.cimported_files(filename))
        all.update(self.included_files(filename))
        return all

    def all_dependencies(self, filename):
        return self.transitive_merge(filename, self.immediate_dependencies, set.union)

    @cached_method
    def timestamp(self, filename):
        return os.path.getmtime(filename)

    def extract_timestamp(self, filename):
        return self.timestamp(filename), filename

    def newest_dependency(self, filename):
        # (timestamp, filename) of the most recently modified dependency.
        return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)])

    def transitive_fingerprint(self, filename, extra=None):
        """
        Hash the Cython compiler version, the file and all non-C
        dependencies into a cache fingerprint; None if any file is
        unreadable.
        """
        try:
            m = hashlib.md5(__version__)
            m.update(file_hash(filename))
            for x in sorted(self.all_dependencies(filename)):
                if os.path.splitext(x)[1] not in ('.c', '.cpp', '.h'):
                    m.update(file_hash(x))
            if extra is not None:
                m.update(str(extra))
            return m.hexdigest()
        except IOError:
            return None

    def distutils_info0(self, filename):
        # Per-file distutils settings, with located externs added to
        # 'depends'.
        info = self.parse_dependencies(filename)[3]
        externs = self.cimports_and_externs(filename)[1]
        if externs:
            if 'depends' in info.values:
                info.values['depends'] = list(set(info.values['depends']).union(externs))
            else:
                info.values['depends'] = list(externs)
        return info

    def distutils_info(self, filename, aliases=None, base=None):
        # Merge settings over the transitive cimport graph, then apply
        # aliases and the base Extension's settings.
        return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)
                .subs(aliases)
                .merge(base))

    def transitive_merge(self, node, extract, merge):
        """
        Merge extract(n) over all nodes n reachable from ``node`` via
        cimports, caching results per (extract, merge) pair.
        """
        try:
            seen = self._transitive_cache[extract, merge]
        except KeyError:
            seen = self._transitive_cache[extract, merge] = {}
        return self.transitive_merge_helper(
            node, extract, merge, seen, {}, self.cimported_files)[0]

    def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):
        # Depth-first merge with cycle detection: ``stack`` maps the nodes
        # on the current path to their depth; results involving an open
        # cycle are not cached until the cycle's head is closed.
        if node in seen:
            return seen[node], None
        deps = extract(node)
        if node in stack:
            return deps, node  # back-edge: report the cycle head
        try:
            stack[node] = len(stack)
            loop = None
            for next in outgoing(node):
                sub_deps, sub_loop = self.transitive_merge_helper(next, extract, merge, seen, stack, outgoing)
                if sub_loop is not None:
                    # Keep the outermost (shallowest) open cycle head.
                    if loop is not None and stack[loop] < stack[sub_loop]:
                        pass
                    else:
                        loop = sub_loop
                deps = merge(deps, sub_deps)
            if loop == node:
                loop = None  # cycle closed at this node
            if loop is None:
                seen[node] = deps  # safe to cache: no open cycle remains
            return deps, loop
        finally:
            del stack[node]
# Lazily created process-wide DependencyTree singleton.
_dep_tree = None

def create_dependency_tree(ctx=None, quiet=False):
    """
    Return the shared DependencyTree, creating it (with a default
    Context when none is given) on first use.
    """
    global _dep_tree
    if _dep_tree is None:
        if ctx is None:
            ctx = Context(["."], CompilationOptions(default_options))
        _dep_tree = DependencyTree(ctx, quiet=quiet)
    return _dep_tree
# This may be useful for advanced users?
# NOTE(review): `exclude=[]` is a mutable default argument; it is only ever
# rebound (never mutated) below, so this is latent rather than live -- but
# `exclude=None` would be the safer signature.
def create_extension_list(patterns, exclude=[], ctx=None, aliases=None, quiet=False, language=None,
                          exclude_failures=False):
    """Expand *patterns* (glob strings and/or Extension objects) into a
    (module_list, module_metadata) pair of concrete distutils Extensions
    plus their per-module distutils metadata.
    """
    if not isinstance(patterns, (list, tuple)):
        patterns = [patterns]
    # Names given explicitly via Extension objects win over glob matches.
    explicit_modules = set([m.name for m in patterns if isinstance(m, Extension)])
    seen = set()
    deps = create_dependency_tree(ctx, quiet=quiet)
    to_exclude = set()
    if not isinstance(exclude, list):
        exclude = [exclude]
    for pattern in exclude:
        to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))
    module_list = []
    module_metadata = {}
    for pattern in patterns:
        if isinstance(pattern, str):
            # A bare glob: module names are derived from the matched files.
            filepattern = pattern
            template = None
            name = '*'
            base = None
            exn_type = Extension
            ext_language = language
        elif isinstance(pattern, Extension):
            # Use the first Cython source as the file pattern; pure-C
            # Extensions are passed through untouched.
            for filepattern in pattern.sources:
                if os.path.splitext(filepattern)[1] in ('.py', '.pyx'):
                    break
            else:
                # ignore non-cython modules
                module_list.append(pattern)
                continue
            template = pattern
            name = template.name
            base = DistutilsInfo(exn=template)
            exn_type = template.__class__
            ext_language = None  # do not override whatever the Extension says
        else:
            raise TypeError(pattern)
        for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern):
            if os.path.abspath(file) in to_exclude:
                continue
            pkg = deps.package(file)
            if '*' in name:
                module_name = deps.fully_qualified_name(file)
                if module_name in explicit_modules:
                    continue
            else:
                module_name = name
            if module_name not in seen:
                try:
                    # Distutils options from directives/comments in the sources.
                    kwds = deps.distutils_info(file, aliases, base).values
                except Exception:
                    if exclude_failures:
                        continue
                    raise
                if base is not None:
                    # Fill in template options not set per-file.
                    for key, value in base.values.items():
                        if key not in kwds:
                            kwds[key] = value
                sources = [file]
                if template is not None:
                    sources += [m for m in template.sources if m != filepattern]
                if 'sources' in kwds:
                    # allow users to add .c files etc.
                    for source in kwds['sources']:
                        source = encode_filename_in_py2(source)
                        if source not in sources:
                            sources.append(source)
                    extra_sources = kwds['sources']
                    del kwds['sources']
                else:
                    extra_sources = None
                if 'depends' in kwds:
                    depends = resolve_depends(kwds['depends'], (kwds.get('include_dirs') or []) + [find_root_package_dir(file)])
                    if template is not None:
                        # Always include everything from the template.
                        depends = set(template.depends).union(depends)
                    # Sort depends to make the metadata dump in the
                    # Cython-generated C code predictable.
                    kwds['depends'] = sorted(depends)
                if ext_language and 'language' not in kwds:
                    kwds['language'] = ext_language
                module_list.append(exn_type(
                        name=module_name,
                        sources=sources,
                        **kwds))
                if extra_sources:
                    # Restore 'sources' for the metadata dump only.
                    kwds['sources'] = extra_sources
                module_metadata[module_name] = {'distutils': kwds}
                m = module_list[-1]
            # NOTE(review): this adds `name` (possibly the literal '*'), not
            # `module_name`, to `seen`; for glob patterns the dedup check above
            # therefore never fires. Preserved as-is -- confirm intent.
            seen.add(name)
    return module_list, module_metadata
# This is the user-exposed entry point.
def cythonize(module_list, exclude=[], nthreads=0, aliases=None, quiet=False, force=False, language=None,
              exclude_failures=False, **options):
    """
    Compile a set of source modules into C/C++ files and return a list of distutils
    Extension objects for them.

    As module list, pass either a glob pattern, a list of glob patterns or a list of
    Extension objects.  The latter allows you to configure the extensions separately
    through the normal distutils options.

    When using glob patterns, you can exclude certain module names explicitly
    by passing them into the 'exclude' option.

    To globally enable C++ mode, you can pass language='c++'.  Otherwise, this
    will be determined at a per-file level based on compiler directives.  This
    affects only modules found based on file names.  Extension instances passed
    into cythonize() will not be changed.

    For parallel compilation, set the 'nthreads' option to the number of
    concurrent builds.

    For a broad 'try to compile' mode that ignores compilation failures and
    simply excludes the failed extensions, pass 'exclude_failures=True'. Note
    that this only really makes sense for compiling .py files which can also
    be used without compilation.

    Additional compilation options can be passed as keyword arguments.
    """
    if 'include_path' not in options:
        options['include_path'] = ['.']
    if 'common_utility_include_dir' in options:
        if options.get('cache'):
            raise NotImplementedError("common_utility_include_dir does not yet work with caching")
        if not os.path.exists(options['common_utility_include_dir']):
            os.makedirs(options['common_utility_include_dir'])
    c_options = CompilationOptions(**options)
    cpp_options = CompilationOptions(**options); cpp_options.cplus = True
    ctx = c_options.create_context()
    # `options` is rebound below (to c_options/cpp_options per file); the
    # original kwargs dict is no longer needed past this point.
    options = c_options
    module_list, module_metadata = create_extension_list(
        module_list,
        exclude=exclude,
        ctx=ctx,
        quiet=quiet,
        exclude_failures=exclude_failures,
        language=language,
        aliases=aliases)
    deps = create_dependency_tree(ctx, quiet=quiet)
    build_dir = getattr(options, 'build_dir', None)
    modules_by_cfile = {}
    to_compile = []
    for m in module_list:
        if build_dir:
            root = os.path.realpath(os.path.abspath(find_root_package_dir(m.sources[0])))
            def copy_to_build_dir(filepath, root=root):
                # Mirror a dependency file into the out-of-place build tree,
                # preserving its path relative to the package root.
                filepath_abs = os.path.realpath(os.path.abspath(filepath))
                if os.path.isabs(filepath):
                    filepath = filepath_abs
                if filepath_abs.startswith(root):
                    mod_dir = os.path.join(build_dir,
                            os.path.dirname(_relpath(filepath, root)))
                    if not os.path.isdir(mod_dir):
                        os.makedirs(mod_dir)
                    shutil.copy(filepath, mod_dir)
            for dep in m.depends:
                copy_to_build_dir(dep)
        new_sources = []
        for source in m.sources:
            base, ext = os.path.splitext(source)
            if ext in ('.pyx', '.py'):
                if m.language == 'c++':
                    c_file = base + '.cpp'
                    options = cpp_options
                else:
                    c_file = base + '.c'
                    options = c_options
                # setup for out of place build directory if enabled
                if build_dir:
                    c_file = os.path.join(build_dir, c_file)
                    dir = os.path.dirname(c_file)
                    if not os.path.isdir(dir):
                        os.makedirs(dir)
                if os.path.exists(c_file):
                    c_timestamp = os.path.getmtime(c_file)
                else:
                    c_timestamp = -1
                # Priority goes first to modified files, second to direct
                # dependents, and finally to indirect dependents.
                if c_timestamp < deps.timestamp(source):
                    dep_timestamp, dep = deps.timestamp(source), source
                    priority = 0
                else:
                    dep_timestamp, dep = deps.newest_dependency(source)
                    priority = 2 - (dep in deps.immediate_dependencies(source))
                if force or c_timestamp < dep_timestamp:
                    if not quiet:
                        if source == dep:
                            print("Compiling %s because it changed." % source)
                        else:
                            print("Compiling %s because it depends on %s." % (source, dep))
                    if not force and hasattr(options, 'cache'):
                        extra = m.language
                        fingerprint = deps.transitive_fingerprint(source, extra)
                    else:
                        fingerprint = None
                    to_compile.append((priority, source, c_file, fingerprint, quiet,
                                       options, not exclude_failures, module_metadata.get(m.name)))
                new_sources.append(c_file)
                # Several modules may share a generated C file (e.g. via
                # include); track them so failures can be mapped back.
                if c_file not in modules_by_cfile:
                    modules_by_cfile[c_file] = [m]
                else:
                    modules_by_cfile[c_file].append(m)
            else:
                new_sources.append(source)
                if build_dir:
                    copy_to_build_dir(source)
        m.sources = new_sources
    if hasattr(options, 'cache'):
        if not os.path.exists(options.cache):
            os.makedirs(options.cache)
    to_compile.sort()
    # Drop "priority" component of "to_compile" entries and add a
    # simple progress indicator.
    N = len(to_compile)
    progress_fmt = "[{0:%d}/{1}] " % len(str(N))
    for i in range(N):
        progress = progress_fmt.format(i+1, N)
        to_compile[i] = to_compile[i][1:] + (progress,)
    if N <= 1:
        nthreads = 0
    if nthreads:
        # Requires multiprocessing (or Python >= 2.6)
        try:
            import multiprocessing
            pool = multiprocessing.Pool(
                nthreads, initializer=_init_multiprocessing_helper)
        except (ImportError, OSError):
            print("multiprocessing required for parallel cythonization")
            nthreads = 0
        else:
            # This is a bit more involved than it should be, because KeyboardInterrupts
            # break the multiprocessing workers when using a normal pool.map().
            # See, for example:
            # http://noswap.com/blog/python-multiprocessing-keyboardinterrupt
            try:
                result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
                pool.close()
                while not result.ready():
                    try:
                        result.get(99999)  # seconds
                    except multiprocessing.TimeoutError:
                        pass
            except KeyboardInterrupt:
                pool.terminate()
                raise
            pool.join()
    if not nthreads:
        for args in to_compile:
            cythonize_one(*args)
    if exclude_failures:
        failed_modules = set()
        for c_file, modules in modules_by_cfile.items():
            if not os.path.exists(c_file):
                failed_modules.update(modules)
            elif os.path.getsize(c_file) < 200:
                # A tiny output starting with '#error ' is a failure stub.
                f = io_open(c_file, 'r', encoding='iso8859-1')
                try:
                    if f.read(len('#error ')) == '#error ':
                        # dead compilation result
                        failed_modules.update(modules)
                finally:
                    f.close()
        if failed_modules:
            for module in failed_modules:
                module_list.remove(module)
            print("Failed compilations: %s" % ', '.join(sorted([
                module.name for module in failed_modules])))
    if hasattr(options, 'cache'):
        cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
    # cythonize() is often followed by the (non-Python-buffered)
    # compiler output, flush now to avoid interleaving output.
    sys.stdout.flush()
    return module_list
# When XML_RESULTS is set, wrap cythonize_one() so each compilation emits a
# JUnit-style XML result file into that directory; otherwise the decorator
# is a no-op.
if os.environ.get('XML_RESULTS'):
    compile_result_dir = os.environ['XML_RESULTS']
    def record_results(func):
        def with_record(*args):
            t = time.time()
            success = True
            try:
                try:
                    func(*args)
                except:
                    # NOTE(review): the exception is recorded as a failure but
                    # NOT re-raised, so callers never see it in this mode --
                    # confirm that is intended for the XML-results harness.
                    success = False
            finally:
                t = time.time() - t
                module = fully_qualified_name(args[0])
                name = "cythonize." + module
                failures = 1 - success
                if success:
                    failure_item = ""
                else:
                    failure_item = "failure"
                output = open(os.path.join(compile_result_dir, name + ".xml"), "w")
                output.write("""
                    <?xml version="1.0" ?>
                    <testsuite name="%(name)s" errors="0" failures="%(failures)s" tests="1" time="%(t)s">
                    <testcase classname="%(name)s" name="cythonize">
                    %(failure_item)s
                    </testcase>
                    </testsuite>
                """.strip() % locals())
                output.close()
        return with_record
else:
    record_results = lambda x: x
# TODO: Share context? Issue: pyx processing leaks into pxd module
@record_results
def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, raise_on_failure=True, embedded_metadata=None, progress=""):
    """Compile one *pyx_file* to *c_file*.

    If *fingerprint* is given and a matching gzip'ed result exists in
    options.cache, the cached C file is decompressed instead of compiling;
    on a successful fresh compile the result is written back to the cache.
    Raises CompileError on failure unless raise_on_failure is False, in
    which case a stale/partial c_file is removed instead.
    """
    from ..Compiler.Main import compile, default_options
    from ..Compiler.Errors import CompileError, PyrexError
    if fingerprint:
        if not os.path.exists(options.cache):
            try:
                os.mkdir(options.cache)
            except:
                # Tolerate a concurrent process having created it; anything
                # else is a real error.
                if not os.path.exists(options.cache):
                    raise
        # Cython-generated c files are highly compressible.
        # (E.g. a compression ratio of about 10 for Sage).
        fingerprint_file = join_path(
            options.cache, "%s-%s%s" % (os.path.basename(c_file), fingerprint, gzip_ext))
        if os.path.exists(fingerprint_file):
            if not quiet:
                print("%sFound compiled %s in cache" % (progress, pyx_file))
            # Touch the cache entry so LRU-style cleanup keeps it.
            os.utime(fingerprint_file, None)
            g = gzip_open(fingerprint_file, 'rb')
            try:
                f = open(c_file, 'wb')
                try:
                    shutil.copyfileobj(g, f)
                finally:
                    f.close()
            finally:
                g.close()
            return
    if not quiet:
        print("%sCythonizing %s" % (progress, pyx_file))
    if options is None:
        options = CompilationOptions(default_options)
    options.output_file = c_file
    options.embedded_metadata = embedded_metadata
    any_failures = 0
    try:
        result = compile([pyx_file], options)
        if result.num_errors > 0:
            any_failures = 1
    except (EnvironmentError, PyrexError) as e:
        sys.stderr.write('%s\n' % e)
        any_failures = 1
        # XXX
        import traceback
        traceback.print_exc()
    except Exception:
        if raise_on_failure:
            raise
        import traceback
        traceback.print_exc()
        any_failures = 1
    if any_failures:
        if raise_on_failure:
            raise CompileError(None, pyx_file)
        elif os.path.exists(c_file):
            # Remove the broken output so a rebuild is forced next time.
            os.remove(c_file)
    elif fingerprint:
        # Success: store the (compressed) result in the cache.
        f = open(c_file, 'rb')
        try:
            g = gzip_open(fingerprint_file, 'wb')
            try:
                shutil.copyfileobj(f, g)
            finally:
                g.close()
        finally:
            f.close()
def cythonize_one_helper(m):
    """Unpack one work tuple for pool.map(); print the traceback here because
    multiprocessing would otherwise swallow it in the worker."""
    try:
        return cythonize_one(*m)
    except Exception:
        import traceback
        traceback.print_exc()
        raise
def _init_multiprocessing_helper():
# KeyboardInterrupt kills workers, so don't let them get it
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
def cleanup_cache(cache, target_size, ratio=.85):
    """Shrink the fingerprint cache directory *cache* below *target_size* bytes.

    A cheap `du -s -k` probe is tried first to avoid stat()ing every entry
    when the cache is already small enough; on platforms without `du` (or on
    unparsable output) we fall back to walking the directory.  When over
    budget, entries are unlinked until the total drops below
    target_size * ratio.

    Fix: local names no longer shadow the builtins `all`, `file` and `time`.
    """
    try:
        p = subprocess.Popen(['du', '-s', '-k', os.path.abspath(cache)], stdout=subprocess.PIPE)
        res = p.wait()
        if res == 0:
            # `du -k` reports KiB.
            total_size = 1024 * int(p.stdout.read().strip().split()[0])
            if total_size < target_size:
                return
    except (OSError, ValueError):
        pass
    total_size = 0
    entries = []
    for entry in os.listdir(cache):
        path = join_path(cache, entry)
        s = os.stat(path)
        total_size += s.st_size
        entries.append((s.st_atime, s.st_size, path))
    if total_size > target_size:
        # NOTE(review): reversed(sorted-by-atime) unlinks the *most* recently
        # accessed entries first, which looks inverted for an LRU cache --
        # behaviour preserved as-is; confirm intent before changing.
        for atime, size, path in reversed(sorted(entries)):
            os.unlink(path)
            total_size -= size
            if total_size < target_size * ratio:
                break
| apache-2.0 |
etashjian/ECE757-final | tests/configs/realview64-switcheroo-o3.py | 33 | 2453 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
import switcheroo
# Build the gem5 root object for a 64-bit ARM VExpress full-system run that
# switches between two out-of-order (O3) CPU models.
root = LinuxArmFSSwitcheroo(
    machine_type='VExpress_EMM64',
    mem_class=DDR3_1600_x64,
    cpu_classes=(DerivO3CPU, DerivO3CPU)
).create_root()

# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
| bsd-3-clause |
unseenlaser/python-for-android | python3-alpha/python3-src/Lib/test/test_site.py | 49 | 16063 | """Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
from test.support import run_unittest, TESTFN, EnvironmentVarGuard
from test.support import captured_stderr
import builtins
import os
import sys
import re
import encodings
import subprocess
import sysconfig
from copy import copy
# Need to make sure to not import 'site' if someone specified ``-S`` at the
# command-line. Detect this by just making sure 'site' has not been imported
# already.
# Only run if the interpreter actually executed site.py (i.e. was not
# started with -S); otherwise skip the whole module.
if "site" in sys.modules:
    import site
else:
    raise unittest.SkipTest("importation of site.py suppressed")

if not os.path.isdir(site.USER_SITE):
    # need to add user site directory for tests
    os.makedirs(site.USER_SITE)
    site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
    """Tests for helper functions.
    """

    def setUp(self):
        """Save a copy of sys.path"""
        self.sys_path = sys.path[:]
        self.old_base = site.USER_BASE
        self.old_site = site.USER_SITE
        self.old_prefixes = site.PREFIXES
        self.old_vars = copy(sysconfig._CONFIG_VARS)

    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path
        site.USER_BASE = self.old_base
        site.USER_SITE = self.old_site
        site.PREFIXES = self.old_prefixes
        sysconfig._CONFIG_VARS = self.old_vars

    def test_makepath(self):
        # Test makepath() have an absolute path for its first return value
        # and a case-normalized version of the absolute path for its
        # second value.
        path_parts = ("Beginning", "End")
        original_dir = os.path.join(*path_parts)
        abs_dir, norm_dir = site.makepath(*path_parts)
        self.assertEqual(os.path.abspath(original_dir), abs_dir)
        if original_dir == os.path.normcase(original_dir):
            self.assertEqual(abs_dir, norm_dir)
        else:
            self.assertEqual(os.path.normcase(abs_dir), norm_dir)

    def test_init_pathinfo(self):
        # Every existing directory on sys.path must appear (normalized) in
        # the set returned by _init_pathinfo().
        dir_set = site._init_pathinfo()
        for entry in [site.makepath(path)[1] for path in sys.path
                        if path and os.path.isdir(path)]:
            self.assertIn(entry, dir_set,
                          "%s from sys.path not found in set returned "
                          "by _init_pathinfo(): %s" % (entry, dir_set))

    def pth_file_tests(self, pth_file):
        """Contain common code for testing results of reading a .pth file"""
        self.assertIn(pth_file.imported, sys.modules,
                      "%s not in sys.modules" % pth_file.imported)
        self.assertIn(site.makepath(pth_file.good_dir_path)[0], sys.path)
        self.assertFalse(os.path.exists(pth_file.bad_dir_path))

    def test_addpackage(self):
        # Make sure addpackage() imports if the line starts with 'import',
        # adds directories to sys.path for any line in the file that is not a
        # comment or import that is a valid directory name for where the .pth
        # file resides; invalid directories are not added
        pth_file = PthFile()
        pth_file.cleanup(prep=True)  # to make sure that nothing is
                                     # pre-existing that shouldn't be
        try:
            pth_file.create()
            site.addpackage(pth_file.base_dir, pth_file.filename, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()

    def make_pth(self, contents, pth_dir='.', pth_name=TESTFN):
        # Create a .pth file and return its (abspath, basename).
        pth_dir = os.path.abspath(pth_dir)
        pth_basename = pth_name + '.pth'
        pth_fn = os.path.join(pth_dir, pth_basename)
        pth_file = open(pth_fn, 'w', encoding='utf-8')
        self.addCleanup(lambda: os.remove(pth_fn))
        pth_file.write(contents)
        pth_file.close()
        return pth_dir, pth_basename

    def test_addpackage_import_bad_syntax(self):
        # Issue 10642: a syntax error in a .pth 'import' line must be
        # reported on stderr with file name, line number and traceback.
        pth_dir, pth_fn = self.make_pth("import bad)syntax\n")
        with captured_stderr() as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegex(err_out.getvalue(), "line 1")
        self.assertRegex(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: the previous two should be independent checks so that the
        # order doesn't matter. The next three could be a single check
        # but my regex foo isn't good enough to write it.
        self.assertRegex(err_out.getvalue(), 'Traceback')
        self.assertRegex(err_out.getvalue(), r'import bad\)syntax')
        self.assertRegex(err_out.getvalue(), 'SyntaxError')

    def test_addpackage_import_bad_exec(self):
        # Issue 10642: a failing import in a .pth file is reported, with the
        # correct line number (2 here).
        pth_dir, pth_fn = self.make_pth("randompath\nimport nosuchmodule\n")
        with captured_stderr() as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegex(err_out.getvalue(), "line 2")
        self.assertRegex(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: ditto previous XXX comment.
        self.assertRegex(err_out.getvalue(), 'Traceback')
        self.assertRegex(err_out.getvalue(), 'ImportError')

    @unittest.skipIf(sys.platform == "win32", "Windows does not raise an "
                      "error for file paths containing null characters")
    def test_addpackage_import_bad_pth_file(self):
        # Issue 5258: a null byte in a .pth entry must be reported, not crash.
        pth_dir, pth_fn = self.make_pth("abc\x00def\n")
        with captured_stderr() as err_out:
            site.addpackage(pth_dir, pth_fn, set())
        self.assertRegex(err_out.getvalue(), "line 1")
        self.assertRegex(err_out.getvalue(),
            re.escape(os.path.join(pth_dir, pth_fn)))
        # XXX: ditto previous XXX comment.
        self.assertRegex(err_out.getvalue(), 'Traceback')
        self.assertRegex(err_out.getvalue(), 'TypeError')

    def test_addsitedir(self):
        # Same tests for test_addpackage since addsitedir() essentially just
        # calls addpackage() for every .pth file in the directory
        pth_file = PthFile()
        pth_file.cleanup(prep=True)  # Make sure that nothing is pre-existing
                                     # that is tested for
        try:
            pth_file.create()
            site.addsitedir(pth_file.base_dir, set())
            self.pth_file_tests(pth_file)
        finally:
            pth_file.cleanup()

    def test_s_option(self):
        # The user site dir must be on sys.path by default, and absent when
        # -s or PYTHONNOUSERSITE=1 is given; PYTHONUSERBASE must relocate it.
        usersite = site.USER_SITE
        self.assertIn(usersite, sys.path)

        env = os.environ.copy()
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 1)

        env = os.environ.copy()
        rc = subprocess.call([sys.executable, '-s', '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 0)

        env = os.environ.copy()
        env["PYTHONNOUSERSITE"] = "1"
        rc = subprocess.call([sys.executable, '-c',
            'import sys; sys.exit(%r in sys.path)' % usersite],
            env=env)
        self.assertEqual(rc, 0)

        env = os.environ.copy()
        env["PYTHONUSERBASE"] = "/tmp"
        rc = subprocess.call([sys.executable, '-c',
            'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
            env=env)
        self.assertEqual(rc, 1)

    def test_getuserbase(self):
        site.USER_BASE = None
        user_base = site.getuserbase()

        # the call sets site.USER_BASE
        self.assertEqual(site.USER_BASE, user_base)

        # let's set PYTHONUSERBASE and see if it uses it
        site.USER_BASE = None
        import sysconfig
        sysconfig._CONFIG_VARS = None

        with EnvironmentVarGuard() as environ:
            environ['PYTHONUSERBASE'] = 'xoxo'
            self.assertTrue(site.getuserbase().startswith('xoxo'),
                            site.getuserbase())

    def test_getusersitepackages(self):
        site.USER_SITE = None
        site.USER_BASE = None
        user_site = site.getusersitepackages()

        # the call sets USER_BASE *and* USER_SITE
        self.assertEqual(site.USER_SITE, user_site)
        self.assertTrue(user_site.startswith(site.USER_BASE), user_site)

    def test_getsitepackages(self):
        # Platform-specific layout of the site-packages directories derived
        # from a dummy prefix.
        site.PREFIXES = ['xoxo']
        dirs = site.getsitepackages()

        if sys.platform in ('os2emx', 'riscos'):
            self.assertEqual(len(dirs), 1)
            wanted = os.path.join('xoxo', 'Lib', 'site-packages')
            self.assertEqual(dirs[0], wanted)
        elif os.sep == '/':
            self.assertEqual(len(dirs), 2)
            wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[0], wanted)
            wanted = os.path.join('xoxo', 'lib', 'site-python')
            self.assertEqual(dirs[1], wanted)
        else:
            self.assertEqual(len(dirs), 2)
            self.assertEqual(dirs[0], 'xoxo')
            wanted = os.path.join('xoxo', 'lib', 'site-packages')
            self.assertEqual(dirs[1], wanted)

        # let's try the specific Apple location
        if (sys.platform == "darwin" and
            sysconfig.get_config_var("PYTHONFRAMEWORK")):
            site.PREFIXES = ['Python.framework']
            dirs = site.getsitepackages()
            self.assertEqual(len(dirs), 3)
            wanted = os.path.join('/Library', 'Python', sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[2], wanted)
class PthFile(object):
    """Helper class for handling testing of .pth files"""

    def __init__(self, filename_base=TESTFN, imported="time",
                 good_dirname="__testdir__", bad_dirname="__bad"):
        """Initialize instance variables"""
        self.filename = filename_base + ".pth"
        self.base_dir = os.path.abspath('')
        self.file_path = os.path.join(self.base_dir, self.filename)
        self.imported = imported
        self.good_dirname = good_dirname
        self.bad_dirname = bad_dirname
        self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
        self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)
        # Fix: initialize here so cleanup() is safe even when
        # cleanup(prep=True) was never called first (previously that path
        # raised AttributeError on self.imported_module).
        self.imported_module = None

    def create(self):
        """Create a .pth file with a comment, blank lines, an ``import
        <self.imported>``, a line with self.good_dirname, and a line with
        self.bad_dirname.

        Creation of the directory for self.good_dir_path (based off of
        self.good_dirname) is also performed.

        Make sure to call self.cleanup() to undo anything done by this method.

        """
        # `with` guarantees the handle is closed even if a print() fails.
        with open(self.file_path, 'w') as FILE:
            print("#import @bad module name", file=FILE)
            print("\n", file=FILE)
            print("import %s" % self.imported, file=FILE)
            print(self.good_dirname, file=FILE)
            print(self.bad_dirname, file=FILE)
        os.mkdir(self.good_dir_path)

    def cleanup(self, prep=False):
        """Make sure that the .pth file is deleted, self.imported is not in
        sys.modules, and that both self.good_dirname and self.bad_dirname are
        not existing directories."""
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        if prep:
            # Stash (and unload) any pre-existing module so a later plain
            # cleanup() call can restore it.
            self.imported_module = sys.modules.get(self.imported)
            if self.imported_module:
                del sys.modules[self.imported]
        else:
            if self.imported_module:
                sys.modules[self.imported] = self.imported_module
        if os.path.exists(self.good_dir_path):
            os.rmdir(self.good_dir_path)
        if os.path.exists(self.bad_dir_path):
            os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
    """Test side-effects from importing 'site'."""

    def setUp(self):
        """Make a copy of sys.path"""
        self.sys_path = sys.path[:]

    def tearDown(self):
        """Restore sys.path"""
        sys.path[:] = self.sys_path

    def test_abs_paths(self):
        # Make sure all imported modules have their __file__ and __cached__
        # attributes as absolute paths.  Arranging to put the Lib directory on
        # PYTHONPATH would cause the os module to have a relative path for
        # __file__ if abs_paths() does not get run.  sys and builtins (the
        # only other modules imported before site.py runs) do not have
        # __file__ or __cached__ because they are built-in.
        parent = os.path.relpath(os.path.dirname(os.__file__))
        env = os.environ.copy()
        env['PYTHONPATH'] = parent
        code = ('import os, sys',
            # use ASCII to avoid locale issues with non-ASCII directories
            'os_file = os.__file__.encode("ascii", "backslashreplace")',
            r'sys.stdout.buffer.write(os_file + b"\n")',
            'os_cached = os.__cached__.encode("ascii", "backslashreplace")',
            r'sys.stdout.buffer.write(os_cached + b"\n")')
        command = '\n'.join(code)
        # First, prove that with -S (no 'import site'), the paths are
        # relative.
        proc = subprocess.Popen([sys.executable, '-S', '-c', command],
                                env=env,
                                stdout=subprocess.PIPE)
        stdout, stderr = proc.communicate()

        self.assertEqual(proc.returncode, 0)
        os__file__, os__cached__ = stdout.splitlines()[:2]
        self.assertFalse(os.path.isabs(os__file__))
        self.assertFalse(os.path.isabs(os__cached__))
        # Now, with 'import site', it works.
        proc = subprocess.Popen([sys.executable, '-c', command],
                                env=env,
                                stdout=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        self.assertEqual(proc.returncode, 0)
        os__file__, os__cached__ = stdout.splitlines()[:2]
        self.assertTrue(os.path.isabs(os__file__))
        self.assertTrue(os.path.isabs(os__cached__))

    def test_no_duplicate_paths(self):
        # No duplicate paths should exist in sys.path
        # Handled by removeduppaths()
        site.removeduppaths()
        seen_paths = set()
        for path in sys.path:
            self.assertNotIn(path, seen_paths)
            seen_paths.add(path)

    def test_add_build_dir(self):
        # Test that the build directory's Modules directory is used when it
        # should be.
        # XXX: implement
        pass

    def test_setting_quit(self):
        # 'quit' and 'exit' should be injected into builtins
        self.assertTrue(hasattr(builtins, "quit"))
        self.assertTrue(hasattr(builtins, "exit"))

    def test_setting_copyright(self):
        # 'copyright' and 'credits' should be in builtins
        self.assertTrue(hasattr(builtins, "copyright"))
        self.assertTrue(hasattr(builtins, "credits"))

    def test_setting_help(self):
        # 'help' should be set in builtins
        self.assertTrue(hasattr(builtins, "help"))

    def test_aliasing_mbcs(self):
        # On Windows with a cp* default locale, site.py must alias the
        # 'mbcs' codec; the for/else fails only if no alias was found.
        if sys.platform == "win32":
            import locale
            if locale.getdefaultlocale()[1].startswith('cp'):
                for value in encodings.aliases.aliases.values():
                    if value == "mbcs":
                        break
                else:
                    self.fail("did not alias mbcs")

    def test_sitecustomize_executed(self):
        # If sitecustomize is available, it should have been imported.
        if "sitecustomize" not in sys.modules:
            try:
                import sitecustomize
            except ImportError:
                pass
            else:
                self.fail("sitecustomize not imported automatically")
def test_main():
    """Run both test-site test case classes."""
    cases = (HelperFunctionsTests, ImportSideEffectTests)
    run_unittest(*cases)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
encukou/samba | python/samba/tests/upgradeprovisionneeddc.py | 32 | 7461 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.upgradeprovision that need a DC."""
import os
import re
import shutil
from samba import param
from samba.credentials import Credentials
from samba.auth import system_session
from samba.provision import getpolicypath,find_provision_key_parameters
from samba.upgradehelpers import (get_paths, get_ldbs,
identic_rename,
updateOEMInfo, getOEMInfo, update_gpo,
delta_update_basesamdb,
update_dns_account_password,
search_constructed_attrs_stored,
increment_calculated_keyversion_number)
from samba.tests import env_loadparm, TestCaseInTempDir
from samba.tests.provision import create_dummy_secretsdb
import ldb
def dummymessage(a=None, b=None):
    """No-op message sink handed to upgrade helpers that expect a logger."""
    return None
# Path to the ad_dc_ntvfs test environment's smb.conf inside the selftest tree.
smb_conf_path = "%s/%s/%s" % (os.environ["SELFTEST_PREFIX"], "ad_dc_ntvfs", "etc/smb.conf")
class UpgradeProvisionBasicLdbHelpersTestCase(TestCaseInTempDir):
    """Some simple tests for individual functions in the provisioning code.
    """

    def test_get_ldbs(self):
        # Opening the sam/secrets/idmap databases against the running DC
        # must succeed without raising.
        paths = get_paths(param, None, smb_conf_path)
        creds = Credentials()
        lp = env_loadparm()
        creds.guess(lp)
        get_ldbs(paths, creds, system_session(), lp)

    def test_find_key_param(self):
        # find_provision_key_parameters() must recover the realm, root DN,
        # DC policy GUID and NTDS GUID from the databases.
        paths = get_paths(param, None, smb_conf_path)
        creds = Credentials()
        lp = env_loadparm()
        creds.guess(lp)
        rootdn = "dc=samba,dc=example,dc=com"
        ldbs = get_ldbs(paths, creds, system_session(), lp)
        names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
                                              paths, smb_conf_path, lp)
        self.assertEquals(names.realm, "SAMBA.EXAMPLE.COM")
        self.assertEquals(str(names.rootdn).lower(), rootdn.lower())
        self.assertNotEquals(names.policyid_dc, None)
        self.assertNotEquals(names.ntdsguid, "")
class UpgradeProvisionWithLdbTestCase(TestCaseInTempDir):
    """Tests for provision-upgrade helpers that need live sam/secrets ldbs.

    setUp opens the provision databases once per test; tearDown removes
    any ldb/tdb files left in the temp directory.
    """

    def _getEmptyDbName(self):
        """Return a path inside the temp dir for a scratch sam.ldb."""
        return os.path.join(self.tempdir, "sam.ldb")

    def setUp(self):
        super(UpgradeProvisionWithLdbTestCase, self).setUp()
        paths = get_paths(param, None, smb_conf_path)
        self.creds = Credentials()
        self.lp = env_loadparm()
        self.creds.guess(self.lp)
        self.paths = paths
        # Open the provision databases and extract the key naming
        # parameters used by the individual tests below.
        self.ldbs = get_ldbs(paths, self.creds, system_session(), self.lp)
        self.names = find_provision_key_parameters(self.ldbs.sam,
            self.ldbs.secrets, self.ldbs.idmap, paths, smb_conf_path,
            self.lp)
        self.referencedb = create_dummy_secretsdb(
            os.path.join(self.tempdir, "ref.ldb"))

    def test_search_constructed_attrs_stored(self):
        """Constructed attributes must not be reported as stored."""
        hashAtt = search_constructed_attrs_stored(self.ldbs.sam,
                                                  self.names.rootdn,
                                                  ["msds-KeyVersionNumber"])
        self.assertFalse(hashAtt.has_key("msds-KeyVersionNumber"))

    def test_increment_calculated_keyversion_number(self):
        """Key version numbers are bumped up to the target, never down."""
        dn = "CN=Administrator,CN=Users,%s" % self.names.rootdn
        # We construct a simple hash for the user administrator
        hash = {}
        # And we want the version to be 140
        hash[dn.lower()] = 140
        increment_calculated_keyversion_number(self.ldbs.sam,
                                               self.names.rootdn,
                                               hash)
        self.assertEqual(self.ldbs.sam.get_attribute_replmetadata_version(dn,
                            "unicodePwd"),
                            140)
        # This function should not decrement the version
        hash[dn.lower()] = 130
        increment_calculated_keyversion_number(self.ldbs.sam,
                                               self.names.rootdn,
                                               hash)
        self.assertEqual(self.ldbs.sam.get_attribute_replmetadata_version(dn,
                            "unicodePwd"),
                            140)

    def test_identic_rename(self):
        """Renaming a DN to itself must leave exactly one matching entry."""
        rootdn = "DC=samba,DC=example,DC=com"
        guestDN = ldb.Dn(self.ldbs.sam, "CN=Guest,CN=Users,%s" % rootdn)
        identic_rename(self.ldbs.sam, guestDN)
        res = self.ldbs.sam.search(expression="(name=Guest)", base=rootdn,
                                   scope=ldb.SCOPE_SUBTREE, attrs=["dn"])
        self.assertEquals(len(res), 1)
        self.assertEquals(str(res[0]["dn"]), "CN=Guest,CN=Users,%s" % rootdn)

    def test_delta_update_basesamdb(self):
        """delta_update_basesamdb must run cleanly against an empty target."""
        dummysampath = self._getEmptyDbName()
        delta_update_basesamdb(self.paths.samdb, dummysampath,
                               self.creds, system_session(), self.lp,
                               dummymessage)

    def test_update_gpo_simple(self):
        """update_gpo recreates a deleted default policy directory."""
        dir = getpolicypath(self.paths.sysvol, self.names.dnsdomain,
                            self.names.policyid)
        shutil.rmtree(dir)
        self.assertFalse(os.path.isdir(dir))
        update_gpo(self.paths, self.ldbs.sam, self.names, self.lp, dummymessage)
        self.assertTrue(os.path.isdir(dir))

    def test_update_gpo_acl(self):
        """update_gpo must cope with a freshly created sysvol tree."""
        path = os.path.join(self.tempdir, "testupdategpo")
        # Temporarily point sysvol at the scratch tree; restored below.
        save = self.paths.sysvol
        self.paths.sysvol = path
        os.mkdir(path)
        os.mkdir(os.path.join(path, self.names.dnsdomain))
        os.mkdir(os.path.join(os.path.join(path, self.names.dnsdomain),
                              "Policies"))
        update_gpo(self.paths, self.ldbs.sam, self.names, self.lp, dummymessage)
        shutil.rmtree(path)
        self.paths.sysvol = save

    def test_getOEMInfo(self):
        """A provision should always carry a non-empty oEMInformation."""
        realm = self.lp.get("realm")
        basedn = "DC=%s" % realm.replace(".", ", DC=")
        oem = getOEMInfo(self.ldbs.sam, basedn)
        self.assertNotEquals(oem, "")

    def test_update_dns_account(self):
        """update_dns_account_password must run against the live dbs."""
        update_dns_account_password(self.ldbs.sam, self.ldbs.secrets,
                                    self.names)

    def test_updateOEMInfo(self):
        """updateOEMInfo must change the value and record the upgrade."""
        realm = self.lp.get("realm")
        basedn = "DC=%s" % realm.replace(".", ", DC=")
        oem = getOEMInfo(self.ldbs.sam, basedn)
        updateOEMInfo(self.ldbs.sam, basedn)
        oem2 = getOEMInfo(self.ldbs.sam, basedn)
        self.assertNotEquals(str(oem), str(oem2))
        self.assertTrue(re.match(".*upgrade to.*", str(oem2)))

    def tearDown(self):
        # Remove every database file a test may have left behind.
        for name in ["ref.ldb", "secrets.ldb", "secrets.tdb", "secrets.tdb.bak", "secrets.ntdb", "sam.ldb"]:
            path = os.path.join(self.tempdir, name)
            if os.path.exists(path):
                os.unlink(path)
        super(UpgradeProvisionWithLdbTestCase, self).tearDown()
| gpl-3.0 |
fw1121/escher.github.io | escher/server.py | 1 | 9485 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from escher.plots import (Builder, local_index, model_json_for_name,
map_json_for_name)
from escher.urls import get_url
from escher.urls import root_directory
import os, subprocess
from os.path import join
import tornado.ioloop
from tornado.web import RequestHandler, HTTPError, Application, asynchronous
from tornado.httpclient import AsyncHTTPClient
from tornado import gen
import tornado.escape
from tornado.options import define, options, parse_command_line
import json
import re
from jinja2 import Environment, PackageLoader
from mimetypes import guess_type
from escher.version import __version__, __schema_version__, __map_model_version__
# set up jinja2 template location (templates ship inside the escher package)
env = Environment(loader=PackageLoader('escher', 'templates'))

# server configuration defaults; run() overwrites PORT/PUBLIC at startup
NO_CACHE = False          # when True, responses carry no-cache headers
PORT = 7778               # default listen port
PUBLIC = False            # when True, bind all interfaces, not just localhost
# developer mode is enabled by the presence of a marker file in the repo root
CAN_DEV = os.path.exists(join(root_directory, '.can_dev'))
def run(port=PORT, public=PUBLIC):
    """Start the Tornado server and block until interrupted.

    :param port: TCP port to listen on; also stored in the module-level
        PORT so handlers can build local URLs.
    :param public: when True listen on all addresses, otherwise only on
        localhost.
    """
    global PORT
    global PUBLIC
    PORT = port
    PUBLIC = public
    print('serving directory %s on port %d' % (root_directory, PORT))
    address = None if public else "localhost"
    application.listen(port, address)
    loop = tornado.ioloop.IOLoop.instance()
    try:
        loop.start()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop a local server; exit politely.
        print("bye!")
def stop():
    """Stop the running Tornado IOLoop (counterpart of run())."""
    loop = tornado.ioloop.IOLoop.instance()
    loop.stop()
class BaseHandler(RequestHandler):
    """Common response helpers shared by every handler in this app."""

    def serve_path(self, path):
        """Serve the file at *path* with a guessed Content-Type.

        Raises HTTPError(404) when the file does not exist.
        """
        if not os.path.isfile(path):
            raise HTTPError(404)
        # read the whole file as raw bytes
        with open(path, "rb") as handle:
            data = handle.read()
        # guess the mimetype from the filename; fall back to a generic
        # binary type when the extension is unknown
        guessed = guess_type(path, strict=False)[0]
        if guessed is None:
            guessed = "application/octet-stream"
        self.set_header("Content-Type", guessed)
        self.serve(data)

    def serve(self, data):
        """Write *data*, set CORS/cache headers, and finish the request."""
        if NO_CACHE:
            self.set_header('Cache-Control',
                            'no-store, no-cache, must-revalidate, max-age=0')
        self.set_header('Access-Control-Allow-Origin', '*')
        self.write(data)
        self.finish()
class IndexHandler(BaseHandler):
    """Render the landing page listing available maps and models."""

    @asynchronous
    @gen.coroutine
    def get(self):
        # fetch the remote index of organisms, maps, and models; fall back
        # to a JSON null when the server is unreachable or errors out
        response = yield gen.Task(AsyncHTTPClient().fetch, get_url('server_index', protocol='http'))
        if response.code == 200 and response.body is not None:
            server_index = response.body.decode('utf-8')
        else:
            server_index = json.dumps(None)
        # get the locally cached maps and models
        index = local_index()
        # render the template with every asset URL resolved locally
        template = env.get_template('index.html')
        data = template.render(d3=get_url('d3', 'local'),
                               boot_css=get_url('boot_css', 'local'),
                               index_css=get_url('index_css', 'local'),
                               favicon=get_url('favicon', 'local'),
                               logo=get_url('logo', 'local'),
                               documentation=get_url('documentation', protocol='https'),
                               github=get_url('github'),
                               github_releases=get_url('github_releases'),
                               index_js=get_url('index_js', 'local'),
                               index_gh_pages_js=get_url('index_gh_pages_js', 'local'),
                               map_download=get_url('map_download', 'local'),
                               server_index=server_index,
                               local_index=json.dumps(index),
                               can_dev=CAN_DEV,
                               can_dev_js=json.dumps(CAN_DEV),
                               version=__version__,
                               web_version=False)
        self.set_header("Content-Type", "text/html")
        self.serve(data)
class BuilderHandler(BaseHandler):
    """Serve the interactive map builder (/builder) or the read-only
    viewer (/viewer), configured from the request's query arguments."""

    @asynchronous
    @gen.coroutine
    def get(self, kind, path):
        # builder vs. viewer: only the builder allows editing
        enable_editing = (kind == 'builder')

        # Scalar Builder options, coercing 'true'/'false' strings to bools
        builder_kwargs = {}
        for a in ['starting_reaction', 'model_name', 'map_name', 'map_json',
                  'reaction_no_data_color', 'reaction_no_data_size',
                  'metabolite_no_data_color', 'metabolite_no_data_size',
                  'hide_secondary_nodes']:
            args = self.get_arguments(a)
            if len(args) == 1:
                builder_kwargs[a] = (True if args[0].lower() == 'true' else
                                     (False if args[0].lower() == 'false' else
                                      args[0]))
        # array args arrive with a trailing '[]' in the query string
        for a in ['quick_jump', 'metabolite_size_range', 'metabolite_color_range',
                  'reaction_size_range', 'reaction_color_range', 'gene_styles']:
            args = self.get_arguments(a + '[]')
            if len(args) > 0:
                builder_kwargs[a] = args
        # js source selection (web, local, or dev)
        args = self.get_arguments('js_source')
        js_source = args[0] if len(args) == 1 else 'web'
        # if the server is running locally, then the embedded css must be loaded
        # asynchronously using the same server thread.
        if js_source in ['dev', 'local']:
            global PORT
            url = get_url('builder_embed_css',
                          source='local',
                          local_host='http://localhost:%d' % PORT)
            response = yield gen.Task(AsyncHTTPClient().fetch, url)
            if response.code != 200 or response.body is None:
                raise Exception('Could not load embedded_css from %s' % url)
            builder_kwargs['embedded_css'] = (response.body
                                              .decode('utf-8')
                                              .replace('\n', ' '))

        # example data
        def load_data_file(rel_path):
            """Load a JSON file with relative path; return None on failure."""
            # local import: this module does not import logging at the top,
            # and the original code raised NameError inside the handler
            import logging
            try:
                with open(join(root_directory, rel_path), 'r') as f:
                    return json.load(f)
            # BUG FIX: the original used a bare `except:` (swallowing even
            # KeyboardInterrupt) and then called the never-imported
            # `logging` module, which raised NameError on any failure.
            except (IOError, OSError, ValueError):
                logging.warning('Could not load example_data file: %s' % rel_path)
                return None

        if len(self.get_arguments('example_data')) > 0:
            r_filepath = 'escher/example_data/reaction_data_iJO1366.json'
            builder_kwargs['reaction_data'] = load_data_file(r_filepath)
            m_filepath = 'escher/example_data/metabolite_data_iJO1366.json'
            builder_kwargs['metabolite_data'] = load_data_file(m_filepath)

        # make the builder
        builder = Builder(safe=True, **builder_kwargs)

        # display options, overridable from the query string
        display_kwargs = {'minified_js': True,
                          'scroll_behavior': 'pan',
                          'menu': 'all'}
        for a in ['menu', 'scroll_behavior', 'minified_js',
                  'auto_set_data_domain', 'never_ask_before_quit']:
            args = self.get_arguments(a)
            if len(args) == 1:
                display_kwargs[a] = (True if args[0].lower() == 'true' else
                                     (False if args[0].lower() == 'false' else
                                      args[0]))

        html = builder._get_html(js_source=js_source, enable_editing=enable_editing,
                                 enable_keys=True, html_wrapper=True, fill_screen=True,
                                 height='100%', **display_kwargs)
        self.set_header("Content-Type", "text/html")
        self.serve(html)
class MapModelHandler(BaseHandler):
    """Serve cached map or model JSON for paths shaped like
    <kind>/<organism>/<name>."""

    def get(self, path):
        try:
            kind, organism, name = path.strip('/').split('/')
        except (TypeError, ValueError):
            raise Exception('invalid path %s' % path)
        # load the requested JSON through a Builder instance
        if kind == 'maps':
            payload = Builder(map_name=name).loaded_map_json
        else:
            payload = Builder(model_name=name).loaded_model_json
        self.set_header('Content-Type', 'application/json')
        self.serve(payload)
class StaticHandler(BaseHandler):
    """Serve raw files from inside the escher package directory."""

    def get(self, path):
        full_path = join(root_directory, path)
        print('getting path %s' % full_path)
        self.serve_path(full_path)
class DocsHandler(BaseHandler):
    """Serve the built Sphinx documentation from docs/_build/html."""

    def get(self, path):
        full_path = join(root_directory, 'docs', '_build', 'html', path)
        print('getting path %s' % full_path)
        self.serve_path(full_path)
# Tornado application settings.
# BUG FIX: the original used the string "False", which is truthy and would
# silently enable Tornado's debug mode; use the boolean instead.
settings = {"debug": False}

# URL routing: static assets, the builder/viewer pages, versioned
# map/model JSON, built docs, and the index page.
application = Application([
    (r"/(escher/lib/.*)", StaticHandler),
    (r"/(escher/fonts/.*)", StaticHandler),
    (r"/(escher/js/.*)", StaticHandler),
    (r"/(escher/css/.*)", StaticHandler),
    (r"/(escher/resources/.*)", StaticHandler),
    (r"/(escher/jsonschema/.*)", StaticHandler),
    (r"/(builder|viewer)(.*)", BuilderHandler),
    (r"/%s/%s(/.*)" % (__schema_version__, __map_model_version__), MapModelHandler),
    (r"/docs/(.*)", DocsHandler),
    (r"/", IndexHandler),
], **settings)
if __name__ == "__main__":
    # declare command-line options, parse them, then start the server
    define("port", default=PORT, type=int, help="Port to serve on.")
    define("public", default=PUBLIC, type=bool,
           help=("If False, listen only on localhost. If True, listen on "
                 "all available addresses."))
    parse_command_line()
    run(port=options.port, public=options.public)
| mit |
damonkohler/sl4a | python/gdata/src/gdata/contacts/__init__.py | 134 | 13897 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to ElementWrapper objects used with Google Contacts."""
__author__ = 'dbrattli (Dag Brattli)'
import atom
import gdata
## Constants from http://code.google.com/apis/gdata/elements.html ##

# Generic "rel" values shared by several element types.
REL_HOME = 'http://schemas.google.com/g/2005#home'
REL_WORK = 'http://schemas.google.com/g/2005#work'
REL_OTHER = 'http://schemas.google.com/g/2005#other'

# Instant-messaging protocol identifiers.
IM_AIM = 'http://schemas.google.com/g/2005#AIM' # AOL Instant Messenger protocol
IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol
IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol
IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol
IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol
# Google Talk protocol
IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol
IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol

# Link relations used for contact photos.
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'

# Phone number type identifiers.
PHONE_CAR = 'http://schemas.google.com/g/2005#car' # Number of a car phone.
PHONE_FAX = 'http://schemas.google.com/g/2005#fax'
# Unknown or unspecified type, such as a business phone number that doesn't
# belong to a particular person.
PHONE_GENERAL = 'http://schemas.google.com/g/2005#general'
PHONE_HOME = REL_HOME
PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax'
# Phone number that makes sense only in a context known to the user (such as
# an enterprise PBX).
PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension'
PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile'
# A special type of number for which no other rel value makes sense.
# For example, a TTY device. label can be used to indicate the actual type.
PHONE_OTHER = REL_OTHER
PHONE_PAGER = 'http://schemas.google.com/g/2005#pager'
PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite'
PHONE_VOIP = 'http://schemas.google.com/g/2005#voip'
PHONE_WORK = REL_WORK
PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax'

# XML namespace for the Contacts-specific elements below.
CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
class OrgName(atom.AtomBase):
    """The gd:orgName element: the name of an organization."""

    _tag = 'orgName'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()

    def __init__(self, text=None,
                 extension_elements=None, extension_attributes=None):
        """Store the element text plus any extension markup."""
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class OrgTitle(atom.AtomBase):
    """The gd:orgTitle element: a person's title within an organization."""

    _tag = 'orgTitle'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()

    def __init__(self, text=None,
                 extension_elements=None, extension_attributes=None):
        """Store the element text plus any extension markup."""
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class Organization(atom.AtomBase):
    """The gd:organization element: a contact's organizational affiliation.

    Carries optional orgName/orgTitle children and rel/label/primary
    attributes, mirroring the GData schema.
    """

    _tag = 'organization'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['rel'] = 'rel'
    _attributes['label'] = 'label'
    _attributes['primary'] = 'primary'
    _children['{%s}orgName' % gdata.GDATA_NAMESPACE] = ('org_name', OrgName)
    _children['{%s}orgTitle' % gdata.GDATA_NAMESPACE] = ('org_title', OrgTitle)

    def __init__(self, rel=None, primary='false', org_name=None, org_title=None,
                 label=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """Initialize the element; rel defaults to REL_OTHER when absent."""
        self.rel = rel if rel else REL_OTHER
        self.primary = primary
        self.label = label
        self.org_name = org_name
        self.org_title = org_title
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class PostalAddress(atom.AtomBase):
    """The gd:postalAddress element: a free-form postal address."""

    _tag = 'postalAddress'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['primary'] = 'primary'
    _attributes['rel'] = 'rel'

    def __init__(self, primary=None, rel=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Initialize the element; rel defaults to REL_OTHER when absent."""
        self.primary = primary
        self.rel = rel if rel else REL_OTHER
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class IM(atom.AtomBase):
    """The gd:im element: an instant-messaging address.

    `protocol` should be one of the IM_* constants defined above.
    """

    _tag = 'im'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['address'] = 'address'
    _attributes['primary'] = 'primary'
    _attributes['protocol'] = 'protocol'
    _attributes['label'] = 'label'
    _attributes['rel'] = 'rel'

    def __init__(self, primary=None, rel=None, address=None, protocol=None,
                 label=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """Initialize the element; rel defaults to REL_OTHER when absent."""
        self.address = address
        self.protocol = protocol
        self.label = label
        self.primary = primary
        self.rel = rel if rel else REL_OTHER
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class Email(atom.AtomBase):
    """The gd:email element: an e-mail address for a contact."""

    _tag = 'email'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['address'] = 'address'
    _attributes['primary'] = 'primary'
    _attributes['rel'] = 'rel'
    _attributes['label'] = 'label'

    def __init__(self, primary=None, rel=None, address=None, text=None,
                 label=None, extension_elements=None, extension_attributes=None):
        """Initialize the element; rel defaults to REL_OTHER when absent."""
        self.address = address
        self.label = label
        self.primary = primary
        self.rel = rel if rel else REL_OTHER
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class PhoneNumber(atom.AtomBase):
    """The gd:phoneNumber element: a phone number for a contact.

    `rel` should be one of the PHONE_* constants defined above.
    """

    _tag = 'phoneNumber'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['primary'] = 'primary'
    _attributes['rel'] = 'rel'

    def __init__(self, primary=None, rel=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Initialize the element; rel defaults to REL_OTHER when absent."""
        self.primary = primary
        self.rel = rel if rel else REL_OTHER
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class Deleted(atom.AtomBase):
    """The gd:deleted marker element: present when an entry was deleted."""

    _tag = 'deleted'
    _namespace = gdata.GDATA_NAMESPACE

    def __init__(self, text=None,
                 extension_elements=None, extension_attributes=None):
        """Store the element text plus any extension markup."""
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class GroupMembershipInfo(atom.AtomBase):
    """The gContact:groupMembershipInfo element.

    Records a contact's membership in a group: `href` points at the group
    entry and `deleted` marks a removed membership.
    """

    _tag = 'groupMembershipInfo'
    _namespace = CONTACTS_NAMESPACE
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['deleted'] = 'deleted'
    _attributes['href'] = 'href'

    def __init__(self, deleted=None, href=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Store the membership attributes plus any extension markup."""
        self.href = href
        self.deleted = deleted
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class Birthday(atom.AtomBase):
    """The gContact:birthday element; `when` holds the birthday date."""

    _tag = 'birthday'
    _namespace = CONTACTS_NAMESPACE
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['when'] = 'when'

    def __init__(self, when=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """Store the date attribute plus any extension markup."""
        self.when = when
        self.text = text
        self.extension_attributes = extension_attributes if extension_attributes else {}
        self.extension_elements = extension_elements if extension_elements else []
class ContactEntry(gdata.BatchEntry):
    """A Google Contact flavor of an Atom Entry.

    Registers the contact-specific child elements (addresses, phone
    numbers, IMs, group memberships, etc.) on top of the batch-capable
    Atom entry.
    """

    _children = gdata.BatchEntry._children.copy()
    _children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address',
                                                              [PostalAddress])
    _children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = ('phone_number',
                                                            [PhoneNumber])
    _children['{%s}organization' % gdata.GDATA_NAMESPACE] = ('organization',
                                                             Organization)
    _children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email])
    _children['{%s}im' % gdata.GDATA_NAMESPACE] = ('im', [IM])
    _children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted)
    _children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = (
        'group_membership_info', [GroupMembershipInfo])
    _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
        'extended_property', [gdata.ExtendedProperty])
    _children['{%s}birthday' % CONTACTS_NAMESPACE] = ('birthday', Birthday)

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None, email=None, postal_address=None,
                 deleted=None, organization=None, phone_number=None, im=None,
                 extended_property=None, group_membership_info=None, birthday=None,
                 batch_operation=None, batch_id=None, batch_status=None,
                 extension_elements=None, extension_attributes=None, text=None):
        # Delegate the common Atom/batch fields to the base class.
        gdata.BatchEntry.__init__(self, author=author, category=category,
            content=content, atom_id=atom_id, link=link, published=published,
            batch_operation=batch_operation, batch_id=batch_id,
            batch_status=batch_status, title=title, updated=updated)
        self.organization = organization
        self.deleted = deleted
        self.phone_number = phone_number or []
        self.postal_address = postal_address or []
        self.im = im or []
        self.extended_property = extended_property or []
        self.email = email or []
        self.group_membership_info = group_membership_info or []
        self.birthday = birthday
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}

    def GetPhotoLink(self):
        """Return the atom.Link for this contact's photo, or None."""
        for a_link in self.link:
            if a_link.rel == PHOTO_LINK_REL:
                return a_link
        return None

    def GetPhotoEditLink(self):
        """Return the atom.Link for editing the photo, or None."""
        for a_link in self.link:
            if a_link.rel == PHOTO_EDIT_LINK_REL:
                return a_link
        return None
def ContactEntryFromString(xml_string):
    """Deserialize a ContactEntry from its XML string representation."""
    entry = atom.CreateClassFromXMLString(ContactEntry, xml_string)
    return entry
class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder):
    """A Google Contacts feed flavor of an Atom Feed.

    Overrides the entry child so feed entries parse as ContactEntry.
    """

    _children = gdata.BatchFeed._children.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ContactEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        # Pure pass-through to the batch feed base class.
        gdata.BatchFeed.__init__(self, author=author, category=category,
                                 contributor=contributor, generator=generator,
                                 icon=icon, atom_id=atom_id, link=link,
                                 logo=logo, rights=rights, subtitle=subtitle,
                                 title=title, updated=updated, entry=entry,
                                 total_results=total_results,
                                 start_index=start_index,
                                 items_per_page=items_per_page,
                                 extension_elements=extension_elements,
                                 extension_attributes=extension_attributes,
                                 text=text)
def ContactsFeedFromString(xml_string):
    """Deserialize a ContactsFeed from its XML string representation."""
    feed = atom.CreateClassFromXMLString(ContactsFeed, xml_string)
    return feed
class GroupEntry(gdata.BatchEntry):
    """Represents a contact group."""

    _children = gdata.BatchEntry._children.copy()
    _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
        'extended_property', [gdata.ExtendedProperty])

    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None, rights=None,
                 source=None, summary=None, control=None, title=None, updated=None,
                 extended_property=None, batch_operation=None, batch_id=None,
                 batch_status=None,
                 extension_elements=None, extension_attributes=None, text=None):
        # NOTE(review): contributor/rights/source/summary/control and the
        # extension_* keyword args are accepted but not forwarded to the
        # base class or stored -- presumably dropped by accident upstream;
        # left unchanged here to preserve the published behavior.
        gdata.BatchEntry.__init__(self, author=author, category=category,
                                  content=content,
                                  atom_id=atom_id, link=link, published=published,
                                  batch_operation=batch_operation, batch_id=batch_id,
                                  batch_status=batch_status,
                                  title=title, updated=updated)
        self.extended_property = extended_property or []
def GroupEntryFromString(xml_string):
    """Deserialize a GroupEntry from its XML string representation."""
    entry = atom.CreateClassFromXMLString(GroupEntry, xml_string)
    return entry
class GroupsFeed(gdata.BatchFeed):
    """A Google contact groups feed flavor of an Atom Feed.

    Overrides the entry child so feed entries parse as GroupEntry.
    """
    _children = gdata.BatchFeed._children.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry])
def GroupsFeedFromString(xml_string):
    """Deserialize a GroupsFeed from its XML string representation."""
    feed = atom.CreateClassFromXMLString(GroupsFeed, xml_string)
    return feed
| apache-2.0 |
kawasaki2013/python-for-android-x86 | python-modules/twisted/twisted/words/protocols/jabber/ijabber.py | 54 | 5333 | # Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Public Jabber Interfaces.
"""
from zope.interface import Attribute, Interface
class IInitializer(Interface):
    """
    Marker interface for XML stream initializers.

    Initializers perform a step in getting the XML stream ready to be
    used for the exchange of XML stanzas.
    """
class IInitiatingInitializer(IInitializer):
    """
    Interface for XML stream initializers for the initiating entity.
    """

    xmlstream = Attribute("""The associated XML stream""")

    def initialize():
        """
        Initiate the initialization step.

        May return a deferred when the initialization is done asynchronously.
        """
class IIQResponseTracker(Interface):
    """
    IQ response tracker interface.

    The XMPP stanza C{iq} has a request-response nature that fits
    naturally with deferreds. You send out a request and when the response
    comes back a deferred is fired.

    The L{IQ} class implements a C{send} method that returns a deferred. This
    deferred is put in a dictionary that is kept in an L{XmlStream} object,
    keyed by the request stanza's C{id} attribute.

    An object providing this interface (usually an instance of L{XmlStream}),
    keeps the said dictionary and sets observers on the iq stanzas of type
    C{result} and C{error} and lets the callback fire the associated deferred.
    """
    iqDeferreds = Attribute("Dictionary of deferreds waiting for an iq "
                            "response")
class IXMPPHandler(Interface):
    """
    Interface for XMPP protocol handlers.

    Objects that provide this interface can be added to a stream manager to
    handle of (part of) an XMPP extension protocol.
    """

    parent = Attribute("""XML stream manager for this handler""")
    xmlstream = Attribute("""The managed XML stream""")

    def setHandlerParent(parent):
        """
        Set the parent of the handler.

        @type parent: L{IXMPPHandlerCollection}
        """

    def disownHandlerParent(parent):
        """
        Remove the parent of the handler.

        @type parent: L{IXMPPHandlerCollection}
        """

    def makeConnection(xs):
        """
        A connection over the underlying transport of the XML stream has been
        established.

        At this point, no traffic has been exchanged over the XML stream
        given in C{xs}.

        This should setup L{xmlstream} and call L{connectionMade}.

        @type xs: L{XmlStream<twisted.words.protocols.jabber.XmlStream>}
        """

    def connectionMade():
        """
        Called after a connection has been established.

        This method can be used to change properties of the XML Stream, its
        authenticator or the stream manager prior to stream initialization
        (including authentication).
        """

    def connectionInitialized():
        """
        The XML stream has been initialized.

        At this point, authentication was successful, and XML stanzas can be
        exchanged over the XML stream L{xmlstream}. This method can be
        used to setup observers for incoming stanzas.
        """

    def connectionLost(reason):
        """
        The XML stream has been closed.

        Subsequent use of L{parent.send} will result in data being queued
        until a new connection has been established.

        @type reason: L{twisted.python.failure.Failure}
        """
class IXMPPHandlerCollection(Interface):
    """
    Collection of handlers.

    Contains several handlers and manages their connection.
    """

    def __iter__():
        """
        Get an iterator over all child handlers.
        """

    def addHandler(handler):
        """
        Add a child handler.

        @type handler: L{IXMPPHandler}
        """

    def removeHandler(handler):
        """
        Remove a child handler.

        @type handler: L{IXMPPHandler}
        """
class IService(Interface):
    """
    External server-side component service interface.

    Services that provide this interface can be added to L{ServiceManager} to
    implement (part of) the functionality of the server-side component.
    """

    def componentConnected(xs):
        """
        Parent component has established a connection.

        At this point, authentication was successful, and XML stanzas
        can be exchanged over the XML stream L{xs}. This method can be used
        to setup observers for incoming stanzas.

        @param xs: XML Stream that represents the established connection.
        @type xs: L{xmlstream.XmlStream}
        """

    def componentDisconnected():
        """
        Parent component has lost the connection to the Jabber server.

        Subsequent use of C{self.parent.send} will result in data being
        queued until a new connection has been established.
        """

    def transportConnected(xs):
        """
        Parent component has established a connection over the underlying
        transport.

        At this point, no traffic has been exchanged over the XML stream. This
        method can be used to change properties of the XML Stream (in L{xs}),
        the service manager or it's authenticator prior to stream
        initialization (including authentication).
        """
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.