repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
FusionSP/android_external_chromium_org | third_party/tlslite/tlslite/utils/openssl_rsakey.py | 200 | 4670 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""OpenSSL/M2Crypto RSA implementation."""
from .cryptomath import *
from .rsakey import *
from .python_rsakey import Python_RSAKey
#copied from M2Crypto.util.py, so when we load the local copy of m2
#we can still use it
def password_callback(v, prompt1='Enter private key passphrase:',
                      prompt2='Verify passphrase:'):
    """Prompt on the terminal for a passphrase (M2Crypto-style callback).

    When *v* is truthy the passphrase is read twice and re-prompted until
    both entries match.  Returns the passphrase string, or None if the
    user aborts with Ctrl-C.
    """
    from getpass import getpass
    try:
        while True:
            first = getpass(prompt1)
            if not v:
                return first
            # Verification requested: only accept matching entries.
            if getpass(prompt2) == first:
                return first
    except KeyboardInterrupt:
        return None
if m2cryptoLoaded:

    class OpenSSL_RSAKey(RSAKey):
        """RSA key implementation backed by OpenSSL through M2Crypto's
        low-level ``m2`` bindings."""

        def __init__(self, n=0, e=0):
            # n and e must be supplied together (public key) or not at all.
            self.rsa = None
            self._hasPrivateKey = False
            if (n and not e) or (e and not n):
                raise AssertionError()
            if n and e:
                self.rsa = m2.rsa_new()
                m2.rsa_set_n(self.rsa, numberToMPI(n))
                m2.rsa_set_e(self.rsa, numberToMPI(e))

        def __del__(self):
            # Release the underlying OpenSSL RSA structure.
            if self.rsa:
                m2.rsa_free(self.rsa)

        def __getattr__(self, name):
            # Expose 'n' and 'e' as attributes read lazily from the
            # OpenSSL structure; 0 when no key has been loaded yet.
            if name == 'e':
                if not self.rsa:
                    return 0
                return mpiToNumber(m2.rsa_get_e(self.rsa))
            elif name == 'n':
                if not self.rsa:
                    return 0
                return mpiToNumber(m2.rsa_get_n(self.rsa))
            else:
                raise AttributeError

        def hasPrivateKey(self):
            """Return True if this key carries the private exponent."""
            return self._hasPrivateKey

        def _rawPrivateKeyOp(self, m):
            # Raw (no-padding) RSA private-key operation on integer m.
            b = numberToByteArray(m, numBytes(self.n))
            s = m2.rsa_private_encrypt(self.rsa, bytes(b), m2.no_padding)
            c = bytesToNumber(bytearray(s))
            return c

        def _rawPublicKeyOp(self, c):
            # Raw (no-padding) RSA public-key operation on integer c.
            b = numberToByteArray(c, numBytes(self.n))
            s = m2.rsa_public_decrypt(self.rsa, bytes(b), m2.no_padding)
            m = bytesToNumber(bytearray(s))
            return m

        def acceptsPassword(self): return True

        def write(self, password=None):
            """Serialize this key as PEM text.

            Private keys may be encrypted with 3DES-CBC when *password*
            is given; asking for a password on a public-only key is an
            error.
            """
            bio = m2.bio_new(m2.bio_s_mem())
            if self._hasPrivateKey:
                if password:
                    def f(v): return password
                    m2.rsa_write_key(self.rsa, bio, m2.des_ede_cbc(), f)
                else:
                    def f(): pass
                    m2.rsa_write_key_no_cipher(self.rsa, bio, f)
            else:
                if password:
                    raise AssertionError()
                m2.rsa_write_pub_key(self.rsa, bio)
            s = m2.bio_read(bio, m2.bio_ctrl_pending(bio))
            m2.bio_free(bio)
            return s

        def generate(bits):
            """Generate a fresh key pair of *bits* bits (public exponent 3)."""
            key = OpenSSL_RSAKey()
            def f():pass
            key.rsa = m2.rsa_generate_key(bits, 3, f)
            key._hasPrivateKey = True
            return key
        generate = staticmethod(generate)

        def parse(s, passwordCallback=None):
            """Parse a PEM-encoded private or public RSA key from string *s*.

            Raises SyntaxError when no PEM block is found or the block
            cannot be decoded.
            """
            # Skip forward to the first PEM header
            start = s.find("-----BEGIN ")
            if start == -1:
                raise SyntaxError()
            s = s[start:]

            if s.startswith("-----BEGIN "):
                if passwordCallback==None:
                    callback = password_callback
                else:
                    # Adapt the caller's zero-argument callback to the
                    # (v, prompt1, prompt2) signature M2Crypto expects.
                    def f(v, prompt1=None, prompt2=None):
                        return passwordCallback()
                    callback = f
                bio = m2.bio_new(m2.bio_s_mem())
                try:
                    m2.bio_write(bio, s)
                    key = OpenSSL_RSAKey()
                    if s.startswith("-----BEGIN RSA PRIVATE KEY-----"):
                        def f():pass
                        key.rsa = m2.rsa_read_key(bio, callback)
                        if key.rsa == None:
                            raise SyntaxError()
                        key._hasPrivateKey = True
                    elif s.startswith("-----BEGIN PUBLIC KEY-----"):
                        key.rsa = m2.rsa_read_pub_key(bio)
                        if key.rsa == None:
                            raise SyntaxError()
                        key._hasPrivateKey = False
                    else:
                        raise SyntaxError()
                    return key
                finally:
                    m2.bio_free(bio)
            else:
                raise SyntaxError()
        parse = staticmethod(parse)
| bsd-3-clause |
guschmue/tensorflow | tensorflow/contrib/gan/python/eval/python/summaries.py | 63 | 1174 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common TFGAN summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.eval.python import summaries_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.eval.python.summaries_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Re-export exactly the names summaries_impl declares public, and strip
# every other (undocumented) name from this module's namespace.
__all__ = summaries_impl.__all__
remove_undocumented(__name__, __all__)
| apache-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/numpy/distutils/command/install.py | 16 | 3064 | from __future__ import division, absolute_import, print_function
import sys
if 'setuptools' in sys.modules:
import setuptools.command.install as old_install_mod
have_setuptools = True
else:
import distutils.command.install as old_install_mod
have_setuptools = False
old_install = old_install_mod.install
from distutils.file_util import write_file
class install(old_install):
    """numpy's ``install`` command.

    Adds the ``install_clib`` sub-command and papers over behavioral
    differences between the distutils and setuptools base classes.
    """

    # Always run install_clib - the command is cheap, so no need to bypass it;
    # but it's not run by setuptools -- so it's run again in install_data
    sub_commands = old_install.sub_commands + [
        ('install_clib', lambda x: True)
    ]

    def finalize_options (self):
        old_install.finalize_options(self)
        # Install pure-Python modules into the same tree as extensions.
        self.install_lib = self.install_libbase

    def setuptools_run(self):
        """ The setuptools version of the .run() method.

        We must pull in the entire code so we can override the level used in the
        _getframe() call since we wrap this call by one more level.
        """
        # Explicit request for old-style install? Just do it
        if self.old_and_unmanageable or self.single_version_externally_managed:
            return old_install_mod._install.run(self)

        # Attempt to detect whether we were called from setup() or by another
        # command. If we were called by setup(), our caller will be the
        # 'run_command' method in 'distutils.dist', and *its* caller will be
        # the 'run_commands' method. If we were called any other way, our
        # immediate caller *might* be 'run_command', but it won't have been
        # called by 'run_commands'. This is slightly kludgy, but seems to
        # work.
        #
        caller = sys._getframe(3)
        caller_module = caller.f_globals.get('__name__', '')
        caller_name = caller.f_code.co_name

        if caller_module != 'distutils.dist' or caller_name!='run_commands':
            # We weren't called from the command line or setup(), so we
            # should run in backward-compatibility mode to support bdist_*
            # commands.
            old_install_mod._install.run(self)
        else:
            self.do_egg_install()

    def run(self):
        # Dispatch to the distutils or setuptools implementation.
        if not have_setuptools:
            r = old_install.run(self)
        else:
            r = self.setuptools_run()
        if self.record:
            # bdist_rpm fails when INSTALLED_FILES contains
            # paths with spaces. Such paths must be enclosed
            # with double-quotes.
            f = open(self.record, 'r')
            lines = []
            need_rewrite = False
            for l in f:
                l = l.rstrip()
                if ' ' in l:
                    need_rewrite = True
                    l = '"%s"' % (l)
                lines.append(l)
            f.close()
            if need_rewrite:
                self.execute(write_file,
                             (self.record, lines),
                             "re-writing list of installed files to '%s'" %
                             self.record)
        return r
| gpl-3.0 |
warner83/micropython | tools/tinytest-codegen.py | 47 | 2299 | #! /usr/bin/env python3
import os, sys
from glob import glob
from re import sub
def escape(s):
    """Render *s* as a multi-line C string literal.

    Special characters are backslash-escaped; embedded newlines also
    close the current C string and open a new one on the next line.
    """
    special = {
        '\0': '\\0',
        '\t': '\\t',
        '\n': '\\n\"\n\"',
        '\r': '\\r',
        '\\': '\\\\',
        '\"': '\\\"',
    }
    escaped = ''.join(special.get(ch, ch) for ch in s)
    return '""\n"{}"'.format(escaped)
def chew_filename(t):
    """Derive the generated C function name and the short description
    (the file name inside its group directory) from test path *t*."""
    func_name = "test_{}_fn".format(sub(r'/|\.|-', '_', t))
    description = t.split('/')[1]
    return {'func': func_name, 'desc': description}
def script_to_map(t):
    """Build the format-dict for test_function from test script path *t*:
    'name' is the C function name, 'script' the escaped file contents."""
    r = { 'name': chew_filename(t)['func'] }
    # Read the whole script and turn it into a C string literal.
    with open(t) as f: r['script'] = escape(''.join(f.readlines()))
    return r
# C template for one generated test function; {script} is the escaped
# Python source and {name} comes from chew_filename().
test_function = (
    "void {name}(void* data) {{\n"
    " const char * pystr = {script};\n"
    " do_str(pystr);\n"
    "}}"
)
# tinytest table of all testcases in one group.
testcase_struct = (
    "struct testcase_t {name}_tests[] = {{\n{body}\n END_OF_TESTCASES\n}};"
)
# One row of a testcase table.
testcase_member = (
    " {{ \"{desc}\", {func}, TT_ENABLED_, 0, 0 }},"
)
# tinytest table of all test groups.
testgroup_struct = (
    "struct testgroup_t groups[] = {{\n{body}\n END_OF_GROUPS\n}};"
)
# One row of the group table.
testgroup_member = (
    " {{ \"{name}/\", {name}_tests }},"
)

## XXX: may be we could have `--without <groups>` argument...

# currently these tests are selected because they pass on qemu-arm
# basics/try_finally1.py passes on local machine but only passes 50% of the
# time on Travis CI; no idea why so we just disable it.
test_dirs = ('basics', 'micropython', 'inlineasm') # 'float', 'import', 'io', 'misc')
exclude_tests = ('basics/builtin_override.py', 'basics/class_super_object.py', 'basics/memoryerror.py', 'basics/try_finally1.py', 'micropython/heapalloc.py', 'inlineasm/asmfpaddsub.py', 'inlineasm/asmfpcmp.py', 'inlineasm/asmfpldrstr.py', 'inlineasm/asmfpmuldiv.py', 'inlineasm/asmfpsqrt.py',)
# Generate one C function per test script, then the per-group testcase
# tables, and finally the global group table; print everything to stdout.
output = []

for group in test_dirs:
    tests = [test for test in glob('{}/*.py'.format(group)) if test not in exclude_tests]
    output.extend([test_function.format(**script_to_map(test)) for test in tests])
    testcase_members = [testcase_member.format(**chew_filename(test)) for test in tests]
    output.append(testcase_struct.format(name=group, body='\n'.join(testcase_members)))

testgroup_members = [testgroup_member.format(name=group) for group in test_dirs]

output.append(testgroup_struct.format(body='\n'.join(testgroup_members)))

## XXX: may be we could have `--output <filename>` argument...
print('\n\n'.join(output))
| mit |
MehdiSfr/tensor-flow | tensorflow/python/summary/impl/reservoir_test.py | 5 | 7403 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.summary.impl import reservoir
class ReservoirTest(tf.test.TestCase):
    """Tests for the public Reservoir interface."""

    def testEmptyReservoir(self):
        r = reservoir.Reservoir(1)
        self.assertFalse(r.Keys())

    def testRespectsSize(self):
        r = reservoir.Reservoir(42)
        # Buckets are created lazily; any key reports the configured size.
        self.assertEqual(r._buckets['meaning of life']._max_size, 42)

    def testItemsAndKeys(self):
        r = reservoir.Reservoir(42)
        r.AddItem('foo', 4)
        r.AddItem('bar', 9)
        r.AddItem('foo', 19)
        self.assertItemsEqual(r.Keys(), ['foo', 'bar'])
        self.assertEqual(r.Items('foo'), [4, 19])
        self.assertEqual(r.Items('bar'), [9])

    def testExceptions(self):
        # Size must be a non-negative integer.
        with self.assertRaises(ValueError):
            reservoir.Reservoir(-1)
        with self.assertRaises(ValueError):
            reservoir.Reservoir(13.3)

        r = reservoir.Reservoir(12)
        with self.assertRaises(KeyError):
            r.Items('missing key')

    def testDeterminism(self):
        """Tests that the reservoir is deterministic."""
        key = 'key'
        r1 = reservoir.Reservoir(10)
        r2 = reservoir.Reservoir(10)
        for i in xrange(100):
            r1.AddItem('key', i)
            r2.AddItem('key', i)
        self.assertEqual(r1.Items(key), r2.Items(key))

    def testBucketDeterminism(self):
        """Tests that reservoirs are deterministic at a bucket level.

        This means that only the order elements are added within a bucket matters.
        """
        separate_reservoir = reservoir.Reservoir(10)
        interleaved_reservoir = reservoir.Reservoir(10)
        for i in xrange(100):
            separate_reservoir.AddItem('key1', i)
        for i in xrange(100):
            separate_reservoir.AddItem('key2', i)
        for i in xrange(100):
            interleaved_reservoir.AddItem('key1', i)
            interleaved_reservoir.AddItem('key2', i)

        for key in ['key1', 'key2']:
            self.assertEqual(separate_reservoir.Items(key),
                             interleaved_reservoir.Items(key))

    def testUsesSeed(self):
        """Tests that reservoirs with different seeds keep different samples."""
        key = 'key'
        r1 = reservoir.Reservoir(10, seed=0)
        r2 = reservoir.Reservoir(10, seed=1)
        for i in xrange(100):
            r1.AddItem('key', i)
            r2.AddItem('key', i)
        self.assertNotEqual(r1.Items(key), r2.Items(key))
class ReservoirBucketTest(tf.test.TestCase):
    """Tests for the internal _ReservoirBucket."""

    def testEmptyBucket(self):
        b = reservoir._ReservoirBucket(1)
        self.assertFalse(b.Items())

    def testFillToSize(self):
        # Up to max_size, every item is kept in insertion order.
        b = reservoir._ReservoirBucket(100)
        for i in xrange(100):
            b.AddItem(i)
        self.assertEqual(b.Items(), list(xrange(100)))
        self.assertEqual(b._num_items_seen, 100)

    def testDoesntOverfill(self):
        b = reservoir._ReservoirBucket(10)
        for i in xrange(1000):
            b.AddItem(i)
        self.assertEqual(len(b.Items()), 10)
        self.assertEqual(b._num_items_seen, 1000)

    def testMaintainsOrder(self):
        # Sampled items must stay in strictly increasing insertion order.
        b = reservoir._ReservoirBucket(100)
        for i in xrange(10000):
            b.AddItem(i)
        items = b.Items()
        prev = -1
        for item in items:
            self.assertTrue(item > prev)
            prev = item

    def testKeepsLatestItem(self):
        # The most recently added item is always retained.
        b = reservoir._ReservoirBucket(5)
        for i in xrange(100):
            b.AddItem(i)
            last = b.Items()[-1]
            self.assertEqual(last, i)

    def testSizeOneBucket(self):
        b = reservoir._ReservoirBucket(1)
        for i in xrange(20):
            b.AddItem(i)
            self.assertEqual(b.Items(), [i])
        self.assertEqual(b._num_items_seen, 20)

    def testSizeZeroBucket(self):
        # Size 0 means "unbounded": keep everything.
        b = reservoir._ReservoirBucket(0)
        for i in xrange(20):
            b.AddItem(i)
            self.assertEqual(b.Items(), list(range(i + 1)))
        self.assertEqual(b._num_items_seen, 20)

    def testSizeRequirement(self):
        with self.assertRaises(ValueError):
            reservoir._ReservoirBucket(-1)
        with self.assertRaises(ValueError):
            reservoir._ReservoirBucket(10.3)

    def testRemovesItems(self):
        b = reservoir._ReservoirBucket(100)
        for i in xrange(10):
            b.AddItem(i)
        self.assertEqual(len(b.Items()), 10)
        self.assertEqual(b._num_items_seen, 10)
        self.assertEqual(b.FilterItems(lambda x: x <= 7), 2)
        self.assertEqual(len(b.Items()), 8)
        self.assertEqual(b._num_items_seen, 8)

    def testRemovesItemsWhenItemsAreReplaced(self):
        b = reservoir._ReservoirBucket(100)
        for i in xrange(10000):
            b.AddItem(i)
        self.assertEqual(b._num_items_seen, 10000)

        # Remove items
        num_removed = b.FilterItems(lambda x: x <= 7)
        self.assertGreater(num_removed, 92)
        self.assertEqual([], [item for item in b.Items() if item > 7])
        # _num_items_seen is scaled down proportionally to the removal.
        self.assertEqual(b._num_items_seen,
                         int(round(10000 * (1 - float(num_removed) / 100))))
class ReservoirBucketStatisticalDistributionTest(tf.test.TestCase):
    """Checks that reservoir samples are (approximately) uniformly
    distributed, by binning the kept items and testing each bin count
    against the expected binomial distribution."""

    def setUp(self):
        self.total = 1000000
        self.samples = 10000
        self.n_buckets = 100
        self.total_per_bucket = self.total // self.n_buckets
        self.assertEqual(self.total % self.n_buckets, 0, 'total must be evenly '
                         'divisible by the number of buckets')
        self.assertTrue(self.total > self.samples, 'need to have more items '
                        'than samples')

    def AssertBinomialQuantity(self, measured):
        # Accept |measured - mean| <= 6 standard deviations.
        p = 1.0 * self.n_buckets / self.samples
        mean = p * self.samples
        variance = p * (1 - p) * self.samples
        error = measured - mean
        # Given that the buckets were actually binomially distributed, this
        # fails with probability ~2E-9
        passed = error * error <= 36.0 * variance
        self.assertTrue(passed, 'found a bucket with measured %d '
                        'too far from expected %d' % (measured, mean))

    def testBucketReservoirSamplingViaStatisticalProperties(self):
        # Not related to a 'ReservoirBucket', but instead number of buckets we put
        # samples into for testing the shape of the distribution
        b = reservoir._ReservoirBucket(_max_size=self.samples)
        # add one extra item because we always keep the most recent item, which
        # would skew the distribution; we can just slice it off the end instead.
        for i in xrange(self.total + 1):
            b.AddItem(i)

        divbins = [0] * self.n_buckets
        modbins = [0] * self.n_buckets
        # Slice off the last item when we iterate.
        for item in b.Items()[0:-1]:
            divbins[item // self.total_per_bucket] += 1
            modbins[item % self.n_buckets] += 1

        for bucket_index in xrange(self.n_buckets):
            divbin = divbins[bucket_index]
            modbin = modbins[bucket_index]
            self.AssertBinomialQuantity(divbin)
            self.AssertBinomialQuantity(modbin)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
wolfskaempf/ga_statistics | lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py | 1009 | 2281 | import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
    """
    Return a random 32-character hex string for use as a multipart
    boundary (our embarassingly-simple replacement for
    mimetools.choose_boundary).
    """
    boundary = uuid4()
    return boundary.hex
def iter_field_objects(fields):
    """
    Yield a RequestField for every entry in *fields*.

    Accepts a dict, a list of (k, v) tuples, or a list of
    :class:`~urllib3.fields.RequestField` instances; tuples are converted
    via ``RequestField.from_tuples``.
    """
    if isinstance(fields, dict):
        source = six.iteritems(fields)
    else:
        source = iter(fields)

    for entry in source:
        if isinstance(entry, RequestField):
            yield entry
        else:
            yield RequestField.from_tuples(*entry)
def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Iterate over fields, yielding (key, value) pairs.

    The addition of :class:`~urllib3.fields.RequestField` makes this function
    obsolete. Instead, use :func:`iter_field_objects`, which returns
    :class:`~urllib3.fields.RequestField` objects.

    Supports list of (k, v) tuples and dicts.
    """
    if isinstance(fields, dict):
        return ((key, value) for key, value in six.iteritems(fields))
    return ((key, value) for key, value in fields)
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.

    Returns a ``(body_bytes, content_type)`` tuple.
    """
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()

    for field in iter_field_objects(fields):
        body.write(b('--%s\r\n' % (boundary)))

        # Headers are text: route them through the UTF-8 incremental writer.
        writer(body).write(field.render_headers())
        data = field.data

        if isinstance(data, int):
            data = str(data)  # Backwards compatibility

        if isinstance(data, six.text_type):
            writer(body).write(data)
        else:
            body.write(data)

        body.write(b'\r\n')

    body.write(b('--%s--\r\n' % (boundary)))

    content_type = str('multipart/form-data; boundary=%s' % boundary)

    return body.getvalue(), content_type
| mit |
nrwahl2/ansible | lib/ansible/modules/network/f5/bigip_gtm_virtual_server.py | 16 | 8532 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Michael Perzel
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_gtm_virtual_server
short_description: "Manages F5 BIG-IP GTM virtual servers"
description:
- "Manages F5 BIG-IP GTM virtual servers"
version_added: "2.2"
author:
- Michael Perzel (@perzizzle)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11.4"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Tested with manager and above account privilege level"
requirements:
- bigsuds
options:
state:
description:
- Virtual server state
required: false
default: present
choices: ['present', 'absent','enabled','disabled']
virtual_server_name:
description:
- Virtual server name
required: True
virtual_server_server:
description:
- Virtual server server
required: true
host:
description:
- Virtual server host
required: false
default: None
aliases: ['address']
port:
description:
- Virtual server port
required: false
default: None
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Enable virtual server
local_action: >
bigip_gtm_virtual_server
server=192.0.2.1
user=admin
password=mysecret
virtual_server_name=myname
virtual_server_server=myserver
state=enabled
'''
RETURN = '''# '''
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.f5_utils import bigip_api, f5_argument_spec
def server_exists(api, server):
    """Return True if the GTM server object exists on the BIG-IP."""
    # iControl has no direct 'exists' call; probe the object status and
    # interpret a "was not found" fault as absence.
    try:
        api.GlobalLB.Server.get_object_status([server])
        return True
    except bigsuds.OperationFailed:
        e = get_exception()
        if "was not found" in str(e):
            return False
        # genuine exception
        raise
def virtual_server_exists(api, name, server):
    """Return True if the GTM virtual server exists on the BIG-IP."""
    # iControl has no direct 'exists' call; probe the object status and
    # interpret a "was not found" fault as absence.
    try:
        virtual_server_id = {'name': name, 'server': server}
        api.GlobalLB.VirtualServerV2.get_object_status([virtual_server_id])
        return True
    except bigsuds.OperationFailed:
        e = get_exception()
        if "was not found" in str(e):
            return False
        # genuine exception
        raise
def add_virtual_server(api, virtual_server_name, virtual_server_server, address, port):
    """Create a GTM virtual server with a single address/port binding."""
    virtual_server_id = {'name': virtual_server_name, 'server': virtual_server_server}
    addresses = {'address': address, 'port': port}
    api.GlobalLB.VirtualServerV2.create([virtual_server_id], [addresses])
def remove_virtual_server(api, virtual_server_name, virtual_server_server):
    """Delete a GTM virtual server by name/server pair."""
    vs_id = {'name': virtual_server_name, 'server': virtual_server_server}
    api.GlobalLB.VirtualServerV2.delete_virtual_server([vs_id])
def get_virtual_server_state(api, name, server):
    """Return the virtual server's enabled state, e.g. 'enabled'/'disabled'."""
    virtual_server_id = {'name': name, 'server': server}
    raw = api.GlobalLB.VirtualServerV2.get_enabled_state([virtual_server_id])
    # The API returns values like 'STATE_ENABLED'; strip the prefix and
    # lowercase to match this module's 'state' parameter values.
    return raw[0].split('STATE_')[1].lower()
def set_virtual_server_state(api, name, server, state):
    """Set the virtual server's enabled state ('enabled' or 'disabled')."""
    virtual_server_id = {'name': name, 'server': server}
    # Convert the module's state value into the API's STATE_* constant.
    enabled_state = "STATE_%s" % state.strip().upper()
    api.GlobalLB.VirtualServerV2.set_enabled_state([virtual_server_id], [enabled_state])
def main():
    """Ansible entry point: converge the GTM virtual server to the
    requested state (present/absent/enabled/disabled)."""
    argument_spec = f5_argument_spec()

    meta_args = dict(
        state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
        host=dict(type='str', default=None, aliases=['address']),
        port=dict(type='int', default=None),
        virtual_server_name=dict(type='str', required=True),
        virtual_server_server=dict(type='str', required=True)
    )
    argument_spec.update(meta_args)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    if not bigsuds_found:
        module.fail_json(msg="the python bigsuds module is required")

    server = module.params['server']
    server_port = module.params['server_port']
    validate_certs = module.params['validate_certs']
    user = module.params['user']
    password = module.params['password']
    virtual_server_name = module.params['virtual_server_name']
    virtual_server_server = module.params['virtual_server_server']
    state = module.params['state']
    address = module.params['host']
    port = module.params['port']

    result = {'changed': False}  # default

    try:
        api = bigip_api(server, user, password, validate_certs, port=server_port)

        if state == 'absent':
            # Remove the virtual server if it exists.
            if virtual_server_exists(api, virtual_server_name, virtual_server_server):
                if not module.check_mode:
                    remove_virtual_server(api, virtual_server_name, virtual_server_server)
                    result = {'changed': True}
                else:
                    # check-mode return value
                    result = {'changed': True}
        elif state == 'present':
            # Create the virtual server when missing; address and port are
            # only required for creation.
            if virtual_server_name and virtual_server_server and address and port:
                if not virtual_server_exists(api, virtual_server_name, virtual_server_server):
                    if not module.check_mode:
                        if server_exists(api, virtual_server_server):
                            add_virtual_server(api, virtual_server_name, virtual_server_server, address, port)
                            result = {'changed': True}
                        else:
                            module.fail_json(msg="server does not exist")
                    else:
                        # check-mode return value
                        result = {'changed': True}
                else:
                    # virtual server exists -- potentially modify attributes --future feature
                    result = {'changed': False}
            else:
                module.fail_json(msg="Address and port are required to create virtual server")
        elif state == 'enabled':
            # Enable an existing virtual server if it is not already enabled.
            if not virtual_server_exists(api, virtual_server_name, virtual_server_server):
                module.fail_json(msg="virtual server does not exist")
            if state != get_virtual_server_state(api, virtual_server_name, virtual_server_server):
                if not module.check_mode:
                    set_virtual_server_state(api, virtual_server_name, virtual_server_server, state)
                    result = {'changed': True}
                else:
                    result = {'changed': True}
        elif state == 'disabled':
            # Disable an existing virtual server if it is not already disabled.
            if not virtual_server_exists(api, virtual_server_name, virtual_server_server):
                module.fail_json(msg="virtual server does not exist")
            if state != get_virtual_server_state(api, virtual_server_name, virtual_server_server):
                if not module.check_mode:
                    set_virtual_server_state(api, virtual_server_name, virtual_server_server, state)
                    result = {'changed': True}
                else:
                    result = {'changed': True}

    except Exception:
        e = get_exception()
        module.fail_json(msg="received exception: %s" % e)

    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
craigderington/studentloan5 | studentloan5/Lib/site-packages/setuptools/command/sdist.py | 385 | 7079 | from glob import glob
from distutils import log
import distutils.command.sdist as orig
import os
import sys
from setuptools.compat import PY3
from setuptools.utils import cs_path_exists
import pkg_resources
READMES = 'README', 'README.rst', 'README.txt'
_default_revctrl = list
def walk_revctrl(dirname=''):
    """Find all files under revision control"""
    # Delegate to every registered 'setuptools.file_finders' plugin.
    for entry_point in pkg_resources.iter_entry_points('setuptools.file_finders'):
        finder = entry_point.load()
        for item in finder(dirname):
            yield item
class sdist(orig.sdist):
    """Smart sdist that finds anything supported by revision control"""

    user_options = [
        ('formats=', None,
         "formats for source distribution (comma-separated list)"),
        ('keep-temp', 'k',
         "keep the distribution tree around after creating " +
         "archive file(s)"),
        ('dist-dir=', 'd',
         "directory to put the source distribution archive(s) in "
         "[default: dist]"),
    ]

    negative_opt = {}

    def run(self):
        # Build the file list from egg_info's SOURCES.txt machinery.
        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        self.filelist = ei_cmd.filelist
        self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
        self.check_readme()

        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        # Call check_metadata only if no 'check' command
        # (distutils <= 2.6)
        import distutils.command

        if 'check' not in distutils.command.__all__:
            self.check_metadata()

        self.make_distribution()

        # Record the produced archives so upload/register can find them.
        dist_files = getattr(self.distribution, 'dist_files', [])
        for file in self.archive_files:
            data = ('sdist', '', file)
            if data not in dist_files:
                dist_files.append(data)

    def __read_template_hack(self):
        # This grody hack closes the template file (MANIFEST.in) if an
        # exception occurs during read_template.
        # Doing so prevents an error when easy_install attempts to delete the
        # file.
        try:
            orig.sdist.read_template(self)
        except:
            _, _, tb = sys.exc_info()
            tb.tb_next.tb_frame.f_locals['template'].close()
            raise

    # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
    # has been fixed, so only override the method if we're using an earlier
    # Python.
    has_leaky_handle = (
        sys.version_info < (2, 7, 2)
        or (3, 0) <= sys.version_info < (3, 1, 4)
        or (3, 2) <= sys.version_info < (3, 2, 1)
    )
    if has_leaky_handle:
        read_template = __read_template_hack

    def add_defaults(self):
        # Add README/script/setup files, then Python sources, package data,
        # extension sources, C libraries and scripts to the file list.
        standards = [READMES,
                     self.distribution.script_name]
        for fn in standards:
            if isinstance(fn, tuple):
                # A tuple means "any one of these alternatives".
                alts = fn
                got_it = 0
                for fn in alts:
                    if cs_path_exists(fn):
                        got_it = 1
                        self.filelist.append(fn)
                        break
                if not got_it:
                    self.warn("standard file not found: should have one of " +
                              ', '.join(alts))
            else:
                if cs_path_exists(fn):
                    self.filelist.append(fn)
                else:
                    self.warn("standard file '%s' not found" % fn)

        optional = ['test/test*.py', 'setup.cfg']
        for pattern in optional:
            files = list(filter(cs_path_exists, glob(pattern)))
            if files:
                self.filelist.extend(files)

        # getting python files
        if self.distribution.has_pure_modules():
            build_py = self.get_finalized_command('build_py')
            self.filelist.extend(build_py.get_source_files())
            # This functionality is incompatible with include_package_data, and
            # will in fact create an infinite recursion if include_package_data
            # is True. Use of include_package_data will imply that
            # distutils-style automatic handling of package_data is disabled
            if not self.distribution.include_package_data:
                for _, src_dir, _, filenames in build_py.data_files:
                    self.filelist.extend([os.path.join(src_dir, filename)
                                          for filename in filenames])

        if self.distribution.has_ext_modules():
            build_ext = self.get_finalized_command('build_ext')
            self.filelist.extend(build_ext.get_source_files())

        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.filelist.extend(build_clib.get_source_files())

        if self.distribution.has_scripts():
            build_scripts = self.get_finalized_command('build_scripts')
            self.filelist.extend(build_scripts.get_source_files())

    def check_readme(self):
        # Warn when no recognized README variant is present.
        for f in READMES:
            if os.path.exists(f):
                return
        else:
            self.warn(
                "standard file not found: should have one of " +
                ', '.join(READMES)
            )

    def make_release_tree(self, base_dir, files):
        orig.sdist.make_release_tree(self, base_dir, files)

        # Save any egg_info command line options used to create this sdist
        dest = os.path.join(base_dir, 'setup.cfg')
        if hasattr(os, 'link') and os.path.exists(dest):
            # unlink and re-copy, since it might be hard-linked, and
            # we don't want to change the source version
            os.unlink(dest)
            self.copy_file('setup.cfg', dest)

        self.get_finalized_command('egg_info').save_version_info(dest)

    def _manifest_is_not_generated(self):
        # check for special comment used in 2.7.1 and higher
        if not os.path.isfile(self.manifest):
            return False

        fp = open(self.manifest, 'rbU')
        try:
            first_line = fp.readline()
        finally:
            fp.close()
        return (first_line !=
                '# file GENERATED by distutils, do NOT edit\n'.encode())

    def read_manifest(self):
        """Read the manifest file (named by 'self.manifest') and use it to
        fill in 'self.filelist', the list of files to include in the source
        distribution.
        """
        log.info("reading manifest file '%s'", self.manifest)
        manifest = open(self.manifest, 'rbU')
        for line in manifest:
            # The manifest must contain UTF-8. See #303.
            if PY3:
                try:
                    line = line.decode('UTF-8')
                except UnicodeDecodeError:
                    log.warn("%r not UTF-8 decodable -- skipping" % line)
                    continue
            # ignore comments and blank lines
            line = line.strip()
            if line.startswith('#') or not line:
                continue
            self.filelist.append(line)
        manifest.close()
| bsd-3-clause |
factorlibre/OCB | addons/l10n_fr_rib/bank.py | 335 | 4952 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Numérigraphe SARL.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class res_partner_bank(osv.osv):
    """Add fields and behavior for French RIB.

    A RIB ("Relevé d'Identité Bancaire") is composed of a 5-digit bank
    code, a 5-digit office (branch) code, an 11-character account number
    and a 2-digit checksum key.  This model validates the key and, when an
    IBAN is also present, validates it too.
    """
    _inherit = "res.partner.bank"

    def _check_key(self, cr, uid, ids):
        """Check the RIB key.

        Returns False when a RIB-type account has missing/malformed parts;
        raises osv.except_osv when the parts are well-formed but the
        checksum key does not match, or when the IBAN is invalid.
        """
        for bank_acc in self.browse(cr, uid, ids):
            # Ignore the accounts of type other than rib
            if bank_acc.state != 'rib':
                continue
            # Fail if the needed values are empty of too short
            if (not bank_acc.bank_code
                or len(bank_acc.bank_code) != 5
                or not bank_acc.office or len(bank_acc.office) != 5
                or not bank_acc.rib_acc_number or len(bank_acc.rib_acc_number) != 11
                or not bank_acc.key or len(bank_acc.key) != 2):
                return False
            # Get the rib data (without the key)
            rib = "%s%s%s" % (bank_acc.bank_code, bank_acc.office, bank_acc.rib_acc_number)
            # Translate letters into numbers according to a specific table
            # (notice how s -> 2)
            table = dict((ord(a), b) for a, b in zip(
                u'abcdefghijklmnopqrstuvwxyz', u'12345678912345678923456789'))
            # NOTE(review): str.translate with an {ord: str} mapping only
            # works on unicode strings under Python 2 — confirm `rib` is
            # unicode here (browse() record values normally are).
            rib = rib.lower().translate(table)
            # compute the key (standard RIB checksum: 97 - (100 * N mod 97))
            key = 97 - (100 * int(rib)) % 97
            if int(bank_acc.key) != key:
                raise osv.except_osv(_('Error!'),
                    _("The RIB key %s does not correspond to the other codes: %s %s %s.") % \
                    (bank_acc.key, bank_acc.bank_code, bank_acc.office, bank_acc.rib_acc_number) )
            if bank_acc.acc_number:
                if not self.is_iban_valid(cr, uid, bank_acc.acc_number):
                    raise osv.except_osv(_('Error!'), _("The IBAN %s is not valid.") % bank_acc.acc_number)
        return True

    def onchange_bank_id(self, cr, uid, ids, bank_id, context=None):
        """Change the bank code: copy the selected bank's RIB code into the
        account's bank_code field on top of the standard onchange result."""
        result = super(res_partner_bank, self).onchange_bank_id(cr, uid, ids, bank_id,
                                                                context=context)
        if bank_id:
            value = result.setdefault('value', {})
            bank = self.pool.get('res.bank').browse(cr, uid, bank_id,
                                                    context=context)
            value['bank_code'] = bank.rib_code
        return result

    _columns = {
        # IBAN becomes optional for RIB-type accounts.
        'acc_number': fields.char('Account Number', size=64, required=False),
        # The four RIB components; all readonly, filled from the bank/import.
        'rib_acc_number': fields.char('RIB account number', size=11, readonly=True,),
        'bank_code': fields.char('Bank Code', size=64, readonly=True,),
        'office': fields.char('Office Code', size=5, readonly=True,),
        'key': fields.char('Key', size=2, readonly=True,
                           help="The key is a number allowing to check the "
                                "correctness of the other codes."),
    }
    _constraints = [(_check_key, 'The RIB and/or IBAN is not valid', ['rib_acc_number', 'bank_code', 'office', 'key'])]
class res_bank(osv.osv):
    """Add the bank code to make it easier to enter RIB data."""
    _inherit = 'res.bank'

    def name_search(self, cr, user, name, args=None, operator='ilike',
                    context=None, limit=80):
        """Search by bank code in addition to the standard search."""
        # Standard name-based matches first.
        matches = super(res_bank, self).name_search(
            cr, user, name, args=args, operator=operator, context=context,
            limit=limit)
        # Then any bank whose RIB code matches the same term.
        rib_ids = self.search(cr, user, [('rib_code', operator, name)],
                              limit=limit, context=context)
        rib_matches = self.name_get(cr, user, rib_ids, context)
        # Merge both result sets, dropping duplicates.
        return list(set(matches + rib_matches))

    _columns = {
        'rib_code': fields.char('RIB Bank Code'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
diagramsoftware/odoo | addons/anonymization/anonymization.py | 23 | 28730 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import os
import base64
import random
import datetime
from openerp.osv import fields, osv
from openerp.tools import pickle
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
from itertools import groupby
from operator import itemgetter
# States an individual registered field can be in.
FIELD_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('not_existing', 'Not Existing'), ('new', 'New')]
# Global database state: per-field states plus 'unstable' (mixed clear/anonymized).
ANONYMIZATION_STATES = FIELD_STATES + [('unstable', 'Unstable')]
# Subset shown on the wizard's status function field.
WIZARD_ANONYMIZATION_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('unstable', 'Unstable')]
# Lifecycle of one anonymization run (history records).
ANONYMIZATION_HISTORY_STATE = [('started', 'Started'), ('done', 'Done'), ('in_exception', 'Exception occured')]
# Direction of a run: anonymizing clear data, or restoring it from a backup file.
ANONYMIZATION_DIRECTION = [('clear -> anonymized', 'clear -> anonymized'), ('anonymized -> clear', 'anonymized -> clear')]
def group(lst, cols):
    """Group a list of dicts by one or more key columns.

    `cols` may be a single column name or a sequence of names; the result
    maps each distinct key value (or tuple of values) to the list of rows
    carrying it.
    """
    if isinstance(cols, basestring):
        cols = [cols]
    keyfunc = itemgetter(*cols)
    grouped = {}
    # groupby only merges adjacent rows, so sort by the same key first.
    for key, rows in groupby(sorted(lst, key=keyfunc), keyfunc):
        grouped[key] = list(rows)
    return grouped
class ir_model_fields_anonymization(osv.osv):
    """Registry of the fields that take part in database anonymization.

    Each record points at one model field and tracks whether that field's
    data is currently clear, anonymized, or refers to a field that no
    longer exists.
    """
    _name = 'ir.model.fields.anonymization'
    _rec_name = 'field_id'

    _columns = {
        'model_name': fields.char('Object Name', required=True),
        'model_id': fields.many2one('ir.model', 'Object', ondelete='set null'),
        'field_name': fields.char('Field Name', required=True),
        'field_id': fields.many2one('ir.model.fields', 'Field', ondelete='set null'),
        # Fix: the label keyword is lowercase 'string'; the original passed
        # 'String', which fields.selection silently ignored, leaving the
        # column without its intended 'Status' label.
        'state': fields.selection(selection=FIELD_STATES, string='Status', required=True, readonly=True),
    }

    _sql_constraints = [
        ('model_id_field_id_uniq', 'unique (model_name, field_name)', _("You cannot have two fields with the same name on the same object!")),
    ]

    def _get_global_state(self, cr, uid, context=None):
        """Return the database-wide anonymization state.

        'clear' when no field is registered or all are clear, 'anonymized'
        when all are anonymized, 'unstable' for any mix of the two.
        """
        ids = self.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
        fields = self.browse(cr, uid, ids, context=context)
        if not len(fields) or len(fields) == len([f for f in fields if f.state == 'clear']):
            state = 'clear'  # all fields are clear (or nothing registered)
        elif len(fields) == len([f for f in fields if f.state == 'anonymized']):
            state = 'anonymized'  # all fields are anonymized
        else:
            state = 'unstable'  # fields are mixed: this should be fixed
        return state

    def _check_write(self, cr, uid, context=None):
        """check that the field is created from the menu and not from an database update
        otherwise the database update can crash:"""
        if context is None:
            context = {}
        if context.get('manual'):
            global_state = self._get_global_state(cr, uid, context=context)
            if global_state == 'anonymized':
                raise osv.except_osv('Error!', "The database is currently anonymized, you cannot create, modify or delete fields.")
            elif global_state == 'unstable':
                msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                        " while some fields are not anonymized. You should try to solve this problem before trying to create, write or delete fields.")
                raise osv.except_osv('Error!', msg)
        return True

    def _get_model_and_field_ids(self, cr, uid, vals, context=None):
        """Resolve (model_id, field_id) from the 'model_name'/'field_name'
        values, returning (False, False) when either cannot be found."""
        model_and_field_ids = (False, False)
        if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
            ir_model_fields_obj = self.pool.get('ir.model.fields')
            ir_model_obj = self.pool.get('ir.model')
            model_ids = ir_model_obj.search(cr, uid, [('model', '=', vals['model_name'])], context=context)
            if model_ids:
                field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', vals['field_name']), ('model_id', '=', model_ids[0])], context=context)
                if field_ids:
                    field_id = field_ids[0]
                    model_and_field_ids = (model_ids[0], field_id)
        return model_and_field_ids

    def create(self, cr, uid, vals, context=None):
        """Create a registration; its state follows the global state unless
        the referenced field does not exist."""
        # check field state: all should be clear before we can add a new field to anonymize:
        self._check_write(cr, uid, context=context)
        global_state = self._get_global_state(cr, uid, context=context)
        if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
            vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
        # check not existing fields:
        if not vals.get('field_id'):
            vals['state'] = 'not_existing'
        else:
            vals['state'] = global_state
        res = super(ir_model_fields_anonymization, self).create(cr, uid, vals, context=context)
        return res

    def write(self, cr, uid, ids, vals, context=None):
        """Update registrations, re-resolving model/field ids and re-deriving
        the state when the target field changes."""
        # check field state: all should be clear before we can modify a field
        # (a pure state-reset to 'clear' is allowed without that check):
        if not (len(vals.keys()) == 1 and vals.get('state') == 'clear'):
            self._check_write(cr, uid, context=context)
        if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
            vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
        # check not existing fields:
        if 'field_id' in vals:
            if not vals.get('field_id'):
                vals['state'] = 'not_existing'
            else:
                global_state = self._get_global_state(cr, uid, context)
                if global_state != 'unstable':
                    vals['state'] = global_state
        res = super(ir_model_fields_anonymization, self).write(cr, uid, ids, vals, context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        """Delete registrations, refusing while the database is anonymized
        or unstable (via _check_write)."""
        # check field state: all should be clear before we can unlink a field:
        self._check_write(cr, uid, context=context)
        res = super(ir_model_fields_anonymization, self).unlink(cr, uid, ids, context=context)
        return res

    def onchange_model_id(self, cr, uid, ids, model_id, context=None):
        """Reset field info and fill model_name when the model changes."""
        res = {'value': {
                'field_name': False,
                'field_id': False,
                'model_name': False,
        }}
        if model_id:
            ir_model_obj = self.pool.get('ir.model')
            model_ids = ir_model_obj.search(cr, uid, [('id', '=', model_id)])
            model_id = model_ids and model_ids[0] or None
            model_name = model_id and ir_model_obj.browse(cr, uid, model_id).model or False
            res['value']['model_name'] = model_name
        return res

    def onchange_model_name(self, cr, uid, ids, model_name, context=None):
        """Reset field info and fill model_id when the model name changes."""
        res = {'value': {
                'field_name': False,
                'field_id': False,
                'model_id': False,
        }}
        if model_name:
            ir_model_obj = self.pool.get('ir.model')
            model_ids = ir_model_obj.search(cr, uid, [('model', '=', model_name)])
            model_id = model_ids and model_ids[0] or False
            res['value']['model_id'] = model_id
        return res

    def onchange_field_name(self, cr, uid, ids, field_name, model_name):
        """Resolve field_id from a (field_name, model_name) pair."""
        res = {'value': {
                'field_id': False,
        }}
        if field_name and model_name:
            ir_model_fields_obj = self.pool.get('ir.model.fields')
            field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', field_name), ('model', '=', model_name)])
            field_id = field_ids and field_ids[0] or False
            res['value']['field_id'] = field_id
        return res

    def onchange_field_id(self, cr, uid, ids, field_id, model_name):
        """Fill field_name from the selected field_id."""
        res = {'value': {
                'field_name': False,
        }}
        if field_id:
            ir_model_fields_obj = self.pool.get('ir.model.fields')
            field = ir_model_fields_obj.browse(cr, uid, field_id)
            res['value']['field_name'] = field.name
        return res

    _defaults = {
        'state': lambda *a: 'clear',
    }
class ir_model_fields_anonymization_history(osv.osv):
    """Audit log of anonymization runs: one record per (de)anonymization,
    newest first."""
    _name = 'ir.model.fields.anonymization.history'
    _order = "date desc"
    _columns = {
        # When the run was started.
        'date': fields.datetime('Date', required=True, readonly=True),
        # Field registrations involved in this run.
        'field_ids': fields.many2many('ir.model.fields.anonymization', 'anonymized_field_to_history_rel', 'field_id', 'history_id', 'Fields', readonly=True),
        # started / done / in_exception (see ANONYMIZATION_HISTORY_STATE).
        'state': fields.selection(selection=ANONYMIZATION_HISTORY_STATE, string='Status', required=True, readonly=True),
        # clear -> anonymized, or the reverse.
        'direction': fields.selection(selection=ANONYMIZATION_DIRECTION, string='Direction', size=20, required=True, readonly=True),
        # Result or error message of the run.
        'msg': fields.text('Message', readonly=True),
        # Where the pickle backup of the clear values was written.
        'filepath': fields.char(string='File path', readonly=True),
    }
class ir_model_fields_anonymize_wizard(osv.osv_memory):
    """Wizard driving the anonymization / de-anonymization of the database.

    Anonymizing replaces the values of all registered fields with dummy
    data and exports the clear values to a pickle file; de-anonymizing
    reads that file back and restores the clear values.
    """
    _name = 'ir.model.fields.anonymize.wizard'

    def _get_state(self, cr, uid, ids, name, arg, context=None):
        """Function field: expose the global anonymization state."""
        res = {}
        state = self._get_state_value(cr, uid, context=None)
        for id in ids:
            res[id] = state
        return res

    def _get_summary(self, cr, uid, ids, name, arg, context=None):
        """Function field: expose a text summary of registered fields."""
        res = {}
        summary = self._get_summary_value(cr, uid, context)
        for id in ids:
            res[id] = summary
        return res

    _columns = {
        'name': fields.char(string='File Name'),
        'summary': fields.function(_get_summary, type='text', string='Summary'),
        'file_export': fields.binary(string='Export'),
        # Fix: 'extension' was misspelled 'extention' in the help text.
        'file_import': fields.binary(string='Import', help="This is the file created by the anonymization process. It should have the '.pickle' extension."),
        'state': fields.function(_get_state, string='Status', type='selection', selection=WIZARD_ANONYMIZATION_STATES, readonly=False),
        'msg': fields.text(string='Message'),
    }

    def _get_state_value(self, cr, uid, context=None):
        """Delegate to the registry's global-state computation."""
        state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
        return state

    def _get_summary_value(self, cr, uid, context=None):
        """Build a human-readable list of every registered field and its state."""
        summary = u''
        anon_field_obj = self.pool.get('ir.model.fields.anonymization')
        ir_model_fields_obj = self.pool.get('ir.model.fields')
        anon_field_ids = anon_field_obj.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
        anon_fields = anon_field_obj.browse(cr, uid, anon_field_ids, context=context)
        field_ids = [anon_field.field_id.id for anon_field in anon_fields if anon_field.field_id]
        fields = ir_model_fields_obj.browse(cr, uid, field_ids, context=context)
        fields_by_id = dict([(f.id, f) for f in fields])
        for anon_field in anon_fields:
            field = fields_by_id.get(anon_field.field_id.id)
            values = {
                'model_name': field.model_id.name,
                'model_code': field.model_id.model,
                'field_code': field.name,
                'field_name': field.field_description,
                'state': anon_field.state,
            }
            summary += u" * %(model_name)s (%(model_code)s) -> %(field_name)s (%(field_code)s): state: (%(state)s)\n" % values
        return summary

    def default_get(self, cr, uid, fields_list, context=None):
        """Pre-fill the wizard with the current state, summary and a
        back-up-first warning message."""
        res = {}
        res['name'] = '.pickle'
        res['summary'] = self._get_summary_value(cr, uid, context)
        res['state'] = self._get_state_value(cr, uid, context)
        res['msg'] = _("""Before executing the anonymization process, you should make a backup of your database.""")
        return res

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, *args, **kwargs):
        """Rewrite the wizard form depending on the current step and the
        global state (warning, import-file, or result layouts)."""
        state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
        if context is None:
            context = {}
        step = context.get('step', 'new_window')
        res = super(ir_model_fields_anonymize_wizard, self).fields_view_get(cr, uid, view_id, view_type, context=context, *args, **kwargs)
        eview = etree.fromstring(res['arch'])
        placeholder = eview.xpath("group[@name='placeholder1']")
        if len(placeholder):
            placeholder = placeholder[0]
            if step == 'new_window' and state == 'clear':
                # clicked in the menu and the fields are not anonymized:
                # warn the admin that backing up the db is very important
                placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('label', {'string': 'Warning'}))
                eview.remove(placeholder)
            elif step == 'new_window' and state == 'anonymized':
                # clicked in the menu and the fields are already anonymized
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('field', {'name': 'file_import', 'required': "1"}))
                placeholder.addnext(etree.Element('label', {'string': 'Anonymization file'}))
                eview.remove(placeholder)
            elif step == 'just_anonymized':
                # we just ran the anonymization process, we need the file export field
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('field', {'name': 'file_export'}))
                # we need to remove the button:
                buttons = eview.xpath("button")
                for button in buttons:
                    eview.remove(button)
                # and add a message:
                placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('label', {'string': 'Result'}))
                # remove the placeholder:
                eview.remove(placeholder)
            elif step == 'just_desanonymized':
                # we just reversed the anonymization process, we don't need any field
                # we need to remove the button
                buttons = eview.xpath("button")
                for button in buttons:
                    eview.remove(button)
                # and add a message:
                placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
                placeholder.addnext(etree.Element('newline'))
                placeholder.addnext(etree.Element('label', {'string': 'Result'}))
                # remove the placeholder:
                eview.remove(placeholder)
            else:
                msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                        " while some fields are not anonymized. You should try to solve this problem before trying to do anything else.")
                raise osv.except_osv('Error!', msg)
            res['arch'] = etree.tostring(eview)
        return res

    def _raise_after_history_update(self, cr, uid, history_id, error_type, error_msg):
        """Record the error on the history record, then raise it to the user."""
        self.pool.get('ir.model.fields.anonymization.history').write(cr, uid, history_id, {
            'state': 'in_exception',
            'msg': error_msg,
        })
        raise osv.except_osv(error_type, error_msg)

    def anonymize_database(self, cr, uid, ids, context=None):
        """Sets the 'anonymized' state to defined fields"""
        # create a new history record:
        anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
        vals = {
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'state': 'started',
            'direction': 'clear -> anonymized',
        }
        history_id = anonymization_history_model.create(cr, uid, vals)
        # check that all the defined fields are in the 'clear' state
        state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
        if state == 'anonymized':
            self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("The database is currently anonymized, you cannot anonymize it again."))
        elif state == 'unstable':
            msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                    " while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
            self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
        # do the anonymization:
        dirpath = os.environ.get('HOME') or os.getcwd()
        rel_filepath = 'field_anonymization_%s_%s.pickle' % (cr.dbname, history_id)
        abs_filepath = os.path.abspath(os.path.join(dirpath, rel_filepath))
        ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
        field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
        fields = ir_model_fields_anonymization_model.browse(cr, uid, field_ids, context=context)
        if not fields:
            msg = "No fields are going to be anonymized."
            self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
        data = []
        for field in fields:
            model_name = field.model_id.model
            field_name = field.field_id.name
            field_type = field.field_id.ttype
            table_name = self.pool[model_name]._table
            # get the current value
            # NOTE: identifiers come from ir.model.fields (admin-managed),
            # not from user input, so string interpolation is acceptable here.
            sql = "select id, %s from %s" % (field_name, table_name)
            cr.execute(sql)
            records = cr.dictfetchall()
            for record in records:
                data.append({"model_id": model_name, "field_id": field_name, "id": record['id'], "value": record[field_name]})
                # anonymize the value:
                anonymized_value = None
                sid = str(record['id'])
                if field_type == 'char':
                    anonymized_value = 'xxx' + sid
                elif field_type == 'selection':
                    anonymized_value = 'xxx' + sid
                elif field_type == 'text':
                    anonymized_value = 'xxx' + sid
                elif field_type == 'html':
                    anonymized_value = 'xxx' + sid
                elif field_type == 'boolean':
                    anonymized_value = random.choice([True, False])
                elif field_type == 'date':
                    anonymized_value = '2011-11-11'
                elif field_type == 'datetime':
                    anonymized_value = '2011-11-11 11:11:11'
                elif field_type == 'float':
                    anonymized_value = 0.0
                elif field_type == 'integer':
                    anonymized_value = 0
                elif field_type in ['binary', 'many2many', 'many2one', 'one2many', 'reference']:  # cannot anonymize these kind of fields
                    msg = _("Cannot anonymize fields of these types: binary, many2many, many2one, one2many, reference.")
                    self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
                if anonymized_value is None:
                    # Fix: "cannot happens" -> "cannot happen"
                    self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("Anonymized value is None. This cannot happen."))
                sql = "update %(table)s set %(field)s = %%(anonymized_value)s where id = %%(id)s" % {
                    'table': table_name,
                    'field': field_name,
                }
                cr.execute(sql, {
                    'anonymized_value': anonymized_value,
                    'id': record['id']
                })
        # save pickle:
        # Fix: protocol -1 is a binary pickle, so the file must be opened in
        # binary mode; the original also never closed the handle before
        # re-opening the file below, so the read-back could see truncated,
        # unflushed data.  'with' guarantees flush+close.
        with open(abs_filepath, 'wb') as pickle_file:
            pickle.dump(data, pickle_file, -1)
        # update the anonymization fields:
        values = {
            'state': 'anonymized',
        }
        ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
        # add a result message in the wizard:
        msgs = ["Anonymization successful.",
                "",
                # Fix: "Donot" -> "Do not"
                "Do not forget to save the resulting file to a safe place because you will not be able to revert the anonymization without this file.",
                "",
                "This file is also stored in the %s directory. The absolute file path is: %s.",
                ]
        msg = '\n'.join(msgs) % (dirpath, abs_filepath)
        with open(abs_filepath, 'rb') as pickle_file:
            self.write(cr, uid, ids, {
                'msg': msg,
                'file_export': base64.encodestring(pickle_file.read()),
            })
        # update the history record:
        anonymization_history_model.write(cr, uid, history_id, {
            'field_ids': [[6, 0, field_ids]],
            'msg': msg,
            'filepath': abs_filepath,
            'state': 'done',
        })
        # handle the view:
        view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
        return {
            'res_id': ids[0],
            'view_id': [view_id],
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'ir.model.fields.anonymize.wizard',
            'type': 'ir.actions.act_window',
            'context': {'step': 'just_anonymized'},
            'target': 'new',
        }

    def reverse_anonymize_database(self, cr, uid, ids, context=None):
        """Set the 'clear' state to defined fields"""
        ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
        anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
        # create a new history record:
        vals = {
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'state': 'started',
            'direction': 'anonymized -> clear',
        }
        history_id = anonymization_history_model.create(cr, uid, vals)
        # check that all the defined fields are in the 'anonymized' state
        state = ir_model_fields_anonymization_model._get_global_state(cr, uid, context=context)
        if state == 'clear':
            # Fix: the original called osv.except_osv_ (trailing underscore),
            # an attribute that does not exist, so reaching this branch
            # raised AttributeError instead of the intended user error.
            raise osv.except_osv('Error!', "The database is not currently anonymized, you cannot reverse the anonymization.")
        elif state == 'unstable':
            msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
                    " while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
            raise osv.except_osv('Error!', msg)
        wizards = self.browse(cr, uid, ids, context=context)
        for wizard in wizards:
            if not wizard.file_import:
                msg = _("It is not possible to reverse the anonymization process without supplying the anonymization export file.")
                self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
            # reverse the anonymization:
            # load the pickle file content into a data structure:
            data = pickle.loads(base64.decodestring(wizard.file_import))
            migration_fix_obj = self.pool.get('ir.model.fields.anonymization.migration.fix')
            fix_ids = migration_fix_obj.search(cr, uid, [('target_version', '=', '8.0')])
            fixes = migration_fix_obj.read(cr, uid, fix_ids, ['model_name', 'field_name', 'query', 'query_type', 'sequence'])
            fixes = group(fixes, ('model_name', 'field_name'))
            for line in data:
                queries = []
                table_name = self.pool[line['model_id']]._table if line['model_id'] in self.pool else None
                # check if custom sql exists:
                key = (line['model_id'], line['field_id'])
                custom_updates = fixes.get(key)
                if custom_updates:
                    custom_updates.sort(key=itemgetter('sequence'))
                    queries = [(record['query'], record['query_type']) for record in custom_updates if record['query_type']]
                elif table_name:
                    queries = [("update %(table)s set %(field)s = %%(value)s where id = %%(id)s" % {
                        'table': table_name,
                        'field': line['field_id'],
                    }, 'sql')]
                for query in queries:
                    if query[1] == 'sql':
                        sql = query[0]
                        cr.execute(sql, {
                            'value': line['value'],
                            'id': line['id']
                        })
                    elif query[1] == 'python':
                        raw_code = query[0]
                        code = raw_code % line
                        eval(code)
                    else:
                        # Fix: 'query' is a (text, type) tuple; indexing it
                        # with the string 'query_type' raised TypeError and
                        # masked this error message.
                        raise Exception("Unknown query type '%s'. Valid types are: sql, python." % (query[1], ))
        # update the anonymization fields:
        ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
        field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
        values = {
            'state': 'clear',
        }
        ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
        # add a result message in the wizard:
        msg = '\n'.join(["Successfully reversed the anonymization.",
                         "",
                         ])
        self.write(cr, uid, ids, {'msg': msg})
        # update the history record:
        anonymization_history_model.write(cr, uid, history_id, {
            'field_ids': [[6, 0, field_ids]],
            'msg': msg,
            'filepath': False,
            'state': 'done',
        })
        # handle the view:
        view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
        return {
            'res_id': ids[0],
            'view_id': [view_id],
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'ir.model.fields.anonymize.wizard',
            'type': 'ir.actions.act_window',
            'context': {'step': 'just_desanonymized'},
            'target': 'new',
        }

    def _id_get(self, cr, uid, model, id_str, mod):
        """Resolve an XML id ('module.name' or 'name') to a database id,
        returning None when it cannot be found."""
        if '.' in id_str:
            mod, id_str = id_str.split('.')
        try:
            idn = self.pool.get('ir.model.data')._get_id(cr, uid, mod, id_str)
            res = int(self.pool.get('ir.model.data').read(cr, uid, [idn], ['res_id'])[0]['res_id'])
        except Exception:
            # Fix: narrowed from a bare 'except:' so KeyboardInterrupt /
            # SystemExit are no longer swallowed; a missing XML id still
            # falls back to None as before.
            res = None
        return res
class ir_model_fields_anonymization_migration_fix(osv.osv):
    """Custom per-field restore queries used while reversing anonymization
    on databases migrated to a newer version (see reverse_anonymize_database)."""
    _name = 'ir.model.fields.anonymization.migration.fix'
    _order = "sequence"
    _columns = {
        # Server version the fix targets (e.g. '8.0').
        'target_version': fields.char('Target Version'),
        # Model/field the fix applies to; matched against the pickle lines.
        'model_name': fields.char('Model'),
        'field_name': fields.char('Field'),
        # The restore statement/expression itself.
        'query': fields.text('Query'),
        # 'sql' queries are executed with cr.execute; 'python' ones are eval'ed.
        'query_type': fields.selection(string='Query', selection=[('sql', 'sql'), ('python', 'python')]),
        # Execution order when several fixes exist for the same field.
        'sequence': fields.integer('Sequence'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CENDARI/editorsnotes | editorsnotes/djotero/views.py | 1 | 7554 | import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from editorsnotes.main.models.auth import User
from .models import CachedArchive
from . import utils
#@login_required
#def import_zotero(request, username=False):
# o = {}
# if not username:
# user = request.user
# else:
# user = get_object_or_404(User, username=username)
# if user.get_profile().zotero_uid and user.get_profile().zotero_key:
# o['zotero_status'] = True
# o['zotero_uid'] = user.get_profile().zotero_uid
# else:
# o['zotero_status'] = False
# o['get_params'] = request.GET.urlencode().replace('%2F', '/')
# if request.GET.get('apply', ''):
# o['apply_to_docs'] = True
# else:
# o['apply_to_docs'] = False
# return render_to_response(
# 'import-zotero.html', o, context_instance=RequestContext(request))
#
#def libraries(request):
# if not request.is_ajax():
# return HttpResponseBadRequest()
# if request.GET.get('validate', ''):
# zotero_uid = request.GET.get('zotero_uid')
# zotero_key = request.GET.get('zotero_key')
# else:
# zotero_uid = request.user.get_profile().zotero_uid
# zotero_key = request.user.get_profile().zotero_key
# libraries = utils.get_libraries(zotero_uid, zotero_key)
# return HttpResponse(json.dumps(libraries), content_type='application/json')
#
#def collections(request):
# if not request.is_ajax():
# return HttpResponseBadRequest()
# loc = request.GET.get('loc', '')
# top_level = request.GET.get('top', 0)
# zotero_key = request.user.get_profile().zotero_key
# collections = utils.get_collections(zotero_key, loc, int(top_level))
# return HttpResponse(json.dumps(collections), content_type='application/json')
#
#def items(request):
# if not request.is_ajax():
# return HttpResponseBadRequest()
# loc = request.GET.get('loc', '')
# opts = json.loads(request.GET.get('opts', '{}'))
# zotero_key = request.user.get_profile().zotero_key
# items = utils.get_items(zotero_key, loc, opts)
# return HttpResponse(json.dumps(items), content_type='application/json')
#
#def items_continue(request):
# request.session['import_complete'] = False
# selected_items_list = request.POST.getlist('zotero-item')
# selected_items = [json.loads(item, strict=False) for item in selected_items_list]
# o = {}
# o['items'] = []
# o['get_params'] = request.GET.urlencode().replace('%2F', '/')
# for item in selected_items:
# item_return = {}
# #Check if this exact item has been imported before
# if ZoteroLink.objects.filter(zotero_url=item['url']):
# item_return['existing'] = ('exact',
# ZoteroLink.objects.filter(zotero_url=item['url'])[0].doc)
# else:
# item_return['existing'] = False
# #TODO: Check if item with this title/creators/date has been imported
#
# #Get related topics for tags
# #item_return['related_topics'] = []
# #for tag in item['tags']:
# # query = ' AND '.join([ 'title:%s' % term for term
# # in tag['tag'].split() if len(term) > 1 ])
# # topic_match_set = [(result.object.preferred_name, result.object.id) for result
# # in SearchQuerySet().models(Topic).narrow(query)
# # if result.score >= 40]
# # if topic_match_set:
# # item_return['related_topics'].append(topic_match_set)
# item_return['data'] = json.dumps(item)
# item_return['citation'] = item['citation']
# o['items'].append(item_return)
# return render_to_response(
# 'continue.html', o, context_instance=RequestContext(request))
#
#def import_items(request):
# if request.session.get('import_complete', False):
# return HttpResponse('Please only submit items once')
# item_data = request.POST.getlist('data')
# item_citations = request.POST.getlist('changed-citation')
# user = request.user
# o={}
# o['created_items'] = []
# item_counter = 0
# for item_data_string, updated_citation in zip(item_data, item_citations):
# item_counter += 1
# action = request.POST.get('import-action-%s' % item_counter)
# if action not in ['create', 'update']:
# continue
# item_data = json.loads(item_data_string, object_pairs_hook=OrderedDict)
# if updated_citation:
# citation = updated_citation
# else:
# citation = item_data['citation']
# if action == "create":
# d = Document(creator=user, last_updater=user, description=citation)
# d.save()
# elif action == "update":
# update_id = request.POST.get('item-update-%s' % item_counter)
# d = Document.objects.get(id=update_id)
# d.last_updated = datetime.datetime.now()
# d.last_updater = user
# d.save()
# if d.zotero_link():
# d.zotero_link().delete()
# link = ZoteroLink(zotero_data=item_data['json'], zotero_url=item_data['url'], doc_id=d.id)
# try:
# item_data['date']['year']
# link.date_information = json.dumps(item_data['date'])
# except KeyError:
# pass
# link.save()
# reltopic = request.GET.get('reltopic', False)
# if reltopic:
# related_topic = Topic.objects.get(id=int(reltopic))
# new_assignment = TopicAssignment.objects.create(
# content_object=d,
# topic=related_topic,
# creator=user
# )
# new_assignment.save()
# o['created_items'].append(d)
# request.session['import_complete'] = True
# redirect_url = request.GET.get('return_to', '/')
# return HttpResponseRedirect(redirect_url)
@login_required
def update_zotero_info(request, username=None):
    """Store a user's Zotero user ID and API key, then redirect back.

    Without a username the logged-in user is updated; otherwise the named
    user (404 if unknown).
    """
    if not username:
        user = request.user
    else:
        user = get_object_or_404(User, username=username)
    # Plain subscription instead of calling __getitem__ directly; a missing
    # field raises MultiValueDictKeyError exactly as before.
    user.zotero_uid = request.POST['zotero-id']
    user.zotero_key = request.POST['zotero-key']
    user.save()
    redirect_url = request.GET.get('return_to', '/')
    return HttpResponseRedirect(redirect_url)
###
def item_template(request):
    """Return the empty Zotero field template for the requested item type."""
    item_type = request.GET.get('itemType')
    if not item_type:
        return HttpResponseBadRequest()
    template = utils.get_item_template(item_type)
    return HttpResponse(json.dumps(template), content_type='application/json')
def item_types(request):
    """Return the full list of Zotero item types as JSON."""
    return HttpResponse(json.dumps(utils.get_item_types()),
                        content_type='application/json')
def item_type_creators(request):
    """Return the creator types valid for the ``itemType`` query param as JSON.

    Responds 400 when ``itemType`` is missing or empty.
    """
    requested_type = request.GET.get('itemType')
    if not requested_type:
        return HttpResponseBadRequest()
    creator_list = utils.get_creator_types(requested_type)
    return HttpResponse(json.dumps(creator_list), content_type='application/json')
def archive_to_dict(archive):
    """Serialize an archive object to a plain dict of its name and id."""
    return {'name': archive.name, 'id': archive.id}
def api_archives(request):
    """Return archives whose names contain the ``q`` query string, as JSON.

    An empty ``q`` matches every archive.
    """
    term = request.GET.get('q', '')
    matches = CachedArchive.objects.filter(name__icontains=term)
    payload = [archive_to_dict(match) for match in matches]
    return HttpResponse(json.dumps(payload), content_type='text/plain')
| agpl-3.0 |
james-jz-zheng/jjzz | ml/stock_prediction.py | 1 | 7279 | import yahoo_finance as yhf
from sklearn import *
import os.path, os, sys
import pickle
import numpy as np
import datetime as dt
# import pandas as pd
def next_biz_day(d):
    """Return the first weekday (Mon-Fri) strictly after date ``d``."""
    candidate = d + dt.timedelta(days=1)
    # weekday() is 0..4 for Mon..Fri; keep stepping over weekend days.
    while candidate.weekday() not in range(5):
        candidate += dt.timedelta(days=1)
    return candidate
def prev_biz_day(d):
    """Return the last weekday (Mon-Fri) strictly before date ``d``."""
    candidate = d - dt.timedelta(days=1)
    while candidate.weekday() not in range(5):
        candidate -= dt.timedelta(days=1)
    return candidate
def get_raw(s_name, start, end):
    """Fetch historical quotes for ``s_name``, caching the result on disk.

    The Yahoo Finance response is pickled under the system temp directory,
    keyed by symbol and date range; subsequent calls re-load the pickle
    instead of hitting the network.
    """
    if sys.platform.startswith('win'):
        cache_dir, sep = os.environ.get('TEMP'), '\\'
    else:
        cache_dir, sep = r'/tmp', '/'
    cache_file = cache_dir + sep + s_name + start + end + '.txt'
    if os.path.isfile(cache_file):
        with open(cache_file, 'r') as fh:
            return pickle.load(fh)
    raw = yhf.Share(s_name).get_historical(start, end)
    with open(cache_file, 'w') as fh:
        pickle.dump(raw, fh)
    return raw
def get_s(s_name, start, end, field):
    """Return the ``field`` column as floats, oldest entry first.

    The feed's rows are reversed -- presumably it lists newest first;
    TODO confirm against get_raw's provider.
    """
    rows = get_raw(s_name, start, end)
    return [float(row[field]) for row in reversed(rows)]
def get_str(s_name, start, end, field):
    """Return the ``field`` column as strings, oldest entry first."""
    rows = get_raw(s_name, start, end)
    return [str(row[field]) for row in reversed(rows)]
def get_diff(arr):
    """Return the series of relative day-over-day changes, first element 0.

    Each later entry is 2*(cur-prev)/(cur+prev), i.e. the change normalised
    by the midpoint of the two consecutive values.
    """
    diffs = [0]
    for prev, cur in zip(arr, arr[1:]):
        diffs.append(2.0 * (cur - prev) / (cur + prev))
    return diffs
def sigmoid(z):
    """Logistic function 1/(1+e^-z); works on scalars and numpy arrays."""
    return 1.0 / (np.exp(-1.0 * z) + 1.0)
def nomalize(arr):
    """Linearly rescale ``arr`` so its minimum maps to -1 and maximum to 1.

    (Name kept as-is -- 'nomalize' -- because the feature pipelines below
    reference it.)
    """
    values = np.array(arr)
    lo = values[np.argmin(values)]
    hi = values[np.argmax(values)]
    return (values - lo) / (hi - lo) * 2.0 - 1
def average(arr, ndays):
    """Trailing ``ndays`` moving average of list ``arr``.

    The series is padded at the front with its first value, so the output
    (a numpy array) has the same length as the input.
    """
    # One copy of the series per lag 0..ndays-1, each shifted right by its
    # lag and front-padded with arr[0].
    shifted = [[arr[0]] * lag + arr[:-lag] if lag > 0 else arr
               for lag in range(ndays)]
    return np.sum(np.array(shifted), axis=0) / float(ndays)
def ave_n(n):
    """Return a one-argument function computing an ``n``-day moving average."""
    def _ave(series):
        return average(series, n)
    return _ave
def offset(arr, ndays):
    """Shift list ``arr`` right by ``ndays``, front-padding with arr[0].

    Returns a numpy array of the same length.  Assumes ndays >= 1 --
    with ndays == 0 the slice arr[:-0] would be empty (same as original).
    """
    padded = [arr[0]] * ndays + arr[:-ndays]
    return np.array(padded)
def offset_n(n):
    """Return a one-argument function shifting a series right by ``n`` days."""
    def _shift(series):
        return offset(series, n)
    return _shift
def merge_fs(fs):
    """Compose the functions in ``fs`` left-to-right into one callable.

    merge_fs([f, g, h])(*args) == h(g(f(*args))).  A single-element list
    returns that function unchanged.
    """
    if len(fs) == 1:
        return fs[0]
    rest = merge_fs(fs[1:])
    return lambda *args: rest(fs[0](*args))
# --- Run parameters ---
# Symbols whose price histories form the feature matrix; y_name is the
# single symbol whose next-day move is predicted.
x_names = 'MSFT|AAPL|GOOG|FB|INTC|AMZN|BIDU'.split('|')
y_name = 'BIDU'
# Fraction of samples used for training; the remainder is held out for test.
percentage_for_training = 0.95
se_dates = [dt.datetime(*d) for d in [(2013,1,3), (2017,10,20)]]
print se_dates
# Features come from [input_start, input_end]; the target window below is
# the same range shifted forward one business day, so each day's features
# predict the NEXT trading day's move.
input_start, input_end = [d.strftime('%Y-%m-%d') for d in se_dates]
se_dates = [next_biz_day(d) for d in se_dates]
print se_dates
predict_start, predict_end = [d.strftime('%Y-%m-%d') for d in se_dates]
# training dataset selection
# Each (label, weight, methods) row yields one feature series per input
# symbol: the named quote field is fetched, piped through the transforms
# left-to-right (see merge_fs), and finally scaled by the weight.
lwfs = [
    # label, weight, methods
    ('Close', 2.0, [get_s, nomalize, sigmoid]),
    ('Close', 5.0, [get_s, get_diff, nomalize, sigmoid]),
    ('Close', 1.0, [get_s, get_diff, offset_n(1), nomalize, sigmoid]),
    ('Close', 1.0, [get_s, get_diff, offset_n(2), nomalize, sigmoid]),
    ('Close', 1.0, [get_s, get_diff, offset_n(3), nomalize, sigmoid]),
    ('Close', 1.0, [get_s, get_diff, offset_n(4), nomalize, sigmoid]),
    ('Open', 3.0, [get_s, get_diff, nomalize, sigmoid]),
    ('High', 2.0, [get_s, get_diff, nomalize, sigmoid]),
    ('Low', 2.0, [get_s, get_diff, nomalize, sigmoid]),
    ('Volume', 1.0, [get_s, nomalize, sigmoid]),
    ('Volume', 1.0, [get_s, ave_n(5), nomalize, sigmoid]),
    ('Close', 1.0, [get_s, ave_n(2), get_diff, nomalize, sigmoid]),
    ('Open', 1.0, [get_s, ave_n(2), get_diff, nomalize, sigmoid]),
    ('Close', 1.0, [get_s, ave_n(3), get_diff, nomalize, sigmoid]),
    ('Open', 1.0, [get_s, ave_n(3), get_diff, nomalize, sigmoid]),
    ('Close', 1.0, [get_s, ave_n(5), get_diff, nomalize, sigmoid]),
    ('Open', 1.0, [get_s, ave_n(5), get_diff, nomalize, sigmoid]),
    ('Close', 1.0, [get_s, ave_n(10), get_diff, nomalize, sigmoid]),
    ('Open', 1.0, [get_s, ave_n(10), get_diff, nomalize, sigmoid]),
    ('Close', 1.0, [get_s, ave_n(20), get_diff, nomalize, sigmoid]),
    ('Open', 1.0, [get_s, ave_n(20), get_diff, nomalize, sigmoid]),
    ('Close', 1.0, [get_s, ave_n(30), get_diff, nomalize, sigmoid]),
    ('Open', 1.0, [get_s, ave_n(30), get_diff, nomalize, sigmoid]),
    ('Close', 1.0, [get_s, ave_n(50), get_diff, nomalize, sigmoid]),
    ('Open', 1.0, [get_s, ave_n(50), get_diff, nomalize, sigmoid]),
    ('Close', 1.0, [get_s, ave_n(80), get_diff, nomalize, sigmoid]),
    ('Open', 1.0, [get_s, ave_n(80), get_diff, nomalize, sigmoid]),
]
# Feature matrix: one row per trading day, one column per (symbol,
# transform) pair, each series scaled by its weight w.  NB: this file is
# Python 2 -- zip() returns a list, which the slicing below relies on.
train_X_all = zip(*[w*(merge_fs(fs)(i, input_start, input_end, l)) for i in x_names for l,w,fs in lwfs])
# Target: next-day relative change of the y_name close (the predict window
# is the input window shifted forward one business day).
train_Y_all = get_diff(get_s(y_name, predict_start, predict_end, 'Close'))
# train_Y_all_10 = [1 if i>0 else -1 for i in get_diff(get_s(y_name, predict_start, predict_end, 'Close'))]
xx1 = get_str(y_name, predict_start, predict_end, 'Date')
xx2 = get_s(y_name, predict_start, predict_end, 'Close')
print zip(xx1,xx2)[-10:]
print "Running for input X({0}) and Y({1})...".format(len(train_X_all), len(train_Y_all))
if len(train_X_all) != len(train_Y_all):
    raise Exception("### Uneven input X({0}) and Y({1}), please Check!!!".format(len(train_X_all), len(train_Y_all)))
n_train_data = int(len(train_X_all)*percentage_for_training)
# The first 30 rows are dropped -- presumably to let the moving-average
# features warm up (though ave_n(50)/ave_n(80) would need more history);
# TODO confirm the intended warm-up length.
train_X, train_Y = train_X_all[30:n_train_data], train_Y_all[30:n_train_data]
test_X, test_Y = train_X_all[n_train_data:], train_Y_all[n_train_data:]
def fit_and_predict(sklnr, train_X, train_Y, test_X):
    # Fit one sklearn-style regressor and print its directional accuracy:
    # a prediction counts as correct when it has the same sign as the
    # actual next-day change.  Reads test_Y from the enclosing module scope.
    sklnr.fit(train_X ,train_Y)
    out_Y = sklnr.predict(test_X)
    # Pair actual vs predicted; a positive product means the sign matched.
    # Python 2: zip() returns a list, so the [-10:] slice below works.
    actual_vs_predict = zip(*[test_Y, out_Y])
    matched_count = [1 if i[0]*i[1]>0 else 0 for i in actual_vs_predict]
    # NOTE(review): raises ZeroDivisionError if the test set is empty.
    accuracy = 1.0* sum(matched_count)/len(matched_count)
    # int(accuracy*1000)/10.0 truncates the percentage to one decimal place.
    print 'Accuracy: {0}% Train({1}):Test({2}) - Model: {3}'.format(
        int(accuracy*1000)/10.0,
        len(train_Y),
        len(test_Y),
        str(sklnr).replace('\n','')[:140])
    print 'output: {}'.format(actual_vs_predict[-10:])
# choose different learners
# Regressors (not classifiers) are used because the target is the continuous
# next-day relative change; accuracy is later judged on the sign only.
# Commented entries are alternative models that were tried and kept for
# reference.
learner = [
    # naive_bayes.GaussianNB(),
    # linear_model.SGDClassifier(),
    # svm.SVC(),
    # tree.DecisionTreeClassifier(),
    # ensemble.RandomForestClassifier(),
    ensemble.AdaBoostRegressor(),
    ensemble.BaggingRegressor(),
    ensemble.ExtraTreesRegressor(),
    ensemble.GradientBoostingRegressor(),
    ensemble.RandomForestRegressor(),
    gaussian_process.GaussianProcessRegressor(),
    linear_model.HuberRegressor(),
    linear_model.PassiveAggressiveRegressor(),
    linear_model.RANSACRegressor(),
    linear_model.SGDRegressor(),
    linear_model.TheilSenRegressor(),
    # multioutput.MultiOutputRegressor(),
    neighbors.KNeighborsRegressor(),
    neighbors.RadiusNeighborsRegressor(),
    neural_network.MLPRegressor(),
    tree.DecisionTreeRegressor(),
    tree.ExtraTreeRegressor(),
    ### linear_model.SGDRegressor(),
    ### tree.DecisionTreeRegressor(),
    ### ensemble.RandomForestRegressor(),
    ### neural_network.MLPRegressor(activation='tanh', solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(15, 2), random_state=1)
    # neural_network.MLPClassifier(activation='tanh', solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(15, 2), random_state=1)
]
# run
for l in learner:
try:
fit_and_predict(l, train_X, train_Y, test_X)
except:
pass
| gpl-3.0 |
mancoast/CPythonPyc_test | crash/271_test_argparse.py | 3 | 141317 | # Author: Steven J. Bethard <steven.bethard@gmail.com>.
import codecs
import inspect
import os
import shutil
import sys
import textwrap
import tempfile
import unittest
import argparse
from StringIO import StringIO
class StdIOBuffer(StringIO):
    """Marker StringIO subclass so stderr_to_parser_error can detect when
    stdout/stderr have already been redirected by an enclosing call."""
    pass
from test import test_support
class TestCase(unittest.TestCase):
    """Base test case that dumps both operands of a failed assertEqual."""

    def assertEqual(self, obj1, obj2):
        # Print both values (repr then str) before delegating, so failures
        # in the data-driven, generated tests are easy to diagnose.
        if obj1 != obj2:
            print('')
            print(repr(obj1))
            print(repr(obj2))
            print(obj1)
            print(obj2)
        super(TestCase, self).assertEqual(obj1, obj2)

    def setUp(self):
        # The tests assume that line wrapping occurs at 80 columns, but this
        # behaviour can be overridden by setting the COLUMNS environment
        # variable. To ensure that this assumption is true, unset COLUMNS.
        env = test_support.EnvironmentVarGuard()
        env.unset("COLUMNS")
        self.addCleanup(env.__exit__)
class TempDirMixin(object):
    """Mixin that runs each test with a fresh temporary working directory."""

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.old_dir = os.getcwd()
        os.chdir(self.temp_dir)

    def tearDown(self):
        os.chdir(self.old_dir)
        # Windows can transiently refuse to remove a directory that was
        # just the process cwd, so keep retrying until rmtree succeeds.
        removed = False
        while not removed:
            try:
                shutil.rmtree(self.temp_dir)
                removed = True
            except WindowsError:
                pass
class Sig(object):
    """Records the positional and keyword arguments of an add_argument call
    so they can be replayed against a parser later."""

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
class NS(object):
    """Attribute namespace with value-based equality; used to express
    expected parse results in the test tables."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __repr__(self):
        pairs = ', '.join('%s=%r' % item
                          for item in sorted(self.__dict__.items()))
        return '%s(%s)' % (type(self).__name__, pairs)

    # Equal-by-value objects must not be used as dict keys.
    __hash__ = None

    def __eq__(self, other):
        return vars(self) == vars(other)

    def __ne__(self, other):
        return not (self == other)
class ArgumentParserError(Exception):
    """Raised in place of SystemExit so tests can inspect a parser failure's
    message, captured output, and exit code."""

    def __init__(self, message, stdout=None, stderr=None, error_code=None):
        super(ArgumentParserError, self).__init__(message, stdout, stderr)
        self.message = message
        self.stdout = stdout
        self.stderr = stderr
        self.error_code = error_code
def stderr_to_parser_error(parse_args, *args, **kwargs):
    """Call ``parse_args`` with stdout/stderr captured; convert SystemExit
    into an ArgumentParserError carrying the captured output and exit code."""
    # if this is being called recursively and stderr or stdout is already being
    # redirected, simply call the function and let the enclosing function
    # catch the exception
    if isinstance(sys.stderr, StdIOBuffer) or isinstance(sys.stdout, StdIOBuffer):
        return parse_args(*args, **kwargs)
    # if this is not being called recursively, redirect stderr and
    # use it as the ArgumentParserError message
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = StdIOBuffer()
    sys.stderr = StdIOBuffer()
    try:
        try:
            result = parse_args(*args, **kwargs)
            # Namespace attributes that captured the redirected streams
            # (e.g. type=FileType('-')) must be swapped back to the real
            # ones before the result escapes this function.
            for key in list(vars(result)):
                if getattr(result, key) is sys.stdout:
                    setattr(result, key, old_stdout)
                if getattr(result, key) is sys.stderr:
                    setattr(result, key, old_stderr)
            return result
        except SystemExit:
            code = sys.exc_info()[1].code
            stdout = sys.stdout.getvalue()
            stderr = sys.stderr.getvalue()
            raise ArgumentParserError("SystemExit", stdout, stderr, code)
    finally:
        # Always restore the real streams, success or failure.
        sys.stdout = old_stdout
        sys.stderr = old_stderr
class ErrorRaisingArgumentParser(argparse.ArgumentParser):
    """ArgumentParser whose exits and errors surface as ArgumentParserError
    (via stderr_to_parser_error) instead of terminating the process."""

    def parse_args(self, *args, **kwargs):
        real_parse_args = super(ErrorRaisingArgumentParser, self).parse_args
        return stderr_to_parser_error(real_parse_args, *args, **kwargs)

    def exit(self, *args, **kwargs):
        real_exit = super(ErrorRaisingArgumentParser, self).exit
        return stderr_to_parser_error(real_exit, *args, **kwargs)

    def error(self, *args, **kwargs):
        real_error = super(ErrorRaisingArgumentParser, self).error
        return stderr_to_parser_error(real_error, *args, **kwargs)
class ParserTesterMetaclass(type):
    """Adds parser tests using the class attributes.

    Classes of this type should specify the following attributes:

    argument_signatures -- a list of Sig objects which specify
        the signatures of Argument objects to be created
    failures -- a list of args lists that should cause the parser
        to fail
    successes -- a list of (initial_args, options, remaining_args) tuples
        where initial_args specifies the string args to be parsed,
        options is a dict that should match the vars() of the options
        parsed out of initial_args, and remaining_args should be any
        remaining unparsed arguments
    """

    def __init__(cls, name, bases, bodydict):
        # The abstract base itself gets no generated tests.
        if name == 'ParserTestCase':
            return

        # default parser signature is empty
        if not hasattr(cls, 'parser_signature'):
            cls.parser_signature = Sig()
        if not hasattr(cls, 'parser_class'):
            cls.parser_class = ErrorRaisingArgumentParser

        # ---------------------------------------
        # functions for adding optional arguments
        # ---------------------------------------
        def no_groups(parser, argument_signatures):
            """Add all arguments directly to the parser"""
            for sig in argument_signatures:
                parser.add_argument(*sig.args, **sig.kwargs)

        def one_group(parser, argument_signatures):
            """Add all arguments under a single group in the parser"""
            group = parser.add_argument_group('foo')
            for sig in argument_signatures:
                group.add_argument(*sig.args, **sig.kwargs)

        def many_groups(parser, argument_signatures):
            """Add each argument in its own group to the parser"""
            for i, sig in enumerate(argument_signatures):
                group = parser.add_argument_group('foo:%i' % i)
                group.add_argument(*sig.args, **sig.kwargs)

        # --------------------------
        # functions for parsing args
        # --------------------------
        def listargs(parser, args):
            """Parse the args by passing in a list"""
            return parser.parse_args(args)

        def sysargs(parser, args):
            """Parse the args by defaulting to sys.argv"""
            old_sys_argv = sys.argv
            sys.argv = [old_sys_argv[0]] + args
            try:
                return parser.parse_args()
            finally:
                sys.argv = old_sys_argv

        # class that holds the combination of one optional argument
        # addition method and one arg parsing method
        class AddTests(object):

            def __init__(self, tester_cls, add_arguments, parse_args):
                self._add_arguments = add_arguments
                self._parse_args = parse_args

                add_arguments_name = self._add_arguments.__name__
                parse_args_name = self._parse_args.__name__

                for test_func in [self.test_failures, self.test_successes]:
                    func_name = test_func.__name__
                    names = func_name, add_arguments_name, parse_args_name
                    test_name = '_'.join(names)

                    # The default argument freezes test_func for this
                    # iteration (late-binding closure fix).
                    def wrapper(self, test_func=test_func):
                        test_func(self)
                    try:
                        wrapper.__name__ = test_name
                    except TypeError:
                        # __name__ may be read-only; the name is cosmetic --
                        # the setattr below is what registers the test.
                        pass
                    setattr(tester_cls, test_name, wrapper)

            def _get_parser(self, tester):
                args = tester.parser_signature.args
                kwargs = tester.parser_signature.kwargs
                parser = tester.parser_class(*args, **kwargs)
                self._add_arguments(parser, tester.argument_signatures)
                return parser

            def test_failures(self, tester):
                parser = self._get_parser(tester)
                for args_str in tester.failures:
                    args = args_str.split()
                    raises = tester.assertRaises
                    raises(ArgumentParserError, parser.parse_args, args)

            def test_successes(self, tester):
                parser = self._get_parser(tester)
                for args, expected_ns in tester.successes:
                    if isinstance(args, str):
                        args = args.split()
                    result_ns = self._parse_args(parser, args)
                    tester.assertEqual(expected_ns, result_ns)

        # add tests for each combination of an optionals adding method
        # and an arg parsing method
        for add_arguments in [no_groups, one_group, many_groups]:
            for parse_args in [listargs, sysargs]:
                AddTests(cls, add_arguments, parse_args)
# Shared base: subclasses of ParserTestCase get test methods generated by
# the metaclass from their argument_signatures/failures/successes tables.
bases = TestCase,
ParserTestCase = ParserTesterMetaclass('ParserTestCase', bases, {})
# ===============
# Optionals tests
# ===============
class TestOptionalsSingleDash(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [Sig('-x')]
failures = ['-x', 'a', '--foo', '-x --foo', '-x -y']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
('-xa', NS(x='a')),
('-x -1', NS(x='-1')),
('-x-1', NS(x='-1')),
]
class TestOptionalsSingleDashCombined(ParserTestCase):
    """Test combining several single-dash options into one token (-xza)"""

    argument_signatures = [
        Sig('-x', action='store_true'),
        Sig('-yyy', action='store_const', const=42),
        Sig('-z'),
    ]
    failures = ['a', '--foo', '-xa', '-x --foo', '-x -z', '-z -x',
                '-yx', '-yz a', '-yyyx', '-yyyza', '-xyza']
    successes = [
        ('', NS(x=False, yyy=None, z=None)),
        ('-x', NS(x=True, yyy=None, z=None)),
        ('-za', NS(x=False, yyy=None, z='a')),
        ('-z a', NS(x=False, yyy=None, z='a')),
        ('-xza', NS(x=True, yyy=None, z='a')),
        ('-xz a', NS(x=True, yyy=None, z='a')),
        ('-x -za', NS(x=True, yyy=None, z='a')),
        ('-x -z a', NS(x=True, yyy=None, z='a')),
        ('-y', NS(x=False, yyy=42, z=None)),
        ('-yyy', NS(x=False, yyy=42, z=None)),
        ('-x -yyy -za', NS(x=True, yyy=42, z='a')),
        ('-x -yyy -z a', NS(x=True, yyy=42, z='a')),
    ]
class TestOptionalsSingleDashLong(ParserTestCase):
"""Test an Optional with a multi-character single-dash option string"""
argument_signatures = [Sig('-foo')]
failures = ['-foo', 'a', '--foo', '-foo --foo', '-foo -y', '-fooa']
successes = [
('', NS(foo=None)),
('-foo a', NS(foo='a')),
('-foo -1', NS(foo='-1')),
('-fo a', NS(foo='a')),
('-f a', NS(foo='a')),
]
class TestOptionalsSingleDashSubsetAmbiguous(ParserTestCase):
"""Test Optionals where option strings are subsets of each other"""
argument_signatures = [Sig('-f'), Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-foo', '-fo', '-foo b', '-foob', '-fooba', '-foora']
successes = [
('', NS(f=None, foobar=None, foorab=None)),
('-f a', NS(f='a', foobar=None, foorab=None)),
('-fa', NS(f='a', foobar=None, foorab=None)),
('-foa', NS(f='oa', foobar=None, foorab=None)),
('-fooa', NS(f='ooa', foobar=None, foorab=None)),
('-foobar a', NS(f=None, foobar='a', foorab=None)),
('-foorab a', NS(f=None, foobar=None, foorab='a')),
]
class TestOptionalsSingleDashAmbiguous(ParserTestCase):
"""Test Optionals that partially match but are not subsets"""
argument_signatures = [Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-f a', '-fa', '-foa', '-foo', '-fo', '-foo b']
successes = [
('', NS(foobar=None, foorab=None)),
('-foob a', NS(foobar='a', foorab=None)),
('-foor a', NS(foobar=None, foorab='a')),
('-fooba a', NS(foobar='a', foorab=None)),
('-foora a', NS(foobar=None, foorab='a')),
('-foobar a', NS(foobar='a', foorab=None)),
('-foorab a', NS(foobar=None, foorab='a')),
]
class TestOptionalsNumeric(ParserTestCase):
"""Test an Optional with a short opt string"""
argument_signatures = [Sig('-1', dest='one')]
failures = ['-1', 'a', '-1 --foo', '-1 -y', '-1 -1', '-1 -2']
successes = [
('', NS(one=None)),
('-1 a', NS(one='a')),
('-1a', NS(one='a')),
('-1-2', NS(one='-2')),
]
class TestOptionalsDoubleDash(ParserTestCase):
"""Test an Optional with a double-dash option string"""
argument_signatures = [Sig('--foo')]
failures = ['--foo', '-f', '-f a', 'a', '--foo -x', '--foo --bar']
successes = [
('', NS(foo=None)),
('--foo a', NS(foo='a')),
('--foo=a', NS(foo='a')),
('--foo -2.5', NS(foo='-2.5')),
('--foo=-2.5', NS(foo='-2.5')),
]
class TestOptionalsDoubleDashPartialMatch(ParserTestCase):
"""Tests partial matching with a double-dash option string"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--bat'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--ba=4', '--badge 5']
successes = [
('', NS(badger=False, bat=None)),
('--bat X', NS(badger=False, bat='X')),
('--bad', NS(badger=True, bat=None)),
('--badg', NS(badger=True, bat=None)),
('--badge', NS(badger=True, bat=None)),
('--badger', NS(badger=True, bat=None)),
]
class TestOptionalsDoubleDashPrefixMatch(ParserTestCase):
"""Tests when one double-dash option string is a prefix of another"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--ba'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--badge 5']
successes = [
('', NS(badger=False, ba=None)),
('--ba X', NS(badger=False, ba='X')),
('--ba=X', NS(badger=False, ba='X')),
('--bad', NS(badger=True, ba=None)),
('--badg', NS(badger=True, ba=None)),
('--badge', NS(badger=True, ba=None)),
('--badger', NS(badger=True, ba=None)),
]
class TestOptionalsSingleDoubleDash(ParserTestCase):
"""Test an Optional with single- and double-dash option strings"""
argument_signatures = [
Sig('-f', action='store_true'),
Sig('--bar'),
Sig('-baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-fbaz', '-bazf', '-b B', 'B']
successes = [
('', NS(f=False, bar=None, baz=None)),
('-f', NS(f=True, bar=None, baz=None)),
('--ba B', NS(f=False, bar='B', baz=None)),
('-f --bar B', NS(f=True, bar='B', baz=None)),
('-f -b', NS(f=True, bar=None, baz=42)),
('-ba -f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixChars(ParserTestCase):
"""Test an Optional with option strings with custom prefixes"""
parser_signature = Sig(prefix_chars='+:/', add_help=False)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz', '-h', '--help', '+h', '::help', '/help']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixCharsAddedHelp(ParserTestCase):
"""When ``-`` not in prefix_chars, default operators created for help
should use the prefix_chars in use rather than - or --
http://bugs.python.org/issue9444"""
parser_signature = Sig(prefix_chars='+:/', add_help=True)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42))
]
class TestOptionalsAlternatePrefixCharsMultipleShortArgs(ParserTestCase):
"""Verify that Optionals must be called with their defined prefixes"""
parser_signature = Sig(prefix_chars='+-', add_help=False)
argument_signatures = [
Sig('-x', action='store_true'),
Sig('+y', action='store_true'),
Sig('+z', action='store_true'),
]
failures = ['-w',
'-xyz',
'+x',
'-y',
'+xyz',
]
successes = [
('', NS(x=False, y=False, z=False)),
('-x', NS(x=True, y=False, z=False)),
('+y -x', NS(x=True, y=True, z=False)),
('+yz -x', NS(x=True, y=True, z=True)),
]
class TestOptionalsShortLong(ParserTestCase):
"""Test a combination of single- and double-dash option strings"""
argument_signatures = [
Sig('-v', '--verbose', '-n', '--noisy', action='store_true'),
]
failures = ['--x --verbose', '-N', 'a', '-v x']
successes = [
('', NS(verbose=False)),
('-v', NS(verbose=True)),
('--verbose', NS(verbose=True)),
('-n', NS(verbose=True)),
('--noisy', NS(verbose=True)),
]
class TestOptionalsDest(ParserTestCase):
"""Tests various means of setting destination"""
argument_signatures = [Sig('--foo-bar'), Sig('--baz', dest='zabbaz')]
failures = ['a']
successes = [
('--foo-bar f', NS(foo_bar='f', zabbaz=None)),
('--baz g', NS(foo_bar=None, zabbaz='g')),
('--foo-bar h --baz i', NS(foo_bar='h', zabbaz='i')),
('--baz j --foo-bar k', NS(foo_bar='k', zabbaz='j')),
]
class TestOptionalsDefault(ParserTestCase):
"""Tests specifying a default for an Optional"""
argument_signatures = [Sig('-x'), Sig('-y', default=42)]
failures = ['a']
successes = [
('', NS(x=None, y=42)),
('-xx', NS(x='x', y=42)),
('-yy', NS(x=None, y='y')),
]
class TestOptionalsNargsDefault(ParserTestCase):
"""Tests not specifying the number of args for an Optional"""
argument_signatures = [Sig('-x')]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
]
class TestOptionalsNargs1(ParserTestCase):
"""Tests specifying the 1 arg for an Optional"""
argument_signatures = [Sig('-x', nargs=1)]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x=['a'])),
]
class TestOptionalsNargs3(ParserTestCase):
"""Tests specifying the 3 args for an Optional"""
argument_signatures = [Sig('-x', nargs=3)]
failures = ['a', '-x', '-x a', '-x a b', 'a -x', 'a -x b']
successes = [
('', NS(x=None)),
('-x a b c', NS(x=['a', 'b', 'c'])),
]
class TestOptionalsNargsOptional(ParserTestCase):
"""Tests specifying an Optional arg for an Optional"""
argument_signatures = [
Sig('-w', nargs='?'),
Sig('-x', nargs='?', const=42),
Sig('-y', nargs='?', default='spam'),
Sig('-z', nargs='?', type=int, const='42', default='84'),
]
failures = ['2']
successes = [
('', NS(w=None, x=None, y='spam', z=84)),
('-w', NS(w=None, x=None, y='spam', z=84)),
('-w 2', NS(w='2', x=None, y='spam', z=84)),
('-x', NS(w=None, x=42, y='spam', z=84)),
('-x 2', NS(w=None, x='2', y='spam', z=84)),
('-y', NS(w=None, x=None, y=None, z=84)),
('-y 2', NS(w=None, x=None, y='2', z=84)),
('-z', NS(w=None, x=None, y='spam', z=42)),
('-z 2', NS(w=None, x=None, y='spam', z=2)),
]
class TestOptionalsNargsZeroOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts zero or more"""
argument_signatures = [
Sig('-x', nargs='*'),
Sig('-y', nargs='*', default='spam'),
]
failures = ['a']
successes = [
('', NS(x=None, y='spam')),
('-x', NS(x=[], y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y', NS(x=None, y=[])),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsNargsOneOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts one or more"""
argument_signatures = [
Sig('-x', nargs='+'),
Sig('-y', nargs='+', default='spam'),
]
failures = ['a', '-x', '-y', 'a -x', 'a -y b']
successes = [
('', NS(x=None, y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsChoices(ParserTestCase):
"""Tests specifying the choices for an Optional"""
argument_signatures = [
Sig('-f', choices='abc'),
Sig('-g', type=int, choices=range(5))]
failures = ['a', '-f d', '-fad', '-ga', '-g 6']
successes = [
('', NS(f=None, g=None)),
('-f a', NS(f='a', g=None)),
('-f c', NS(f='c', g=None)),
('-g 0', NS(f=None, g=0)),
('-g 03', NS(f=None, g=3)),
('-fb -g4', NS(f='b', g=4)),
]
class TestOptionalsRequired(ParserTestCase):
    """Tests an optional action that is required"""

    argument_signatures = [
        Sig('-x', type=int, required=True),
    ]
    failures = ['a', '']
    successes = [
        ('-x 1', NS(x=1)),
        ('-x42', NS(x=42)),
    ]
class TestOptionalsActionStore(ParserTestCase):
"""Tests the store action for an Optional"""
argument_signatures = [Sig('-x', action='store')]
failures = ['a', 'a -x']
successes = [
('', NS(x=None)),
('-xfoo', NS(x='foo')),
]
class TestOptionalsActionStoreConst(ParserTestCase):
"""Tests the store_const action for an Optional"""
argument_signatures = [Sig('-y', action='store_const', const=object)]
failures = ['a']
successes = [
('', NS(y=None)),
('-y', NS(y=object)),
]
class TestOptionalsActionStoreFalse(ParserTestCase):
"""Tests the store_false action for an Optional"""
argument_signatures = [Sig('-z', action='store_false')]
failures = ['a', '-za', '-z a']
successes = [
('', NS(z=True)),
('-z', NS(z=False)),
]
class TestOptionalsActionStoreTrue(ParserTestCase):
"""Tests the store_true action for an Optional"""
argument_signatures = [Sig('--apple', action='store_true')]
failures = ['a', '--apple=b', '--apple b']
successes = [
('', NS(apple=False)),
('--apple', NS(apple=True)),
]
class TestOptionalsActionAppend(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append')]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=None)),
('--baz a', NS(baz=['a'])),
('--baz a --baz b', NS(baz=['a', 'b'])),
]
class TestOptionalsActionAppendWithDefault(ParserTestCase):
    """Tests the append action for an Optional with a list default"""

    argument_signatures = [Sig('--baz', action='append', default=['X'])]
    failures = ['a', '--baz', 'a --baz', '--baz a b']
    successes = [
        ('', NS(baz=['X'])),
        ('--baz a', NS(baz=['X', 'a'])),
        ('--baz a --baz b', NS(baz=['X', 'a', 'b'])),
    ]
class TestOptionalsActionAppendConst(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=None)),
('-b', NS(b=[Exception])),
('-b -cx -b -cyz', NS(b=[Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionAppendConstWithDefault(ParserTestCase):
    """Tests the append_const action for an Optional with a list default"""

    argument_signatures = [
        Sig('-b', action='append_const', const=Exception, default=['X']),
        Sig('-c', action='append', dest='b'),
    ]
    failures = ['a', '-c', 'a -c', '-bx', '-b x']
    successes = [
        ('', NS(b=['X'])),
        ('-b', NS(b=['X', Exception])),
        ('-b -cx -b -cyz', NS(b=['X', Exception, 'x', Exception, 'yz'])),
    ]
class TestOptionalsActionCount(ParserTestCase):
"""Tests the count action for an Optional"""
argument_signatures = [Sig('-x', action='count')]
failures = ['a', '-x a', '-x b', '-x a -x b']
successes = [
('', NS(x=None)),
('-x', NS(x=1)),
]
# ================
# Positional tests
# ================
class TestPositionalsNargsNone(ParserTestCase):
"""Test a Positional that doesn't specify nargs"""
argument_signatures = [Sig('foo')]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo='a')),
]
class TestPositionalsNargs1(ParserTestCase):
"""Test a Positional that specifies an nargs of 1"""
argument_signatures = [Sig('foo', nargs=1)]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo=['a'])),
]
class TestPositionalsNargs2(ParserTestCase):
"""Test a Positional that specifies an nargs of 2"""
argument_signatures = [Sig('foo', nargs=2)]
failures = ['', 'a', '-x', 'a b c']
successes = [
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMore(ParserTestCase):
"""Test a Positional that specifies unlimited nargs"""
argument_signatures = [Sig('foo', nargs='*')]
failures = ['-x']
successes = [
('', NS(foo=[])),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMoreDefault(ParserTestCase):
"""Test a Positional that specifies unlimited nargs and a default"""
argument_signatures = [Sig('foo', nargs='*', default='bar')]
failures = ['-x']
successes = [
('', NS(foo='bar')),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOneOrMore(ParserTestCase):
"""Test a Positional that specifies one or more nargs"""
argument_signatures = [Sig('foo', nargs='+')]
failures = ['', '-x']
successes = [
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOptional(ParserTestCase):
"""Tests an Optional Positional"""
argument_signatures = [Sig('foo', nargs='?')]
failures = ['-x', 'a b']
successes = [
('', NS(foo=None)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalDefault(ParserTestCase):
"""Tests an Optional Positional with a default value"""
argument_signatures = [Sig('foo', nargs='?', default=42)]
failures = ['-x', 'a b']
successes = [
('', NS(foo=42)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalConvertedDefault(ParserTestCase):
"""Tests an Optional Positional with a default value
that needs to be converted to the appropriate type.
"""
argument_signatures = [
Sig('foo', nargs='?', type=int, default='42'),
]
failures = ['-x', 'a b', '1 2']
successes = [
('', NS(foo=42)),
('1', NS(foo=1)),
]
class TestPositionalsNargsNoneNone(ParserTestCase):
"""Test two Positionals that don't specify nargs"""
argument_signatures = [Sig('foo'), Sig('bar')]
failures = ['', '-x', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsNone1(ParserTestCase):
"""Test a Positional with no nargs followed by one with 1"""
argument_signatures = [Sig('foo'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargs2None(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar')]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsNoneZeroOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with unlimited"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='*')]
failures = ['', '--foo']
successes = [
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOneOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with one or more"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOptional(ParserTestCase):
"""Test a Positional with no nargs followed by one with an Optional"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo='a', bar=None)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsZeroOrMoreNone(ParserTestCase):
    """Test a Positional with unlimited nargs followed by one with none"""
    # The '*' positional yields to the required trailing positional,
    # so the last value always goes to bar.
    argument_signatures = [Sig('foo', nargs='*'), Sig('bar')]
    failures = ['', '--foo']
    successes = [
        ('a', NS(foo=[], bar='a')),
        ('a b', NS(foo=['a'], bar='b')),
        ('a b c', NS(foo=['a', 'b'], bar='c')),
    ]
class TestPositionalsNargsOneOrMoreNone(ParserTestCase):
    """Test a Positional with one or more nargs followed by one with none"""
    # At least two values are needed: one for '+' foo, one for bar.
    argument_signatures = [Sig('foo', nargs='+'), Sig('bar')]
    failures = ['', '--foo', 'a']
    successes = [
        ('a b', NS(foo=['a'], bar='b')),
        ('a b c', NS(foo=['a', 'b'], bar='c')),
    ]
class TestPositionalsNargsOptionalNone(ParserTestCase):
    """Test a Positional with an Optional nargs followed by one with none"""
    # With a single value, '?' foo keeps its default and bar gets the value.
    argument_signatures = [Sig('foo', nargs='?', default=42), Sig('bar')]
    failures = ['', '--foo', 'a b c']
    successes = [
        ('a', NS(foo=42, bar='a')),
        ('a b', NS(foo='a', bar='b')),
    ]
class TestPositionalsNargs2ZeroOrMore(ParserTestCase):
    """Test a Positional with 2 nargs followed by one with unlimited"""
    # foo always takes the first two values; bar may be empty.
    argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='*')]
    failures = ['', '--foo', 'a']
    successes = [
        ('a b', NS(foo=['a', 'b'], bar=[])),
        ('a b c', NS(foo=['a', 'b'], bar=['c'])),
    ]
class TestPositionalsNargs2OneOrMore(ParserTestCase):
    """Test a Positional with 2 nargs followed by one with one or more"""
    # Minimum of three values: two for foo, at least one for '+' bar.
    argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='+')]
    failures = ['', '--foo', 'a', 'a b']
    successes = [
        ('a b c', NS(foo=['a', 'b'], bar=['c'])),
    ]
class TestPositionalsNargs2Optional(ParserTestCase):
    """Test a Positional with 2 nargs followed by one optional"""
    # bar is None (its implicit default) when only two values are given.
    argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='?')]
    failures = ['', '--foo', 'a', 'a b c d']
    successes = [
        ('a b', NS(foo=['a', 'b'], bar=None)),
        ('a b c', NS(foo=['a', 'b'], bar='c')),
    ]
class TestPositionalsNargsZeroOrMore1(ParserTestCase):
    """Test a Positional with unlimited nargs followed by one with 1"""
    # The final value is always reserved for the nargs=1 positional.
    argument_signatures = [Sig('foo', nargs='*'), Sig('bar', nargs=1)]
    failures = ['', '--foo', ]
    successes = [
        ('a', NS(foo=[], bar=['a'])),
        ('a b', NS(foo=['a'], bar=['b'])),
        ('a b c', NS(foo=['a', 'b'], bar=['c'])),
    ]
class TestPositionalsNargsOneOrMore1(ParserTestCase):
    """Test a Positional with one or more nargs followed by one with 1"""
    # Two values minimum: '+' foo needs one, nargs=1 bar needs one.
    argument_signatures = [Sig('foo', nargs='+'), Sig('bar', nargs=1)]
    failures = ['', '--foo', 'a']
    successes = [
        ('a b', NS(foo=['a'], bar=['b'])),
        ('a b c', NS(foo=['a', 'b'], bar=['c'])),
    ]
class TestPositionalsNargsOptional1(ParserTestCase):
    """Test a Positional with an Optional nargs followed by one with 1"""
    # With one value, '?' foo stays None and bar takes the value.
    argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs=1)]
    failures = ['', '--foo', 'a b c']
    successes = [
        ('a', NS(foo=None, bar=['a'])),
        ('a b', NS(foo='a', bar=['b'])),
    ]
class TestPositionalsNargsNoneZeroOrMore1(ParserTestCase):
    """Test three Positionals: no nargs, unlimited nargs and 1 nargs"""
    # bar's '*' shrinks to fit: the first value goes to foo, the last to baz.
    argument_signatures = [
        Sig('foo'),
        Sig('bar', nargs='*'),
        Sig('baz', nargs=1),
    ]
    failures = ['', '--foo', 'a']
    successes = [
        ('a b', NS(foo='a', bar=[], baz=['b'])),
        ('a b c', NS(foo='a', bar=['b'], baz=['c'])),
    ]
class TestPositionalsNargsNoneOneOrMore1(ParserTestCase):
    """Test three Positionals: no nargs, one or more nargs and 1 nargs"""
    # Three values minimum: one each for foo, '+' bar and nargs=1 baz.
    argument_signatures = [
        Sig('foo'),
        Sig('bar', nargs='+'),
        Sig('baz', nargs=1),
    ]
    failures = ['', '--foo', 'a', 'b']
    successes = [
        ('a b c', NS(foo='a', bar=['b'], baz=['c'])),
        ('a b c d', NS(foo='a', bar=['b', 'c'], baz=['d'])),
    ]
class TestPositionalsNargsNoneOptional1(ParserTestCase):
    """Test three Positionals: no nargs, optional narg and 1 nargs"""
    # With two values, '?' bar keeps its 0.625 default and baz gets the last.
    argument_signatures = [
        Sig('foo'),
        Sig('bar', nargs='?', default=0.625),
        Sig('baz', nargs=1),
    ]
    failures = ['', '--foo', 'a']
    successes = [
        ('a b', NS(foo='a', bar=0.625, baz=['b'])),
        ('a b c', NS(foo='a', bar='b', baz=['c'])),
    ]
class TestPositionalsNargsOptionalOptional(ParserTestCase):
    """Test two optional nargs"""
    # Values are assigned left to right; unfilled '?' positionals keep
    # their defaults (implicit None for foo, 42 for bar).
    argument_signatures = [
        Sig('foo', nargs='?'),
        Sig('bar', nargs='?', default=42),
    ]
    failures = ['--foo', 'a b c']
    successes = [
        ('', NS(foo=None, bar=42)),
        ('a', NS(foo='a', bar=42)),
        ('a b', NS(foo='a', bar='b')),
    ]
class TestPositionalsNargsOptionalZeroOrMore(ParserTestCase):
    """Test an Optional narg followed by unlimited nargs"""
    # Everything can be empty, so even the bare command line succeeds.
    argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='*')]
    failures = ['--foo']
    successes = [
        ('', NS(foo=None, bar=[])),
        ('a', NS(foo='a', bar=[])),
        ('a b', NS(foo='a', bar=['b'])),
        ('a b c', NS(foo='a', bar=['b', 'c'])),
    ]
class TestPositionalsNargsOptionalOneOrMore(ParserTestCase):
    """Test an Optional narg followed by one or more nargs"""
    # A single value satisfies '+' bar; '?' foo then stays None.
    argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='+')]
    failures = ['', '--foo']
    successes = [
        ('a', NS(foo=None, bar=['a'])),
        ('a b', NS(foo='a', bar=['b'])),
        ('a b c', NS(foo='a', bar=['b', 'c'])),
    ]
class TestPositionalsChoicesString(ParserTestCase):
    """Test a set of single-character choices"""
    # 'ef' fails: the whole token must match one choice, not each character.
    argument_signatures = [Sig('spam', choices=set('abcdefg'))]
    failures = ['', '--foo', 'h', '42', 'ef']
    successes = [
        ('a', NS(spam='a')),
        ('g', NS(spam='g')),
    ]
class TestPositionalsChoicesInt(ParserTestCase):
    """Test a set of integer choices"""
    # type=int runs before the choices check, so '42' converts fine but
    # falls outside range(20).
    argument_signatures = [Sig('spam', type=int, choices=range(20))]
    failures = ['', '--foo', 'h', '42', 'ef']
    successes = [
        ('4', NS(spam=4)),
        ('15', NS(spam=15)),
    ]
class TestPositionalsActionAppend(ParserTestCase):
    """Test the 'append' action"""
    # Two positional declarations share the dest 'spam'; append collects
    # the single value and then the two-value list into one list.
    argument_signatures = [
        Sig('spam', action='append'),
        Sig('spam', action='append', nargs=2),
    ]
    failures = ['', '--foo', 'a', 'a b', 'a b c d']
    successes = [
        ('a b c', NS(spam=['a', ['b', 'c']])),
    ]
# ========================================
# Combined optionals and positionals tests
# ========================================
class TestOptionalsNumericAndPositionals(ParserTestCase):
    """Tests negative number args when numeric options are present"""
    # Because '-4' is an option, arguments that look like negative
    # numbers ('-2', '-315') are treated as unknown options and fail.
    argument_signatures = [
        Sig('x', nargs='?'),
        Sig('-4', dest='y', action='store_true'),
    ]
    failures = ['-2', '-315']
    successes = [
        ('', NS(x=None, y=False)),
        ('a', NS(x='a', y=False)),
        ('-4', NS(x=None, y=True)),
        ('-4 a', NS(x='a', y=True)),
    ]
class TestOptionalsAlmostNumericAndPositionals(ParserTestCase):
    """Tests negative number args when almost numeric options are present"""
    # '-k4' is not a pure number, so '-2' still parses as a negative-number
    # positional; only the '-k' prefix collides ('-k3' fails).
    argument_signatures = [
        Sig('x', nargs='?'),
        Sig('-k4', dest='y', action='store_true'),
    ]
    failures = ['-k3']
    successes = [
        ('', NS(x=None, y=False)),
        ('-2', NS(x='-2', y=False)),
        ('a', NS(x='a', y=False)),
        ('-k4', NS(x=None, y=True)),
        ('-k4 a', NS(x='a', y=True)),
    ]
class TestEmptyAndSpaceContainingArguments(ParserTestCase):
    """Test arguments that are empty strings or contain whitespace."""
    argument_signatures = [
        Sig('x', nargs='?'),
        Sig('-y', '--yyy', dest='y'),
    ]
    failures = ['-y']
    # argv is given as explicit lists here: a plain string could not
    # express empty arguments or embedded spaces (ParserTestCase
    # presumably splits string inputs on whitespace — confirm in base class).
    successes = [
        ([''], NS(x='', y=None)),
        (['a badger'], NS(x='a badger', y=None)),
        (['-a badger'], NS(x='-a badger', y=None)),
        (['-y', ''], NS(x=None, y='')),
        (['-y', 'a badger'], NS(x=None, y='a badger')),
        (['-y', '-a badger'], NS(x=None, y='-a badger')),
        (['--yyy=a badger'], NS(x=None, y='a badger')),
        (['--yyy=-a badger'], NS(x=None, y='-a badger')),
    ]
class TestPrefixCharacterOnlyArguments(ParserTestCase):
    """Test arguments whose names consist solely of prefix characters."""
    parser_signature = Sig(prefix_chars='-+')
    argument_signatures = [
        # A lone '-' is an option here, not the conventional stdin marker.
        Sig('-', dest='x', nargs='?', const='badger'),
        Sig('+', dest='y', type=int, default=42),
        Sig('-+-', dest='z', action='store_true'),
    ]
    failures = ['-y', '+ -']
    successes = [
        ('', NS(x=None, y=42, z=False)),
        ('-', NS(x='badger', y=42, z=False)),
        ('- X', NS(x='X', y=42, z=False)),
        ('+ -3', NS(x=None, y=-3, z=False)),
        ('-+-', NS(x=None, y=42, z=True)),
        ('- ===', NS(x='===', y=42, z=False)),
    ]
class TestNargsZeroOrMore(ParserTestCase):
    """Tests specifying args for an Optional that accepts zero or more"""
    argument_signatures = [Sig('-x', nargs='*'), Sig('y', nargs='*')]
    failures = []
    successes = [
        ('', NS(x=None, y=[])),
        ('-x', NS(x=[], y=[])),
        ('-x a', NS(x=['a'], y=[])),
        # '--' forces the remaining args to be treated as positionals.
        ('-x a -- b', NS(x=['a'], y=['b'])),
        ('a', NS(x=None, y=['a'])),
        ('a -x', NS(x=[], y=['a'])),
        ('a -x b', NS(x=['b'], y=['a'])),
    ]
class TestNargsRemainder(ParserTestCase):
    """Tests specifying a positional with nargs=REMAINDER"""
    # '...' is the literal spelling of argparse.REMAINDER: once y starts
    # consuming, everything (including option-like tokens) goes to it.
    argument_signatures = [Sig('x'), Sig('y', nargs='...'), Sig('-z')]
    failures = ['', '-z', '-z Z']
    successes = [
        ('X', NS(x='X', y=[], z=None)),
        ('-z Z X', NS(x='X', y=[], z='Z')),
        ('X A B -z Z', NS(x='X', y=['A', 'B', '-z', 'Z'], z=None)),
        ('X Y --foo', NS(x='X', y=['Y', '--foo'], z=None)),
    ]
class TestOptionLike(ParserTestCase):
    """Tests options that may or may not be arguments"""
    # Because the parser has a numeric option ('-3'), tokens that look like
    # negative numbers are treated as options, never as values for z.
    argument_signatures = [
        Sig('-x', type=float),
        Sig('-3', type=float, dest='y'),
        Sig('z', nargs='*'),
    ]
    failures = ['-x', '-y2.5', '-xa', '-x -a',
                '-x -3', '-x -3.5', '-3 -3.5',
                '-x -2.5', '-x -2.5 a', '-3 -.5',
                'a x -1', '-x -1 a', '-3 -1 a']
    successes = [
        ('', NS(x=None, y=None, z=[])),
        ('-x 2.5', NS(x=2.5, y=None, z=[])),
        ('-x 2.5 a', NS(x=2.5, y=None, z=['a'])),
        # '-3.5' parses as option '-3' with attached value '.5'.
        ('-3.5', NS(x=None, y=0.5, z=[])),
        ('-3-.5', NS(x=None, y=-0.5, z=[])),
        ('-3 .5', NS(x=None, y=0.5, z=[])),
        ('a -3.5', NS(x=None, y=0.5, z=['a'])),
        ('a', NS(x=None, y=None, z=['a'])),
        ('a -x 1', NS(x=1.0, y=None, z=['a'])),
        ('-x 1 a', NS(x=1.0, y=None, z=['a'])),
        ('-3 1 a', NS(x=None, y=1.0, z=['a'])),
    ]
class TestDefaultSuppress(ParserTestCase):
    """Test actions with suppressed defaults"""
    # SUPPRESS defaults keep unset arguments out of the namespace entirely,
    # hence the empty NS() for the bare command line.
    argument_signatures = [
        Sig('foo', nargs='?', default=argparse.SUPPRESS),
        Sig('bar', nargs='*', default=argparse.SUPPRESS),
        Sig('--baz', action='store_true', default=argparse.SUPPRESS),
    ]
    failures = ['-x']
    successes = [
        ('', NS()),
        ('a', NS(foo='a')),
        ('a b', NS(foo='a', bar=['b'])),
        ('--baz', NS(baz=True)),
        ('a --baz', NS(foo='a', baz=True)),
        ('--baz a b', NS(foo='a', bar=['b'], baz=True)),
    ]
class TestParserDefaultSuppress(ParserTestCase):
    """Test actions with a parser-level default of SUPPRESS"""
    # argument_default=SUPPRESS applies to every argument: unset ones are
    # omitted from the namespace, mirroring TestDefaultSuppress.
    parser_signature = Sig(argument_default=argparse.SUPPRESS)
    argument_signatures = [
        Sig('foo', nargs='?'),
        Sig('bar', nargs='*'),
        Sig('--baz', action='store_true'),
    ]
    failures = ['-x']
    successes = [
        ('', NS()),
        ('a', NS(foo='a')),
        ('a b', NS(foo='a', bar=['b'])),
        ('--baz', NS(baz=True)),
        ('a --baz', NS(foo='a', baz=True)),
        ('--baz a b', NS(foo='a', bar=['b'], baz=True)),
    ]
class TestParserDefault42(ParserTestCase):
    """Test actions with a parser-level default of 42"""
    # version='1.0' exercises the (legacy) parser-level version parameter
    # alongside argument_default — presumably accepted by this suite's
    # argparse version; confirm against the module under test.
    parser_signature = Sig(argument_default=42, version='1.0')
    argument_signatures = [
        Sig('foo', nargs='?'),
        Sig('bar', nargs='*'),
        Sig('--baz', action='store_true'),
    ]
    failures = ['-x']
    successes = [
        ('', NS(foo=42, bar=42, baz=42)),
        ('a', NS(foo='a', bar=42, baz=42)),
        ('a b', NS(foo='a', bar=['b'], baz=42)),
        ('--baz', NS(foo=42, bar=42, baz=True)),
        ('a --baz', NS(foo='a', bar=42, baz=True)),
        ('--baz a b', NS(foo='a', bar=['b'], baz=True)),
    ]
class TestArgumentsFromFile(TempDirMixin, ParserTestCase):
    """Test reading arguments from a file"""

    def setUp(self):
        """Create the fixture files referenced by '@' arguments below."""
        super(TestArgumentsFromFile, self).setUp()
        file_texts = [
            ('hello', 'hello world!\n'),
            ('recursive', '-a\n'
                          'A\n'
                          '@hello'),
            ('invalid', '@no-such-path\n'),
        ]
        for path, text in file_texts:
            # The with-statement closes the handle even if write() raises;
            # the original open/write/close sequence leaked it on error.
            with open(path, 'w') as file:
                file.write(text)

    parser_signature = Sig(fromfile_prefix_chars='@')
    argument_signatures = [
        Sig('-a'),
        Sig('x'),
        Sig('y', nargs='+'),
    ]
    failures = ['', '-b', 'X', '@invalid', '@missing']
    successes = [
        ('X Y', NS(a=None, x='X', y=['Y'])),
        ('X -a A Y Z', NS(a='A', x='X', y=['Y', 'Z'])),
        ('@hello X', NS(a=None, x='hello world!', y=['X'])),
        ('X @hello', NS(a=None, x='X', y=['hello world!'])),
        # '@recursive' pulls in '@hello', and later options override earlier.
        ('-a B @recursive Y Z', NS(a='A', x='hello world!', y=['Y', 'Z'])),
        ('X @recursive Z -a B', NS(a='B', x='X', y=['hello world!', 'Z'])),
    ]
class TestArgumentsFromFileConverter(TempDirMixin, ParserTestCase):
    """Test reading arguments from a file with a custom line converter"""

    def setUp(self):
        """Create the fixture file referenced by '@hello' below."""
        super(TestArgumentsFromFileConverter, self).setUp()
        file_texts = [
            ('hello', 'hello world!\n'),
        ]
        for path, text in file_texts:
            # The with-statement closes the handle even if write() raises;
            # the original open/write/close sequence leaked it on error.
            with open(path, 'w') as file:
                file.write(text)

    class FromFileConverterArgumentParser(ErrorRaisingArgumentParser):
        def convert_arg_line_to_args(self, arg_line):
            # Split each file line on whitespace instead of the default
            # one-argument-per-line behavior.
            for arg in arg_line.split():
                if not arg.strip():
                    # Defensive: split() never yields blank tokens, but
                    # keep the guard to preserve the original behavior.
                    continue
                yield arg

    parser_class = FromFileConverterArgumentParser
    parser_signature = Sig(fromfile_prefix_chars='@')
    argument_signatures = [
        Sig('y', nargs='+'),
    ]
    failures = []
    successes = [
        ('@hello X', NS(y=['hello', 'world!', 'X'])),
    ]
# =====================
# Type conversion tests
# =====================
class TestFileTypeRepr(TestCase):
    """Check that FileType instances produce a useful repr()."""

    def test_r(self):
        # The default bufsize does not appear in the repr.
        file_type = argparse.FileType('r')
        self.assertEqual("FileType('r')", repr(file_type))

    def test_wb_1(self):
        # An explicit bufsize is included after the mode.
        file_type = argparse.FileType('wb', 1)
        self.assertEqual("FileType('wb', 1)", repr(file_type))
class RFile(object):
seen = {}
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
if other in self.seen:
text = self.seen[other]
else:
text = self.seen[other] = other.read()
other.close()
if not isinstance(text, str):
text = text.decode('ascii')
return self.name == other.name == text
class TestFileTypeR(TempDirMixin, ParserTestCase):
    """Test the FileType option/argument type for reading files"""

    def setUp(self):
        """Create two small files for the FileType arguments to open."""
        super(TestFileTypeR, self).setUp()
        for file_name in ['foo', 'bar']:
            # The with-statement closes the handle even if write() raises;
            # the original open/write/close sequence leaked it on error.
            with open(os.path.join(self.temp_dir, file_name), 'w') as file:
                file.write(file_name)

    argument_signatures = [
        Sig('-x', type=argparse.FileType()),
        Sig('spam', type=argparse.FileType('r')),
    ]
    failures = ['-x', '-x bar']
    successes = [
        ('foo', NS(x=None, spam=RFile('foo'))),
        ('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
        ('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
        # '-' is converted to standard input by FileType.
        ('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
    ]
class TestFileTypeRB(TempDirMixin, ParserTestCase):
    """Test the FileType option/argument type for reading binary files"""

    def setUp(self):
        """Create two small files for the FileType('rb') arguments to open."""
        super(TestFileTypeRB, self).setUp()
        for file_name in ['foo', 'bar']:
            # The with-statement closes the handle even if write() raises;
            # the original open/write/close sequence leaked it on error.
            with open(os.path.join(self.temp_dir, file_name), 'w') as file:
                file.write(file_name)

    argument_signatures = [
        Sig('-x', type=argparse.FileType('rb')),
        Sig('spam', type=argparse.FileType('rb')),
    ]
    failures = ['-x', '-x bar']
    successes = [
        ('foo', NS(x=None, spam=RFile('foo'))),
        ('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
        ('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
        # '-' is converted to standard input by FileType.
        ('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
    ]
class WFile(object):
seen = set()
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
if other not in self.seen:
text = 'Check that file is writable.'
if 'b' in other.mode:
text = text.encode('ascii')
other.write(text)
other.close()
self.seen.add(other)
return self.name == other.name
class TestFileTypeW(TempDirMixin, ParserTestCase):
    """Test the FileType option/argument type for writing files"""
    argument_signatures = [
        Sig('-x', type=argparse.FileType('w')),
        Sig('spam', type=argparse.FileType('w')),
    ]
    failures = ['-x', '-x bar']
    successes = [
        ('foo', NS(x=None, spam=WFile('foo'))),
        ('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
        ('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
        # '-' is converted to standard output for write modes.
        ('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
    ]
class TestFileTypeWB(TempDirMixin, ParserTestCase):
    """Test the FileType option/argument type for writing binary files."""
    argument_signatures = [
        Sig('-x', type=argparse.FileType('wb')),
        Sig('spam', type=argparse.FileType('wb')),
    ]
    failures = ['-x', '-x bar']
    successes = [
        ('foo', NS(x=None, spam=WFile('foo'))),
        ('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
        ('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
        # '-' is converted to standard output for write modes.
        ('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
    ]
class TestTypeCallable(ParserTestCase):
    """Test some callables as option/argument types"""
    # Built-in constructors (complex, float) serve directly as type=.
    argument_signatures = [
        Sig('--eggs', type=complex),
        Sig('spam', type=float),
    ]
    failures = ['a', '42j', '--eggs a', '--eggs 2i']
    successes = [
        ('--eggs=42 42', NS(eggs=42, spam=42.0)),
        # '--' lets the negative number '-1.5' parse as a positional.
        ('--eggs 2j -- -1.5', NS(eggs=2j, spam=-1.5)),
        ('1024.675', NS(eggs=None, spam=1024.675)),
    ]
class TestTypeUserDefined(ParserTestCase):
    """Test a user-defined option/argument type"""
    class MyType(TestCase):
        def __init__(self, value):
            self.value = value
        # Defining __eq__ disables hashing explicitly.
        __hash__ = None
        def __eq__(self, other):
            return (type(self), self.value) == (type(other), other.value)
    argument_signatures = [
        Sig('-x', type=MyType),
        Sig('spam', type=MyType),
    ]
    failures = []
    successes = [
        ('a -x b', NS(x=MyType('b'), spam=MyType('a'))),
        ('-xf g', NS(x=MyType('f'), spam=MyType('g'))),
    ]
class TestTypeClassicClass(ParserTestCase):
    """Test a classic class type"""
    # 'class C:' without a base exercised old-style classes on Python 2;
    # on Python 3 it is simply a new-style class.
    class C:
        def __init__(self, value):
            self.value = value
        # Defining __eq__ disables hashing explicitly.
        __hash__ = None
        def __eq__(self, other):
            return (type(self), self.value) == (type(other), other.value)
    argument_signatures = [
        Sig('-x', type=C),
        Sig('spam', type=C),
    ]
    failures = []
    successes = [
        ('a -x b', NS(x=C('b'), spam=C('a'))),
        ('-xf g', NS(x=C('f'), spam=C('g'))),
    ]
class TestTypeRegistration(TestCase):
    """Test a user-defined type made available through parser.register()"""

    def test(self):
        def get_my_type(string):
            return 'my_type{%s}' % string

        parser = argparse.ArgumentParser()
        # After registration, the string 'my_type' can be passed as type=.
        parser.register('type', 'my_type', get_my_type)
        parser.add_argument('-x', type='my_type')
        parser.add_argument('y', type='my_type')

        self.assertEqual(parser.parse_args(['1']),
                         NS(x=None, y='my_type{1}'))
        self.assertEqual(parser.parse_args(['-x', '1', '42']),
                         NS(x='my_type{1}', y='my_type{42}'))
# ============
# Action tests
# ============
class TestActionUserDefined(ParserTestCase):
    """Test a user-defined option/argument action"""
    # Both actions verify the partially-built namespace at call time and
    # re-raise assertion failures as ArgumentParserError so ParserTestCase
    # reports them as parse failures.
    class OptionalAction(argparse.Action):
        def __call__(self, parser, namespace, value, option_string=None):
            try:
                # check destination and option string
                assert self.dest == 'spam', 'dest: %s' % self.dest
                assert option_string == '-s', 'flag: %s' % option_string
                # when option is before argument, badger=2, and when
                # option is after argument, badger=<whatever was set>
                expected_ns = NS(spam=0.25)
                if value in [0.125, 0.625]:
                    expected_ns.badger = 2
                elif value in [2.0]:
                    expected_ns.badger = 84
                else:
                    raise AssertionError('value: %s' % value)
                assert expected_ns == namespace, ('expected %s, got %s' %
                                                  (expected_ns, namespace))
            except AssertionError:
                # sys.exc_info()[1] instead of 'as e' keeps 2.x compatibility
                e = sys.exc_info()[1]
                raise ArgumentParserError('opt_action failed: %s' % e)
            setattr(namespace, 'spam', value)
    class PositionalAction(argparse.Action):
        def __call__(self, parser, namespace, value, option_string=None):
            try:
                assert option_string is None, ('option_string: %s' %
                                               option_string)
                # check destination
                assert self.dest == 'badger', 'dest: %s' % self.dest
                # when argument is before option, spam=0.25, and when
                # option is after argument, spam=<whatever was set>
                expected_ns = NS(badger=2)
                if value in [42, 84]:
                    expected_ns.spam = 0.25
                elif value in [1]:
                    expected_ns.spam = 0.625
                elif value in [2]:
                    expected_ns.spam = 0.125
                else:
                    raise AssertionError('value: %s' % value)
                assert expected_ns == namespace, ('expected %s, got %s' %
                                                  (expected_ns, namespace))
            except AssertionError:
                e = sys.exc_info()[1]
                raise ArgumentParserError('arg_action failed: %s' % e)
            setattr(namespace, 'badger', value)
    argument_signatures = [
        Sig('-s', dest='spam', action=OptionalAction,
            type=float, default=0.25),
        Sig('badger', action=PositionalAction,
            type=int, nargs='?', default=2),
    ]
    failures = []
    successes = [
        ('-s0.125', NS(spam=0.125, badger=2)),
        ('42', NS(spam=0.25, badger=42)),
        ('-s 0.625 1', NS(spam=0.625, badger=1)),
        ('84 -s2', NS(spam=2.0, badger=84)),
    ]
class TestActionRegistration(TestCase):
    """Test a user-defined action made available through parser.register()"""

    class MyAction(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, 'foo[%s]' % values)

    def test(self):
        parser = argparse.ArgumentParser()
        # After registration, the string 'my_action' can be passed as action=.
        parser.register('action', 'my_action', self.MyAction)
        parser.add_argument('badger', action='my_action')

        self.assertEqual(parser.parse_args(['1']), NS(badger='foo[1]'))
        self.assertEqual(parser.parse_args(['42']), NS(badger='foo[42]'))
# ================
# Subparsers tests
# ================
class TestAddSubparsers(TestCase):
    """Test the add_subparsers method"""
    def assertArgumentParserError(self, *args, **kwargs):
        # Convenience wrapper: this suite's parsers raise
        # ArgumentParserError instead of calling sys.exit().
        self.assertRaises(ArgumentParserError, *args, **kwargs)
    def _get_parser(self, subparser_help=False, prefix_chars=None):
        """Build the shared fixture: a main parser with sub-commands '1' and '2'.

        subparser_help adds help= strings to the sub-parsers;
        prefix_chars switches the option prefix (e.g. '+' instead of '-').
        """
        # create a parser with a subparsers argument
        if prefix_chars:
            parser = ErrorRaisingArgumentParser(
                prog='PROG', description='main description', prefix_chars=prefix_chars)
            parser.add_argument(
                prefix_chars[0] * 2 + 'foo', action='store_true', help='foo help')
        else:
            parser = ErrorRaisingArgumentParser(
                prog='PROG', description='main description')
            parser.add_argument(
                '--foo', action='store_true', help='foo help')
        parser.add_argument(
            'bar', type=float, help='bar help')
        # check that only one subparsers argument can be added
        subparsers = parser.add_subparsers(help='command help')
        self.assertArgumentParserError(parser.add_subparsers)
        # add first sub-parser
        parser1_kwargs = dict(description='1 description')
        if subparser_help:
            parser1_kwargs['help'] = '1 help'
        parser1 = subparsers.add_parser('1', **parser1_kwargs)
        parser1.add_argument('-w', type=int, help='w help')
        parser1.add_argument('x', choices='abc', help='x help')
        # add second sub-parser
        parser2_kwargs = dict(description='2 description')
        if subparser_help:
            parser2_kwargs['help'] = '2 help'
        parser2 = subparsers.add_parser('2', **parser2_kwargs)
        parser2.add_argument('-y', choices='123', help='y help')
        parser2.add_argument('z', type=complex, nargs='*', help='z help')
        # return the main parser
        return parser
    def setUp(self):
        super(TestAddSubparsers, self).setUp()
        self.parser = self._get_parser()
        self.command_help_parser = self._get_parser(subparser_help=True)
    def test_parse_args_failures(self):
        # check some failure cases:
        for args_str in ['', 'a', 'a a', '0.5 a', '0.5 1',
                         '0.5 1 -y', '0.5 2 -w']:
            args = args_str.split()
            self.assertArgumentParserError(self.parser.parse_args, args)
    def test_parse_args(self):
        # check some non-failure cases:
        self.assertEqual(
            self.parser.parse_args('0.5 1 b -w 7'.split()),
            NS(foo=False, bar=0.5, w=7, x='b'),
        )
        self.assertEqual(
            self.parser.parse_args('0.25 --foo 2 -y 2 3j -- -1j'.split()),
            NS(foo=True, bar=0.25, y='2', z=[3j, -1j]),
        )
        self.assertEqual(
            self.parser.parse_args('--foo 0.125 1 c'.split()),
            NS(foo=True, bar=0.125, w=None, x='c'),
        )
    def test_parse_known_args(self):
        # Unrecognized options end up in the extras list instead of
        # raising, both before and after the sub-command.
        self.assertEqual(
            self.parser.parse_known_args('0.5 1 b -w 7'.split()),
            (NS(foo=False, bar=0.5, w=7, x='b'), []),
        )
        self.assertEqual(
            self.parser.parse_known_args('0.5 -p 1 b -w 7'.split()),
            (NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
        )
        self.assertEqual(
            self.parser.parse_known_args('0.5 1 b -w 7 -p'.split()),
            (NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
        )
        self.assertEqual(
            self.parser.parse_known_args('0.5 1 b -q -rs -w 7'.split()),
            (NS(foo=False, bar=0.5, w=7, x='b'), ['-q', '-rs']),
        )
        self.assertEqual(
            self.parser.parse_known_args('0.5 -W 1 b -X Y -w 7 Z'.split()),
            (NS(foo=False, bar=0.5, w=7, x='b'), ['-W', '-X', 'Y', 'Z']),
        )
    def test_dest(self):
        # dest= stores the chosen sub-command name in the namespace.
        parser = ErrorRaisingArgumentParser()
        parser.add_argument('--foo', action='store_true')
        subparsers = parser.add_subparsers(dest='bar')
        parser1 = subparsers.add_parser('1')
        parser1.add_argument('baz')
        self.assertEqual(NS(foo=False, bar='1', baz='2'),
                         parser.parse_args('1 2'.split()))
    def test_help(self):
        self.assertEqual(self.parser.format_usage(),
                         'usage: PROG [-h] [--foo] bar {1,2} ...\n')
        self.assertEqual(self.parser.format_help(), textwrap.dedent('''\
            usage: PROG [-h] [--foo] bar {1,2} ...
            main description
            positional arguments:
              bar         bar help
              {1,2}       command help
            optional arguments:
              -h, --help  show this help message and exit
              --foo       foo help
            '''))
    def test_help_extra_prefix_chars(self):
        # Make sure - is still used for help if it is a non-first prefix char
        parser = self._get_parser(prefix_chars='+:-')
        self.assertEqual(parser.format_usage(),
                         'usage: PROG [-h] [++foo] bar {1,2} ...\n')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
            usage: PROG [-h] [++foo] bar {1,2} ...
            main description
            positional arguments:
              bar         bar help
              {1,2}       command help
            optional arguments:
              -h, --help  show this help message and exit
              ++foo       foo help
            '''))
    def test_help_alternate_prefix_chars(self):
        # Without '-' among the prefix chars, help becomes +h/++help.
        parser = self._get_parser(prefix_chars='+:/')
        self.assertEqual(parser.format_usage(),
                         'usage: PROG [+h] [++foo] bar {1,2} ...\n')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
            usage: PROG [+h] [++foo] bar {1,2} ...
            main description
            positional arguments:
              bar         bar help
            optional arguments:
              +h, ++help  show this help message and exit
              ++foo       foo help
            '''))
    def test_parser_command_help(self):
        # With subparser_help=True, each command's help= line is listed
        # under the {1,2} choices entry.
        self.assertEqual(self.command_help_parser.format_usage(),
                         'usage: PROG [-h] [--foo] bar {1,2} ...\n')
        self.assertEqual(self.command_help_parser.format_help(),
                         textwrap.dedent('''\
            usage: PROG [-h] [--foo] bar {1,2} ...
            main description
            positional arguments:
              bar         bar help
              {1,2}       command help
                1         1 help
                2         2 help
            optional arguments:
              -h, --help  show this help message and exit
              --foo       foo help
            '''))
    def test_subparser_title_help(self):
        # title/description place the sub-commands in their own group.
        parser = ErrorRaisingArgumentParser(prog='PROG',
                                            description='main description')
        parser.add_argument('--foo', action='store_true', help='foo help')
        parser.add_argument('bar', help='bar help')
        subparsers = parser.add_subparsers(title='subcommands',
                                           description='command help',
                                           help='additional text')
        parser1 = subparsers.add_parser('1')
        parser2 = subparsers.add_parser('2')
        self.assertEqual(parser.format_usage(),
                         'usage: PROG [-h] [--foo] bar {1,2} ...\n')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
            usage: PROG [-h] [--foo] bar {1,2} ...
            main description
            positional arguments:
              bar         bar help
            optional arguments:
              -h, --help  show this help message and exit
              --foo       foo help
            subcommands:
              command help
              {1,2}       additional text
            '''))
    def _test_subparser_help(self, args_str, expected_help):
        # '-h' on a sub-command raises ArgumentParserError (this suite's
        # parser captures stdout on exit); compare the captured help text.
        try:
            self.parser.parse_args(args_str.split())
        except ArgumentParserError:
            err = sys.exc_info()[1]
            if err.stdout != expected_help:
                print(repr(expected_help))
                print(repr(err.stdout))
            self.assertEqual(err.stdout, expected_help)
    def test_subparser1_help(self):
        self._test_subparser_help('5.0 1 -h', textwrap.dedent('''\
            usage: PROG bar 1 [-h] [-w W] {a,b,c}
            1 description
            positional arguments:
              {a,b,c}     x help
            optional arguments:
              -h, --help  show this help message and exit
              -w W        w help
            '''))
    def test_subparser2_help(self):
        self._test_subparser_help('5.0 2 -h', textwrap.dedent('''\
            usage: PROG bar 2 [-h] [-y {1,2,3}] [z [z ...]]
            2 description
            positional arguments:
              z           z help
            optional arguments:
              -h, --help  show this help message and exit
              -y {1,2,3}  y help
            '''))
# ============
# Groups tests
# ============
class TestPositionalsGroups(TestCase):
    """Tests that order of group positionals matches construction order"""

    def test_nongroup_first(self):
        # A positional added to a named group keeps its original position.
        parser = ErrorRaisingArgumentParser()
        parser.add_argument('foo')
        group = parser.add_argument_group('g')
        group.add_argument('bar')
        parser.add_argument('baz')
        self.assertEqual(NS(foo='1', bar='2', baz='3'),
                         parser.parse_args(['1', '2', '3']))

    def test_group_first(self):
        # Same check with the grouped positional declared first.
        parser = ErrorRaisingArgumentParser()
        group = parser.add_argument_group('xxx')
        group.add_argument('foo')
        parser.add_argument('bar')
        parser.add_argument('baz')
        self.assertEqual(NS(foo='1', bar='2', baz='3'),
                         parser.parse_args(['1', '2', '3']))

    def test_interleaved_groups(self):
        # Positionals interleaved across two groups still parse in
        # declaration order.
        parser = ErrorRaisingArgumentParser()
        group = parser.add_argument_group('xxx')
        parser.add_argument('foo')
        group.add_argument('bar')
        parser.add_argument('baz')
        group = parser.add_argument_group('yyy')
        group.add_argument('frell')
        self.assertEqual(NS(foo='1', bar='2', baz='3', frell='4'),
                         parser.parse_args(['1', '2', '3', '4']))
# ===================
# Parent parser tests
# ===================
class TestParentParsers(TestCase):
"""Tests that parsers can be created with parent parsers"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def setUp(self):
super(TestParentParsers, self).setUp()
self.wxyz_parent = ErrorRaisingArgumentParser(add_help=False)
self.wxyz_parent.add_argument('--w')
x_group = self.wxyz_parent.add_argument_group('x')
x_group.add_argument('-y')
self.wxyz_parent.add_argument('z')
self.abcd_parent = ErrorRaisingArgumentParser(add_help=False)
self.abcd_parent.add_argument('a')
self.abcd_parent.add_argument('-b')
c_group = self.abcd_parent.add_argument_group('c')
c_group.add_argument('--d')
self.w_parent = ErrorRaisingArgumentParser(add_help=False)
self.w_parent.add_argument('--w')
self.z_parent = ErrorRaisingArgumentParser(add_help=False)
self.z_parent.add_argument('z')
# parents with mutually exclusive groups
self.ab_mutex_parent = ErrorRaisingArgumentParser(add_help=False)
group = self.ab_mutex_parent.add_mutually_exclusive_group()
group.add_argument('-a', action='store_true')
group.add_argument('-b', action='store_true')
self.main_program = os.path.basename(sys.argv[0])
def test_single_parent(self):
parser = ErrorRaisingArgumentParser(parents=[self.wxyz_parent])
self.assertEqual(parser.parse_args('-y 1 2 --w 3'.split()),
NS(w='3', y='1', z='2'))
def test_single_parent_mutex(self):
self._test_mutex_ab(self.ab_mutex_parent.parse_args)
parser = ErrorRaisingArgumentParser(parents=[self.ab_mutex_parent])
self._test_mutex_ab(parser.parse_args)
def test_single_granparent_mutex(self):
parents = [self.ab_mutex_parent]
parser = ErrorRaisingArgumentParser(add_help=False, parents=parents)
parser = ErrorRaisingArgumentParser(parents=[parser])
self._test_mutex_ab(parser.parse_args)
def _test_mutex_ab(self, parse_args):
self.assertEqual(parse_args([]), NS(a=False, b=False))
self.assertEqual(parse_args(['-a']), NS(a=True, b=False))
self.assertEqual(parse_args(['-b']), NS(a=False, b=True))
self.assertArgumentParserError(parse_args, ['-a', '-b'])
self.assertArgumentParserError(parse_args, ['-b', '-a'])
self.assertArgumentParserError(parse_args, ['-c'])
self.assertArgumentParserError(parse_args, ['-a', '-c'])
self.assertArgumentParserError(parse_args, ['-b', '-c'])
def test_multiple_parents(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('--d 1 --w 2 3 4'.split()),
NS(a='3', b=None, d='1', w='2', y=None, z='4'))
def test_multiple_parents_mutex(self):
parents = [self.ab_mutex_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('-a --w 2 3'.split()),
NS(a=True, b=False, w='2', y=None, z='3'))
self.assertArgumentParserError(
parser.parse_args, '-a --w 2 3 -b'.split())
self.assertArgumentParserError(
parser.parse_args, '-a -b --w 2 3'.split())
def test_conflicting_parents(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.w_parent, self.wxyz_parent])
def test_conflicting_parents_mutex(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.abcd_parent, self.ab_mutex_parent])
def test_same_argument_name_parents(self):
parents = [self.wxyz_parent, self.z_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('1 2'.split()),
NS(w=None, y=None, z='2'))
def test_subparser_parents(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
abcde_parser = subparsers.add_parser('bar', parents=[self.abcd_parent])
abcde_parser.add_argument('e')
self.assertEqual(parser.parse_args('bar -b 1 --d 2 3 4'.split()),
NS(a='3', b='1', d='2', e='4'))
def test_subparser_parents_mutex(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
parents = [self.ab_mutex_parent]
abc_parser = subparsers.add_parser('foo', parents=parents)
c_group = abc_parser.add_argument_group('c_group')
c_group.add_argument('c')
parents = [self.wxyz_parent, self.ab_mutex_parent]
wxyzabe_parser = subparsers.add_parser('bar', parents=parents)
wxyzabe_parser.add_argument('e')
self.assertEqual(parser.parse_args('foo -a 4'.split()),
NS(a=True, b=False, c='4'))
self.assertEqual(parser.parse_args('bar -b --w 2 3 4'.split()),
NS(a=False, b=True, w='2', y=None, z='3', e='4'))
self.assertArgumentParserError(
parser.parse_args, 'foo -a -b 4'.split())
self.assertArgumentParserError(
parser.parse_args, 'bar -b -a 4'.split())
    def test_parent_help(self):
        # Help output must merge arguments and titled groups from every
        # parent; the literal below is the exact expected text, formatted
        # with the current main program name.
        parents = [self.abcd_parent, self.wxyz_parent]
        parser = ErrorRaisingArgumentParser(parents=parents)
        parser_help = parser.format_help()
        self.assertEqual(parser_help, textwrap.dedent('''\
            usage: {} [-h] [-b B] [--d D] [--w W] [-y Y] a z
            positional arguments:
              a
              z
            optional arguments:
              -h, --help  show this help message and exit
              -b B
              --w W
            c:
              --d D
            x:
              -y Y
            '''.format(self.main_program)))
    def test_groups_parents(self):
        # Argument groups and mutually exclusive groups declared on a parent
        # must be reproduced (with title and description) on the child.
        parent = ErrorRaisingArgumentParser(add_help=False)
        g = parent.add_argument_group(title='g', description='gd')
        g.add_argument('-w')
        g.add_argument('-x')
        m = parent.add_mutually_exclusive_group()
        m.add_argument('-y')
        m.add_argument('-z')
        parser = ErrorRaisingArgumentParser(parents=[parent])
        # Exclusivity of the inherited -y/-z group still holds.
        self.assertRaises(ArgumentParserError, parser.parse_args,
                          ['-y', 'Y', '-z', 'Z'])
        parser_help = parser.format_help()
        self.assertEqual(parser_help, textwrap.dedent('''\
            usage: {} [-h] [-w W] [-x X] [-y Y | -z Z]
            optional arguments:
              -h, --help  show this help message and exit
              -y Y
              -z Z
            g:
              gd
              -w W
              -x X
            '''.format(self.main_program)))
# ==============================
# Mutually exclusive group tests
# ==============================
class TestMutuallyExclusiveGroupErrors(TestCase):
    """Invalid uses of add_mutually_exclusive_group and its help output."""
    def test_invalid_add_argument_group(self):
        # add_mutually_exclusive_group() accepts no 'title' keyword.
        parser = ErrorRaisingArgumentParser()
        self.assertRaises(TypeError,
                          parser.add_mutually_exclusive_group, title='foo')
    def test_invalid_add_argument(self):
        # Required options and positionals are rejected inside a
        # mutually exclusive group.
        parser = ErrorRaisingArgumentParser()
        group = parser.add_mutually_exclusive_group()
        bad_calls = [
            (('--foo',), dict(required=True)),
            (('bar',), {}),
            (('bar',), dict(nargs='+')),
            (('bar',), dict(nargs=1)),
            (('bar',), dict(nargs=argparse.PARSER)),
        ]
        for args, kwargs in bad_calls:
            self.assertRaises(ValueError, group.add_argument,
                              *args, **kwargs)
    def test_help(self):
        # Each group renders as [--a | --b] in the usage line.
        parser = ErrorRaisingArgumentParser(prog='PROG')
        for true_opt, false_opt in [('--foo', '--bar'), ('--soup', '--nuts')]:
            group = parser.add_mutually_exclusive_group()
            group.add_argument(true_opt, action='store_true')
            group.add_argument(false_opt, action='store_false')
        expected = '''\
            usage: PROG [-h] [--foo | --bar] [--soup | --nuts]
            optional arguments:
              -h, --help  show this help message and exit
              --foo
              --bar
              --soup
              --nuts
            '''
        self.assertEqual(parser.format_help(), textwrap.dedent(expected))
class MEMixin(object):
    """Shared checks for mutually-exclusive-group test cases.

    Concrete subclasses supply get_parser() plus the data attributes
    failures, successes, successes_when_not_required,
    usage_when_not_required, usage_when_required and help.
    """
    def test_failures_when_not_required(self):
        parser = self.get_parser(required=False)
        for args_string in self.failures:
            self.assertRaises(ArgumentParserError,
                              parser.parse_args, args_string.split())
    def test_failures_when_required(self):
        # With required=True the empty command line must also fail.
        parser = self.get_parser(required=True)
        for args_string in self.failures + ['']:
            self.assertRaises(ArgumentParserError,
                              parser.parse_args, args_string.split())
    def test_successes_when_not_required(self):
        parser = self.get_parser(required=False)
        all_successes = self.successes + self.successes_when_not_required
        for args_string, expected_ns in all_successes:
            self.assertEqual(parser.parse_args(args_string.split()),
                             expected_ns)
    def test_successes_when_required(self):
        parser = self.get_parser(required=True)
        for args_string, expected_ns in self.successes:
            self.assertEqual(parser.parse_args(args_string.split()),
                             expected_ns)
    def test_usage_when_not_required(self):
        expected = textwrap.dedent(self.usage_when_not_required)
        self.assertEqual(self.get_parser(required=False).format_usage(),
                         expected)
    def test_usage_when_required(self):
        expected = textwrap.dedent(self.usage_when_required)
        self.assertEqual(self.get_parser(required=True).format_usage(),
                         expected)
    def test_help_when_not_required(self):
        expected = self.usage_when_not_required + self.help
        self.assertEqual(self.get_parser(required=False).format_help(),
                         textwrap.dedent(expected))
    def test_help_when_required(self):
        expected = self.usage_when_required + self.help
        self.assertEqual(self.get_parser(required=True).format_help(),
                         textwrap.dedent(expected))
class TestMutuallyExclusiveSimple(MEMixin, TestCase):
    # Two plain optionals in one group; MEMixin turns the data attributes
    # below into parse/usage/help tests.
    def get_parser(self, required=None):
        # PROG with a single mutually exclusive group of --bar/--baz.
        parser = ErrorRaisingArgumentParser(prog='PROG')
        group = parser.add_mutually_exclusive_group(required=required)
        group.add_argument('--bar', help='bar help')
        group.add_argument('--baz', nargs='?', const='Z', help='baz help')
        return parser
    failures = ['--bar X --baz Y', '--bar X --baz']
    successes = [
        ('--bar X', NS(bar='X', baz=None)),
        # Repeating the same option of a group is allowed.
        ('--bar X --bar Z', NS(bar='Z', baz=None)),
        ('--baz Y', NS(bar=None, baz='Y')),
        ('--baz', NS(bar=None, baz='Z')),
    ]
    successes_when_not_required = [
        ('', NS(bar=None, baz=None)),
    ]
    usage_when_not_required = '''\
        usage: PROG [-h] [--bar BAR | --baz [BAZ]]
        '''
    usage_when_required = '''\
        usage: PROG [-h] (--bar BAR | --baz [BAZ])
        '''
    help = '''\
        optional arguments:
          -h, --help   show this help message and exit
          --bar BAR    bar help
          --baz [BAZ]  baz help
        '''
class TestMutuallyExclusiveLong(MEMixin, TestCase):
    # Long option names force the usage line to wrap; MEMixin consumes the
    # data attributes below.
    def get_parser(self, required=None):
        parser = ErrorRaisingArgumentParser(prog='PROG')
        parser.add_argument('--abcde', help='abcde help')
        parser.add_argument('--fghij', help='fghij help')
        group = parser.add_mutually_exclusive_group(required=required)
        group.add_argument('--klmno', help='klmno help')
        group.add_argument('--pqrst', help='pqrst help')
        return parser
    failures = ['--klmno X --pqrst Y']
    successes = [
        ('--klmno X', NS(abcde=None, fghij=None, klmno='X', pqrst=None)),
        ('--abcde Y --klmno X',
         NS(abcde='Y', fghij=None, klmno='X', pqrst=None)),
        ('--pqrst X', NS(abcde=None, fghij=None, klmno=None, pqrst='X')),
        ('--pqrst X --fghij Y',
         NS(abcde=None, fghij='Y', klmno=None, pqrst='X')),
    ]
    successes_when_not_required = [
        ('', NS(abcde=None, fghij=None, klmno=None, pqrst=None)),
    ]
    usage_when_not_required = '''\
    usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
                [--klmno KLMNO | --pqrst PQRST]
    '''
    usage_when_required = '''\
    usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
                (--klmno KLMNO | --pqrst PQRST)
    '''
    help = '''\
    optional arguments:
      -h, --help     show this help message and exit
      --abcde ABCDE  abcde help
      --fghij FGHIJ  fghij help
      --klmno KLMNO  klmno help
      --pqrst PQRST  pqrst help
    '''
class TestMutuallyExclusiveFirstSuppressed(MEMixin, TestCase):
    # A SUPPRESSed member is hidden from usage/help but still enforced.
    def get_parser(self, required):
        parser = ErrorRaisingArgumentParser(prog='PROG')
        group = parser.add_mutually_exclusive_group(required=required)
        group.add_argument('-x', help=argparse.SUPPRESS)
        group.add_argument('-y', action='store_false', help='y help')
        return parser
    failures = ['-x X -y']
    successes = [
        ('-x X', NS(x='X', y=True)),
        ('-x X -x Y', NS(x='Y', y=True)),
        ('-y', NS(x=None, y=False)),
    ]
    successes_when_not_required = [
        ('', NS(x=None, y=True)),
    ]
    usage_when_not_required = '''\
        usage: PROG [-h] [-y]
        '''
    usage_when_required = '''\
        usage: PROG [-h] -y
        '''
    help = '''\
        optional arguments:
          -h, --help  show this help message and exit
          -y          y help
        '''
class TestMutuallyExclusiveManySuppressed(MEMixin, TestCase):
    # When every member is SUPPRESSed the group vanishes from usage/help
    # entirely, but exclusivity is still enforced at parse time.
    def get_parser(self, required):
        parser = ErrorRaisingArgumentParser(prog='PROG')
        group = parser.add_mutually_exclusive_group(required=required)
        add = group.add_argument
        add('--spam', action='store_true', help=argparse.SUPPRESS)
        add('--badger', action='store_false', help=argparse.SUPPRESS)
        add('--bladder', help=argparse.SUPPRESS)
        return parser
    failures = [
        '--spam --badger',
        '--badger --bladder B',
        '--bladder B --spam',
    ]
    successes = [
        ('--spam', NS(spam=True, badger=True, bladder=None)),
        ('--badger', NS(spam=False, badger=False, bladder=None)),
        ('--bladder B', NS(spam=False, badger=True, bladder='B')),
        ('--spam --spam', NS(spam=True, badger=True, bladder=None)),
    ]
    successes_when_not_required = [
        ('', NS(spam=False, badger=True, bladder=None)),
    ]
    usage_when_required = usage_when_not_required = '''\
        usage: PROG [-h]
        '''
    help = '''\
        optional arguments:
          -h, --help  show this help message and exit
        '''
class TestMutuallyExclusiveOptionalAndPositional(MEMixin, TestCase):
    # A group may mix optionals with a nargs='*' positional: supplying the
    # positional excludes the optionals and vice versa.
    def get_parser(self, required):
        parser = ErrorRaisingArgumentParser(prog='PROG')
        group = parser.add_mutually_exclusive_group(required=required)
        group.add_argument('--foo', action='store_true', help='FOO')
        group.add_argument('--spam', help='SPAM')
        group.add_argument('badger', nargs='*', default='X', help='BADGER')
        return parser
    failures = [
        '--foo --spam S',
        '--spam S X',
        'X --foo',
        'X Y Z --spam S',
        '--foo X Y',
    ]
    successes = [
        ('--foo', NS(foo=True, spam=None, badger='X')),
        ('--spam S', NS(foo=False, spam='S', badger='X')),
        ('X', NS(foo=False, spam=None, badger=['X'])),
        ('X Y Z', NS(foo=False, spam=None, badger=['X', 'Y', 'Z'])),
    ]
    successes_when_not_required = [
        # Note the unused default 'X' stays a plain string, not a list.
        ('', NS(foo=False, spam=None, badger='X')),
    ]
    usage_when_not_required = '''\
        usage: PROG [-h] [--foo | --spam SPAM | badger [badger ...]]
        '''
    usage_when_required = '''\
        usage: PROG [-h] (--foo | --spam SPAM | badger [badger ...])
        '''
    help = '''\
        positional arguments:
          badger       BADGER
        optional arguments:
          -h, --help   show this help message and exit
          --foo        FOO
          --spam SPAM  SPAM
        '''
class TestMutuallyExclusiveOptionalsMixed(MEMixin, TestCase):
    # Group members interleaved with ordinary optionals: usage keeps
    # add_argument order and does not draw the group brackets.
    def get_parser(self, required):
        parser = ErrorRaisingArgumentParser(prog='PROG')
        parser.add_argument('-x', action='store_true', help='x help')
        group = parser.add_mutually_exclusive_group(required=required)
        group.add_argument('-a', action='store_true', help='a help')
        group.add_argument('-b', action='store_true', help='b help')
        parser.add_argument('-y', action='store_true', help='y help')
        group.add_argument('-c', action='store_true', help='c help')
        return parser
    failures = ['-a -b', '-b -c', '-a -c', '-a -b -c']
    successes = [
        ('-a', NS(a=True, b=False, c=False, x=False, y=False)),
        ('-b', NS(a=False, b=True, c=False, x=False, y=False)),
        ('-c', NS(a=False, b=False, c=True, x=False, y=False)),
        ('-a -x', NS(a=True, b=False, c=False, x=True, y=False)),
        ('-y -b', NS(a=False, b=True, c=False, x=False, y=True)),
        ('-x -y -c', NS(a=False, b=False, c=True, x=True, y=True)),
    ]
    successes_when_not_required = [
        ('', NS(a=False, b=False, c=False, x=False, y=False)),
        ('-x', NS(a=False, b=False, c=False, x=True, y=False)),
        ('-y', NS(a=False, b=False, c=False, x=False, y=True)),
    ]
    usage_when_required = usage_when_not_required = '''\
        usage: PROG [-h] [-x] [-a] [-b] [-y] [-c]
        '''
    help = '''\
        optional arguments:
          -h, --help  show this help message and exit
          -x          x help
          -a          a help
          -b          b help
          -y          y help
          -c          c help
        '''
class TestMutuallyExclusiveOptionalsAndPositionalsMixed(MEMixin, TestCase):
    # Group mixing an optional positional with flags, interleaved with
    # ordinary arguments; group brackets are again omitted from usage.
    def get_parser(self, required):
        parser = ErrorRaisingArgumentParser(prog='PROG')
        parser.add_argument('x', help='x help')
        parser.add_argument('-y', action='store_true', help='y help')
        group = parser.add_mutually_exclusive_group(required=required)
        group.add_argument('a', nargs='?', help='a help')
        group.add_argument('-b', action='store_true', help='b help')
        group.add_argument('-c', action='store_true', help='c help')
        return parser
    failures = ['X A -b', '-b -c', '-c X A']
    successes = [
        ('X A', NS(a='A', b=False, c=False, x='X', y=False)),
        ('X -b', NS(a=None, b=True, c=False, x='X', y=False)),
        ('X -c', NS(a=None, b=False, c=True, x='X', y=False)),
        ('X A -y', NS(a='A', b=False, c=False, x='X', y=True)),
        ('X -y -b', NS(a=None, b=True, c=False, x='X', y=True)),
    ]
    successes_when_not_required = [
        ('X', NS(a=None, b=False, c=False, x='X', y=False)),
        ('X -y', NS(a=None, b=False, c=False, x='X', y=True)),
    ]
    usage_when_required = usage_when_not_required = '''\
        usage: PROG [-h] [-y] [-b] [-c] x [a]
        '''
    help = '''\
        positional arguments:
          x           x help
          a           a help
        optional arguments:
          -h, --help  show this help message and exit
          -y          y help
          -b          b help
          -c          c help
        '''
# =================================================
# Mutually exclusive group in parent parser tests
# =================================================
class MEPBase(object):
    """Mixin that reruns a test case with its parser used as a parent."""
    def get_parser(self, required=None):
        # Build the original parser, then wrap it via parents=[...] so the
        # inherited groups are exercised instead of the originals.
        parent = super(MEPBase, self).get_parser(required=required)
        return ErrorRaisingArgumentParser(
            prog=parent.prog, add_help=False, parents=[parent])
class TestMutuallyExclusiveGroupErrorsParent(
    MEPBase, TestMutuallyExclusiveGroupErrors):
    # Same tests, with the parser inherited through parents=[...].
    pass
class TestMutuallyExclusiveSimpleParent(
    MEPBase, TestMutuallyExclusiveSimple):
    # Same tests, with the parser inherited through parents=[...].
    pass
class TestMutuallyExclusiveLongParent(
    MEPBase, TestMutuallyExclusiveLong):
    # Same tests, with the parser inherited through parents=[...].
    pass
class TestMutuallyExclusiveFirstSuppressedParent(
    MEPBase, TestMutuallyExclusiveFirstSuppressed):
    # Same tests, with the parser inherited through parents=[...].
    pass
class TestMutuallyExclusiveManySuppressedParent(
    MEPBase, TestMutuallyExclusiveManySuppressed):
    # Same tests, with the parser inherited through parents=[...].
    pass
class TestMutuallyExclusiveOptionalAndPositionalParent(
    MEPBase, TestMutuallyExclusiveOptionalAndPositional):
    # Same tests, with the parser inherited through parents=[...].
    pass
class TestMutuallyExclusiveOptionalsMixedParent(
    MEPBase, TestMutuallyExclusiveOptionalsMixed):
    # Same tests, with the parser inherited through parents=[...].
    pass
class TestMutuallyExclusiveOptionalsAndPositionalsMixedParent(
    MEPBase, TestMutuallyExclusiveOptionalsAndPositionalsMixed):
    # Same tests, with the parser inherited through parents=[...].
    pass
# =================
# Set default tests
# =================
class TestSetDefaults(TestCase):
    def test_set_defaults_no_args(self):
        # set_defaults() values appear without any add_argument() calls,
        # and an attribute preset on the namespace wins over the default.
        parser = ErrorRaisingArgumentParser()
        parser.set_defaults(x='foo')
        parser.set_defaults(y='bar', z=1)
        self.assertEqual(NS(x='foo', y='bar', z=1),
                         parser.parse_args([]))
        self.assertEqual(NS(x='foo', y='bar', z=1),
                         parser.parse_args([], NS()))
        self.assertEqual(NS(x='baz', y='bar', z=1),
                         parser.parse_args([], NS(x='baz')))
        self.assertEqual(NS(x='baz', y='bar', z=2),
                         parser.parse_args([], NS(x='baz', z=2)))
    def test_set_defaults_with_args(self):
        # add_argument(default=...) overrides an earlier set_defaults() for
        # the same dest; command-line values override both.
        parser = ErrorRaisingArgumentParser()
        parser.set_defaults(x='foo', y='bar')
        parser.add_argument('-x', default='xfoox')
        self.assertEqual(NS(x='xfoox', y='bar'),
                         parser.parse_args([]))
        self.assertEqual(NS(x='xfoox', y='bar'),
                         parser.parse_args([], NS()))
        self.assertEqual(NS(x='baz', y='bar'),
                         parser.parse_args([], NS(x='baz')))
        self.assertEqual(NS(x='1', y='bar'),
                         parser.parse_args('-x 1'.split()))
        self.assertEqual(NS(x='1', y='bar'),
                         parser.parse_args('-x 1'.split(), NS()))
        self.assertEqual(NS(x='1', y='bar'),
                         parser.parse_args('-x 1'.split(), NS(x='baz')))
    def test_set_defaults_subparsers(self):
        # Defaults set on the main parser and on a subparser both apply.
        parser = ErrorRaisingArgumentParser()
        parser.set_defaults(x='foo')
        subparsers = parser.add_subparsers()
        parser_a = subparsers.add_parser('a')
        parser_a.set_defaults(y='bar')
        self.assertEqual(NS(x='foo', y='bar'),
                         parser.parse_args('a'.split()))
    def test_set_defaults_parents(self):
        # Defaults set on a parent parser are inherited via parents=[...].
        parent = ErrorRaisingArgumentParser(add_help=False)
        parent.set_defaults(x='foo')
        parser = ErrorRaisingArgumentParser(parents=[parent])
        self.assertEqual(NS(x='foo'), parser.parse_args([]))
    def test_set_defaults_same_as_add_argument(self):
        # set_defaults() called after add_argument() wins over the
        # add_argument defaults; called before, it only fills the gaps.
        parser = ErrorRaisingArgumentParser()
        parser.set_defaults(w='W', x='X', y='Y', z='Z')
        parser.add_argument('-w')
        parser.add_argument('-x', default='XX')
        parser.add_argument('y', nargs='?')
        parser.add_argument('z', nargs='?', default='ZZ')
        # defaults set previously
        self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
                         parser.parse_args([]))
        # reset defaults
        parser.set_defaults(w='WW', x='X', y='YY', z='Z')
        self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
                         parser.parse_args([]))
    def test_set_defaults_same_as_add_argument_group(self):
        # Same interaction as above, but through an argument group.
        parser = ErrorRaisingArgumentParser()
        parser.set_defaults(w='W', x='X', y='Y', z='Z')
        group = parser.add_argument_group('foo')
        group.add_argument('-w')
        group.add_argument('-x', default='XX')
        group.add_argument('y', nargs='?')
        group.add_argument('z', nargs='?', default='ZZ')
        # defaults set previously
        self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
                         parser.parse_args([]))
        # reset defaults
        parser.set_defaults(w='WW', x='X', y='YY', z='Z')
        self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
                         parser.parse_args([]))
# =================
# Get default tests
# =================
class TestGetDefault(TestCase):
    def test_get_default(self):
        # get_default() tracks add_argument defaults and set_defaults(),
        # returning None for unknown or default-less destinations.
        parser = ErrorRaisingArgumentParser()
        def check(foo, bar):
            self.assertEqual(foo, parser.get_default("foo"))
            self.assertEqual(bar, parser.get_default("bar"))
        check(None, None)
        parser.add_argument("--foo")
        check(None, None)
        parser.add_argument("--bar", type=int, default=42)
        check(None, 42)
        parser.set_defaults(foo="badger")
        check("badger", 42)
# ==========================
# Namespace 'contains' tests
# ==========================
class TestNamespaceContainsSimple(TestCase):
    def test_empty(self):
        # An empty Namespace contains no attribute names at all.
        ns = argparse.Namespace()
        self.assertEqual('' in ns, False)
        self.assertEqual('x' in ns, False)
        self.assertEqual('' not in ns, True)
    def test_non_empty(self):
        # Membership reflects exactly the attributes set at construction.
        ns = argparse.Namespace(x=1, y=2)
        for present in ['x', 'y']:
            self.assertEqual(present in ns, True)
        self.assertEqual('x' not in ns, False)
        for absent in ['', 'xx', 'z']:
            self.assertEqual(absent in ns, False)
# =====================
# Help formatting tests
# =====================
class TestHelpFormattingMetaclass(type):
    # For every concrete HelpTestCase subclass, generate six test methods —
    # test_{format,print,print_file}_{usage,help,version} — each comparing
    # the parser's output against the subclass's expected-text attributes.
    def __init__(cls, name, bases, bodydict):
        if name == 'HelpTestCase':
            # The abstract base itself gets no generated tests.
            return
        class AddTests(object):
            def __init__(self, test_class, func_suffix, std_name):
                self.func_suffix = func_suffix
                self.std_name = std_name
                for test_func in [self.test_format,
                                  self.test_print,
                                  self.test_print_file]:
                    test_name = '%s_%s' % (test_func.__name__, func_suffix)
                    # Default argument binds the current test_func, avoiding
                    # the late-binding closure pitfall.
                    def test_wrapper(self, test_func=test_func):
                        test_func(self)
                    try:
                        test_wrapper.__name__ = test_name
                    except TypeError:
                        # NOTE(review): presumably a legacy guard for old
                        # interpreters where __name__ was read-only; a
                        # failed rename is harmless.
                        pass
                    setattr(test_class, test_name, test_wrapper)
            def _get_parser(self, tester):
                # Build the parser described by the tester's Sig attributes.
                parser = argparse.ArgumentParser(
                    *tester.parser_signature.args,
                    **tester.parser_signature.kwargs)
                for argument_sig in tester.argument_signatures:
                    parser.add_argument(*argument_sig.args,
                                        **argument_sig.kwargs)
                group_signatures = tester.argument_group_signatures
                for group_sig, argument_sigs in group_signatures:
                    group = parser.add_argument_group(*group_sig.args,
                                                      **group_sig.kwargs)
                    for argument_sig in argument_sigs:
                        group.add_argument(*argument_sig.args,
                                           **argument_sig.kwargs)
                return parser
            def _test(self, tester, parser_text):
                expected_text = getattr(tester, self.func_suffix)
                expected_text = textwrap.dedent(expected_text)
                if expected_text != parser_text:
                    # Dump both texts and the first differing character to
                    # make whitespace mismatches easier to debug.
                    print(repr(expected_text))
                    print(repr(parser_text))
                    for char1, char2 in zip(expected_text, parser_text):
                        if char1 != char2:
                            print('first diff: %r %r' % (char1, char2))
                            break
                tester.assertEqual(expected_text, parser_text)
            def test_format(self, tester):
                # The format_* methods' return values.
                parser = self._get_parser(tester)
                format = getattr(parser, 'format_%s' % self.func_suffix)
                self._test(tester, format())
            def test_print(self, tester):
                # The print_* methods writing to their default stream,
                # which is temporarily replaced and always restored.
                parser = self._get_parser(tester)
                print_ = getattr(parser, 'print_%s' % self.func_suffix)
                old_stream = getattr(sys, self.std_name)
                setattr(sys, self.std_name, StdIOBuffer())
                try:
                    print_()
                    parser_text = getattr(sys, self.std_name).getvalue()
                finally:
                    setattr(sys, self.std_name, old_stream)
                self._test(tester, parser_text)
            def test_print_file(self, tester):
                # The print_* methods writing to an explicit file object.
                parser = self._get_parser(tester)
                print_ = getattr(parser, 'print_%s' % self.func_suffix)
                sfile = StdIOBuffer()
                print_(sfile)
                parser_text = sfile.getvalue()
                self._test(tester, parser_text)
        # add tests for {format,print}_{usage,help,version}
        for func_suffix, std_name in [('usage', 'stdout'),
                                      ('help', 'stdout'),
                                      ('version', 'stderr')]:
            AddTests(cls, func_suffix, std_name)
# Instantiate the metaclass directly so HelpTestCase subclasses get their
# format/print tests generated automatically.
bases = (TestCase,)
HelpTestCase = TestHelpFormattingMetaclass('HelpTestCase', bases, {})
class TestHelpBiggerOptionals(HelpTestCase):
    """Make sure that argument help aligns when options are longer"""
    # Data consumed by HelpTestCase's generated tests; usage/help/version
    # are the exact expected parser outputs.
    parser_signature = Sig(prog='PROG', description='DESCRIPTION',
                           epilog='EPILOG', version='0.1')
    argument_signatures = [
        Sig('-x', action='store_true', help='X HELP'),
        Sig('--y', help='Y HELP'),
        Sig('foo', help='FOO HELP'),
        Sig('bar', help='BAR HELP'),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: PROG [-h] [-v] [-x] [--y Y] foo bar
        '''
    help = usage + '''\
        DESCRIPTION
        positional arguments:
          foo            FOO HELP
          bar            BAR HELP
        optional arguments:
          -h, --help     show this help message and exit
          -v, --version  show program's version number and exit
          -x             X HELP
          --y Y          Y HELP
        EPILOG
        '''
    version = '''\
        0.1
        '''
class TestHelpBiggerOptionalGroups(HelpTestCase):
    """Make sure that argument help aligns when options are longer"""
    # Like TestHelpBiggerOptionals, plus a titled argument group.
    parser_signature = Sig(prog='PROG', description='DESCRIPTION',
                           epilog='EPILOG', version='0.1')
    argument_signatures = [
        Sig('-x', action='store_true', help='X HELP'),
        Sig('--y', help='Y HELP'),
        Sig('foo', help='FOO HELP'),
        Sig('bar', help='BAR HELP'),
    ]
    argument_group_signatures = [
        (Sig('GROUP TITLE', description='GROUP DESCRIPTION'), [
            Sig('baz', help='BAZ HELP'),
            Sig('-z', nargs='+', help='Z HELP')]),
    ]
    usage = '''\
        usage: PROG [-h] [-v] [-x] [--y Y] [-z Z [Z ...]] foo bar baz
        '''
    help = usage + '''\
        DESCRIPTION
        positional arguments:
          foo            FOO HELP
          bar            BAR HELP
        optional arguments:
          -h, --help     show this help message and exit
          -v, --version  show program's version number and exit
          -x             X HELP
          --y Y          Y HELP
        GROUP TITLE:
          GROUP DESCRIPTION
          baz            BAZ HELP
          -z Z [Z ...]   Z HELP
        EPILOG
        '''
    version = '''\
        0.1
        '''
class TestHelpBiggerPositionals(HelpTestCase):
    """Make sure that help aligns when arguments are longer"""
    # Data consumed by HelpTestCase's generated tests.
    parser_signature = Sig(usage='USAGE', description='DESCRIPTION')
    argument_signatures = [
        Sig('-x', action='store_true', help='X HELP'),
        Sig('--y', help='Y HELP'),
        Sig('ekiekiekifekang', help='EKI HELP'),
        Sig('bar', help='BAR HELP'),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: USAGE
        '''
    help = usage + '''\
        DESCRIPTION
        positional arguments:
          ekiekiekifekang  EKI HELP
          bar              BAR HELP
        optional arguments:
          -h, --help       show this help message and exit
          -x               X HELP
          --y Y            Y HELP
        '''
    version = ''
class TestHelpReformatting(HelpTestCase):
    """Make sure that text after short names starts on the first line"""
    # Oddly-indented descriptions/help must be re-wrapped by the formatter.
    # Trailing backslashes inside the expected literals join lines so the
    # continuation text starts at column 0.
    parser_signature = Sig(
        prog='PROG',
        description='   oddly    formatted\n'
                    'description\n'
                    '\n'
                    'that is so long that it should go onto multiple '
                    'lines when wrapped')
    argument_signatures = [
        Sig('-x', metavar='XX', help='oddly\n'
                                     '    formatted -x help'),
        Sig('y', metavar='yyy', help='normal y help'),
    ]
    argument_group_signatures = [
        (Sig('title', description='\n'
                                  '    oddly formatted group\n'
                                  '\n'
                                  'description'),
         [Sig('-a', action='store_true',
              help=' oddly \n'
                   'formatted -a help  \n'
                   '    again, so long that it should be wrapped over '
                   'multiple lines')]),
    ]
    usage = '''\
        usage: PROG [-h] [-x XX] [-a] yyy
        '''
    help = usage + '''\
        oddly formatted description that is so long that it should go onto \
multiple
        lines when wrapped
        positional arguments:
          yyy         normal y help
        optional arguments:
          -h, --help  show this help message and exit
          -x XX       oddly formatted -x help
        title:
          oddly formatted group description
          -a          oddly formatted -a help again, so long that it should \
be wrapped
                      over multiple lines
        '''
    version = ''
class TestHelpWrappingShortNames(HelpTestCase):
    """Make sure that text after short names starts on the first line"""
    # Long help strings must wrap with continuation lines aligned to the
    # help column; backslash-joined literal lines start at column 0.
    parser_signature = Sig(prog='PROG', description= 'D\nD' * 30)
    argument_signatures = [
        Sig('-x', metavar='XX', help='XHH HX' * 20),
        Sig('y', metavar='yyy', help='YH YH' * 20),
    ]
    argument_group_signatures = [
        (Sig('ALPHAS'), [
            Sig('-a', action='store_true', help='AHHH HHA' * 10)]),
    ]
    usage = '''\
        usage: PROG [-h] [-x XX] [-a] yyy
        '''
    help = usage + '''\
        D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
        DD DD DD DD D
        positional arguments:
          yyy         YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
                      YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
        optional arguments:
          -h, --help  show this help message and exit
          -x XX       XHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH \
HXXHH HXXHH
                      HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HX
        ALPHAS:
          -a          AHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH \
HHAAHHH
                      HHAAHHH HHAAHHH HHA
        '''
    version = ''
class TestHelpWrappingLongNames(HelpTestCase):
    """Make sure that text after long names starts on the next line"""
    # When the invocation exceeds the help column, the help text starts on
    # the following line; backslash-joined literal lines start at column 0.
    parser_signature = Sig(usage='USAGE', description= 'D D' * 30,
                           version='V V'*30)
    argument_signatures = [
        Sig('-x', metavar='X' * 25, help='XH XH' * 20),
        Sig('y', metavar='y' * 25, help='YH YH' * 20),
    ]
    argument_group_signatures = [
        (Sig('ALPHAS'), [
            Sig('-a', metavar='A' * 25, help='AH AH' * 20),
            Sig('z', metavar='z' * 25, help='ZH ZH' * 20)]),
    ]
    usage = '''\
        usage: USAGE
        '''
    help = usage + '''\
        D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
        DD DD DD DD D
        positional arguments:
          yyyyyyyyyyyyyyyyyyyyyyyyy
                                YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
                                YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
        optional arguments:
          -h, --help            show this help message and exit
          -v, --version         show program's version number and exit
          -x XXXXXXXXXXXXXXXXXXXXXXXXX
                                XH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH \
XHXH XHXH
                                XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XH
        ALPHAS:
          -a AAAAAAAAAAAAAAAAAAAAAAAAA
                                AH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH \
AHAH AHAH
                                AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AH
          zzzzzzzzzzzzzzzzzzzzzzzzz
                                ZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH \
ZHZH ZHZH
                                ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZH
        '''
    version = '''\
        V VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV \
VV VV VV
        VV VV VV VV V
        '''
class TestHelpUsage(HelpTestCase):
    """Test basic usage messages"""
    # Exercises every nargs flavor in both plain and grouped arguments.
    parser_signature = Sig(prog='PROG')
    argument_signatures = [
        Sig('-w', nargs='+', help='w'),
        Sig('-x', nargs='*', help='x'),
        Sig('a', help='a'),
        Sig('b', help='b', nargs=2),
        Sig('c', help='c', nargs='?'),
    ]
    argument_group_signatures = [
        (Sig('group'), [
            Sig('-y', nargs='?', help='y'),
            Sig('-z', nargs=3, help='z'),
            Sig('d', help='d', nargs='*'),
            Sig('e', help='e', nargs='+'),
        ])
    ]
    usage = '''\
        usage: PROG [-h] [-w W [W ...]] [-x [X [X ...]]] [-y [Y]] [-z Z Z Z]
                    a b b [c] [d [d ...]] e [e ...]
        '''
    help = usage + '''\
        positional arguments:
          a               a
          b               b
          c               c
        optional arguments:
          -h, --help      show this help message and exit
          -w W [W ...]    w
          -x [X [X ...]]  x
        group:
          -y [Y]          y
          -z Z Z Z        z
          d               d
          e               e
        '''
    version = ''
class TestHelpOnlyUserGroups(HelpTestCase):
    """Test basic usage messages"""
    # With add_help=False and only user-defined groups, help shows just
    # those groups, in declaration order.
    parser_signature = Sig(prog='PROG', add_help=False)
    argument_signatures = []
    argument_group_signatures = [
        (Sig('xxxx'), [
            Sig('-x', help='x'),
            Sig('a', help='a'),
        ]),
        (Sig('yyyy'), [
            Sig('b', help='b'),
            Sig('-y', help='y'),
        ]),
    ]
    usage = '''\
        usage: PROG [-x X] [-y Y] a b
        '''
    help = usage + '''\
        xxxx:
          -x X  x
          a     a
        yyyy:
          b     b
          -y Y  y
        '''
    version = ''
class TestHelpUsageLongProg(HelpTestCase):
    """Test usage messages where the prog is long"""
    # A 60-character prog pushes the argument list onto its own line.
    parser_signature = Sig(prog='P' * 60)
    argument_signatures = [
        Sig('-w', metavar='W'),
        Sig('-x', metavar='X'),
        Sig('a'),
        Sig('b'),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
               [-h] [-w W] [-x X] a b
        '''
    help = usage + '''\
        positional arguments:
          a
          b
        optional arguments:
          -h, --help  show this help message and exit
          -w W
          -x X
        '''
    version = ''
class TestHelpUsageLongProgOptionsWrap(HelpTestCase):
    """Test usage messages where the prog is long and the optionals wrap"""
    # Backslash-joined literal lines continue at column 0.
    parser_signature = Sig(prog='P' * 60)
    argument_signatures = [
        Sig('-w', metavar='W' * 25),
        Sig('-x', metavar='X' * 25),
        Sig('-y', metavar='Y' * 25),
        Sig('-z', metavar='Z' * 25),
        Sig('a'),
        Sig('b'),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
               [-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
               [-y YYYYYYYYYYYYYYYYYYYYYYYYY] [-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
               a b
        '''
    help = usage + '''\
        positional arguments:
          a
          b
        optional arguments:
          -h, --help            show this help message and exit
          -w WWWWWWWWWWWWWWWWWWWWWWWWW
          -x XXXXXXXXXXXXXXXXXXXXXXXXX
          -y YYYYYYYYYYYYYYYYYYYYYYYYY
          -z ZZZZZZZZZZZZZZZZZZZZZZZZZ
        '''
    version = ''
class TestHelpUsageLongProgPositionalsWrap(HelpTestCase):
    """Test usage messages where the prog is long and the positionals wrap"""
    # Data consumed by HelpTestCase's generated tests.
    parser_signature = Sig(prog='P' * 60, add_help=False)
    argument_signatures = [
        Sig('a' * 25),
        Sig('b' * 25),
        Sig('c' * 25),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
               aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
               ccccccccccccccccccccccccc
        '''
    help = usage + '''\
        positional arguments:
          aaaaaaaaaaaaaaaaaaaaaaaaa
          bbbbbbbbbbbbbbbbbbbbbbbbb
          ccccccccccccccccccccccccc
        '''
    version = ''
class TestHelpUsageOptionalsWrap(HelpTestCase):
    """Test usage messages where the optionals wrap"""
    # Backslash-joined literal lines continue at column 0.
    parser_signature = Sig(prog='PROG')
    argument_signatures = [
        Sig('-w', metavar='W' * 25),
        Sig('-x', metavar='X' * 25),
        Sig('-y', metavar='Y' * 25),
        Sig('-z', metavar='Z' * 25),
        Sig('a'),
        Sig('b'),
        Sig('c'),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: PROG [-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
                    [-y YYYYYYYYYYYYYYYYYYYYYYYYY] \
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
                    a b c
        '''
    help = usage + '''\
        positional arguments:
          a
          b
          c
        optional arguments:
          -h, --help            show this help message and exit
          -w WWWWWWWWWWWWWWWWWWWWWWWWW
          -x XXXXXXXXXXXXXXXXXXXXXXXXX
          -y YYYYYYYYYYYYYYYYYYYYYYYYY
          -z ZZZZZZZZZZZZZZZZZZZZZZZZZ
        '''
    version = ''
class TestHelpUsagePositionalsWrap(HelpTestCase):
    """Test usage messages where the positionals wrap"""
    # Data consumed by HelpTestCase's generated tests.
    parser_signature = Sig(prog='PROG')
    argument_signatures = [
        Sig('-x'),
        Sig('-y'),
        Sig('-z'),
        Sig('a' * 25),
        Sig('b' * 25),
        Sig('c' * 25),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: PROG [-h] [-x X] [-y Y] [-z Z]
                    aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
                    ccccccccccccccccccccccccc
        '''
    help = usage + '''\
        positional arguments:
          aaaaaaaaaaaaaaaaaaaaaaaaa
          bbbbbbbbbbbbbbbbbbbbbbbbb
          ccccccccccccccccccccccccc
        optional arguments:
          -h, --help  show this help message and exit
          -x X
          -y Y
          -z Z
        '''
    version = ''
class TestHelpUsageOptionalsPositionalsWrap(HelpTestCase):
    """Test usage messages where the optionals and positionals wrap"""
    # Backslash-joined literal lines continue at column 0.
    parser_signature = Sig(prog='PROG')
    argument_signatures = [
        Sig('-x', metavar='X' * 25),
        Sig('-y', metavar='Y' * 25),
        Sig('-z', metavar='Z' * 25),
        Sig('a' * 25),
        Sig('b' * 25),
        Sig('c' * 25),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
                    [-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
                    aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
                    ccccccccccccccccccccccccc
        '''
    help = usage + '''\
        positional arguments:
          aaaaaaaaaaaaaaaaaaaaaaaaa
          bbbbbbbbbbbbbbbbbbbbbbbbb
          ccccccccccccccccccccccccc
        optional arguments:
          -h, --help            show this help message and exit
          -x XXXXXXXXXXXXXXXXXXXXXXXXX
          -y YYYYYYYYYYYYYYYYYYYYYYYYY
          -z ZZZZZZZZZZZZZZZZZZZZZZZZZ
        '''
    version = ''
class TestHelpUsageOptionalsOnlyWrap(HelpTestCase):
    """Test usage messages where there are only optionals and they wrap"""
    # Backslash-joined literal lines continue at column 0.
    parser_signature = Sig(prog='PROG')
    argument_signatures = [
        Sig('-x', metavar='X' * 25),
        Sig('-y', metavar='Y' * 25),
        Sig('-z', metavar='Z' * 25),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
                    [-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
        '''
    help = usage + '''\
        optional arguments:
          -h, --help            show this help message and exit
          -x XXXXXXXXXXXXXXXXXXXXXXXXX
          -y YYYYYYYYYYYYYYYYYYYYYYYYY
          -z ZZZZZZZZZZZZZZZZZZZZZZZZZ
        '''
    version = ''
class TestHelpUsagePositionalsOnlyWrap(HelpTestCase):
    """Test usage messages where there are only positionals and they wrap"""
    # Data consumed by HelpTestCase's generated tests.
    parser_signature = Sig(prog='PROG', add_help=False)
    argument_signatures = [
        Sig('a' * 25),
        Sig('b' * 25),
        Sig('c' * 25),
    ]
    argument_group_signatures = []
    usage = '''\
        usage: PROG aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
                    ccccccccccccccccccccccccc
        '''
    help = usage + '''\
        positional arguments:
          aaaaaaaaaaaaaaaaaaaaaaaaa
          bbbbbbbbbbbbbbbbbbbbbbbbb
          ccccccccccccccccccccccccc
        '''
    version = ''
class TestHelpVariableExpansion(HelpTestCase):
    """Test that variables are expanded properly in help messages"""
    parser_signature = Sig(prog='PROG')
    # each help string exercises a different %(...)s expansion key
    argument_signatures = [
        Sig('-x', type=int,
            help='x %(prog)s %(default)s %(type)s %%'),
        Sig('-y', action='store_const', default=42, const='XXX',
            help='y %(prog)s %(default)s %(const)s'),
        Sig('--foo', choices='abc',
            help='foo %(prog)s %(default)s %(choices)s'),
        Sig('--bar', default='baz', choices=[1, 2], metavar='BBB',
            help='bar %(prog)s %(default)s %(dest)s'),
        Sig('spam', help='spam %(prog)s %(default)s'),
        Sig('badger', default=0.5, help='badger %(prog)s %(default)s'),
    ]
    argument_group_signatures = [
        (Sig('group'), [
            Sig('-a', help='a %(prog)s %(default)s'),
            Sig('-b', default=-1, help='b %(prog)s %(default)s'),
        ])
    ]
    # NOTE(review): expected text may be missing original leading indentation.
    usage = ('''\
usage: PROG [-h] [-x X] [-y] [--foo {a,b,c}] [--bar BBB] [-a A] [-b B]
spam badger
''')
    help = usage + '''\
positional arguments:
spam spam PROG None
badger badger PROG 0.5
optional arguments:
-h, --help show this help message and exit
-x X x PROG None int %
-y y PROG 42 XXX
--foo {a,b,c} foo PROG None a, b, c
--bar BBB bar PROG baz bar
group:
-a A a PROG None
-b B b PROG -1
'''
    version = ''
class TestHelpVariableExpansionUsageSupplied(HelpTestCase):
    """Test that variables are expanded properly when usage= is present"""
    parser_signature = Sig(prog='PROG', usage='%(prog)s FOO')
    argument_signatures = []
    argument_group_signatures = []
    # the user-supplied usage string itself gets %(prog)s expanded
    usage = ('''\
usage: PROG FOO
''')
    help = usage + '''\
optional arguments:
-h, --help show this help message and exit
'''
    version = ''
class TestHelpVariableExpansionNoArguments(HelpTestCase):
    """Test that variables are expanded properly with no arguments"""
    parser_signature = Sig(prog='PROG', add_help=False)
    argument_signatures = []
    argument_group_signatures = []
    usage = ('''\
usage: PROG
''')
    # with no arguments at all, help is just the usage line
    help = usage
    version = ''
class TestHelpSuppressUsage(HelpTestCase):
    """Test that items can be suppressed in usage messages"""
    parser_signature = Sig(prog='PROG', usage=argparse.SUPPRESS)
    argument_signatures = [
        Sig('--foo', help='foo help'),
        Sig('spam', help='spam help'),
    ]
    argument_group_signatures = []
    # usage is SUPPRESSed, so help contains only the argument sections
    help = '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
    usage = ''
    version = ''
class TestHelpSuppressOptional(HelpTestCase):
    """Test that optional arguments can be suppressed in help messages"""
    parser_signature = Sig(prog='PROG', add_help=False)
    argument_signatures = [
        Sig('--foo', help=argparse.SUPPRESS),
        Sig('spam', help='spam help'),
    ]
    argument_group_signatures = []
    # --foo is hidden from both usage and the help body
    usage = '''\
usage: PROG spam
'''
    help = usage + '''\
positional arguments:
spam spam help
'''
    version = ''
class TestHelpSuppressOptionalGroup(HelpTestCase):
    """Test that optional groups can be suppressed in help messages"""
    parser_signature = Sig(prog='PROG')
    argument_signatures = [
        Sig('--foo', help='foo help'),
        Sig('spam', help='spam help'),
    ]
    # the whole 'group' section disappears because its only member is hidden
    argument_group_signatures = [
        (Sig('group'), [Sig('--bar', help=argparse.SUPPRESS)]),
    ]
    usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
    help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
    version = ''
class TestHelpSuppressPositional(HelpTestCase):
    """Test that positional arguments can be suppressed in help messages"""
    parser_signature = Sig(prog='PROG')
    argument_signatures = [
        Sig('--foo', help='foo help'),
        Sig('spam', help=argparse.SUPPRESS),
    ]
    argument_group_signatures = []
    # suppressed positional 'spam' appears in neither usage nor help
    usage = '''\
usage: PROG [-h] [--foo FOO]
'''
    help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
    version = ''
class TestHelpRequiredOptional(HelpTestCase):
    """Test that required options don't look optional"""
    parser_signature = Sig(prog='PROG')
    argument_signatures = [
        Sig('--foo', required=True, help='foo help'),
    ]
    argument_group_signatures = []
    # required options appear without surrounding [brackets] in usage
    usage = '''\
usage: PROG [-h] --foo FOO
'''
    help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
    version = ''
class TestHelpAlternatePrefixChars(HelpTestCase):
    """Test that options display with different prefix characters"""
    # '^' and ';' replace '-' as option prefixes; -h is disabled since it
    # would not match the prefix set
    parser_signature = Sig(prog='PROG', prefix_chars='^;', add_help=False)
    argument_signatures = [
        Sig('^^foo', action='store_true', help='foo help'),
        Sig(';b', ';;bar', help='bar help'),
    ]
    argument_group_signatures = []
    usage = '''\
usage: PROG [^^foo] [;b BAR]
'''
    help = usage + '''\
optional arguments:
^^foo foo help
;b BAR, ;;bar BAR bar help
'''
    version = ''
class TestHelpNoHelpOptional(HelpTestCase):
    """Test that the --help argument can be suppressed help messages"""
    parser_signature = Sig(prog='PROG', add_help=False)
    argument_signatures = [
        Sig('--foo', help='foo help'),
        Sig('spam', help='spam help'),
    ]
    argument_group_signatures = []
    # add_help=False: no -h/--help entry anywhere
    usage = '''\
usage: PROG [--foo FOO] spam
'''
    help = usage + '''\
positional arguments:
spam spam help
optional arguments:
--foo FOO foo help
'''
    version = ''
class TestHelpVersionOptional(HelpTestCase):
    """Test that the --version argument can be suppressed help messages"""
    # the deprecated version= constructor argument auto-adds -v/--version
    parser_signature = Sig(prog='PROG', version='1.0')
    argument_signatures = [
        Sig('--foo', help='foo help'),
        Sig('spam', help='spam help'),
    ]
    argument_group_signatures = []
    usage = '''\
usage: PROG [-h] [-v] [--foo FOO] spam
'''
    help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
--foo FOO foo help
'''
    version = '''\
1.0
'''
class TestHelpNone(HelpTestCase):
    """Test that no errors occur if no help is specified"""
    parser_signature = Sig(prog='PROG')
    # no help= strings at all: entries appear with empty descriptions
    argument_signatures = [
        Sig('--foo'),
        Sig('spam'),
    ]
    argument_group_signatures = []
    usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
    help = usage + '''\
positional arguments:
spam
optional arguments:
-h, --help show this help message and exit
--foo FOO
'''
    version = ''
class TestHelpTupleMetavar(HelpTestCase):
    """Test specifying metavar as a tuple"""
    parser_signature = Sig(prog='PROG')
    # a tuple metavar names each of the nargs slots individually
    argument_signatures = [
        Sig('-w', help='w', nargs='+', metavar=('W1', 'W2')),
        Sig('-x', help='x', nargs='*', metavar=('X1', 'X2')),
        Sig('-y', help='y', nargs=3, metavar=('Y1', 'Y2', 'Y3')),
        Sig('-z', help='z', nargs='?', metavar=('Z1', )),
    ]
    argument_group_signatures = []
    # NOTE(review): expected text may be missing original leading indentation.
    usage = '''\
usage: PROG [-h] [-w W1 [W2 ...]] [-x [X1 [X2 ...]]] [-y Y1 Y2 Y3] \
[-z [Z1]]
'''
    help = usage + '''\
optional arguments:
-h, --help show this help message and exit
-w W1 [W2 ...] w
-x [X1 [X2 ...]] x
-y Y1 Y2 Y3 y
-z [Z1] z
'''
    version = ''
class TestHelpRawText(HelpTestCase):
    """Test the RawTextHelpFormatter"""
    # RawTextHelpFormatter keeps newlines/indentation in description,
    # group descriptions AND individual help strings
    parser_signature = Sig(
        prog='PROG', formatter_class=argparse.RawTextHelpFormatter,
        description='Keep the formatting\n'
                    ' exactly as it is written\n'
                    '\n'
                    'here\n')
    argument_signatures = [
        Sig('--foo', help=' foo help should also\n'
                          'appear as given here'),
        Sig('spam', help='spam help'),
    ]
    argument_group_signatures = [
        (Sig('title', description=' This text\n'
                                  ' should be indented\n'
                                  ' exactly like it is here\n'),
         [Sig('--bar', help='bar help')]),
    ]
    # NOTE(review): expected text may be missing original leading indentation.
    usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
    help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should also
appear as given here
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
    version = ''
class TestHelpRawDescription(HelpTestCase):
    """Test the RawDescriptionHelpFormatter"""
    # RawDescriptionHelpFormatter keeps newlines only in the (group)
    # descriptions; per-argument help strings are still re-wrapped
    parser_signature = Sig(
        prog='PROG', formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Keep the formatting\n'
                    ' exactly as it is written\n'
                    '\n'
                    'here\n')
    argument_signatures = [
        Sig('--foo', help=' foo help should not\n'
                          ' retain this odd formatting'),
        Sig('spam', help='spam help'),
    ]
    argument_group_signatures = [
        (Sig('title', description=' This text\n'
                                  ' should be indented\n'
                                  ' exactly like it is here\n'),
         [Sig('--bar', help='bar help')]),
    ]
    # NOTE(review): expected text may be missing original leading indentation.
    usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
    help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should not retain this odd formatting
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
    version = ''
class TestHelpArgumentDefaults(HelpTestCase):
    """Test the ArgumentDefaultsHelpFormatter"""
    # '(default: ...)' is appended only to arguments that both have a
    # non-suppressed default AND a help string without %(default)s in it
    parser_signature = Sig(
        prog='PROG', formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='description')
    argument_signatures = [
        Sig('--foo', help='foo help - oh and by the way, %(default)s'),
        Sig('--bar', action='store_true', help='bar help'),
        Sig('spam', help='spam help'),
        Sig('badger', nargs='?', default='wooden', help='badger help'),
    ]
    argument_group_signatures = [
        (Sig('title', description='description'),
         [Sig('--baz', type=int, default=42, help='baz help')]),
    ]
    # NOTE(review): expected text may be missing original leading indentation.
    usage = '''\
usage: PROG [-h] [--foo FOO] [--bar] [--baz BAZ] spam [badger]
'''
    help = usage + '''\
description
positional arguments:
spam spam help
badger badger help (default: wooden)
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help - oh and by the way, None
--bar bar help (default: False)
title:
description
--baz BAZ baz help (default: 42)
'''
    version = ''
class TestHelpVersionAction(HelpTestCase):
    """Test the default help for the version action"""
    parser_signature = Sig(prog='PROG', description='description')
    # action='version' supplies a default help string automatically
    argument_signatures = [Sig('-V', '--version', action='version', version='3.6')]
    argument_group_signatures = []
    usage = '''\
usage: PROG [-h] [-V]
'''
    help = usage + '''\
description
optional arguments:
-h, --help show this help message and exit
-V, --version show program's version number and exit
'''
    version = ''
# =====================================
# Optional/Positional constructor tests
# =====================================
class TestInvalidArgumentConstructors(TestCase):
    """Test a bunch of invalid Argument constructors"""

    def assertTypeError(self, *args, **kwargs):
        # add_argument must raise TypeError for these arguments
        parser = argparse.ArgumentParser()
        self.assertRaises(TypeError, parser.add_argument,
                          *args, **kwargs)

    def assertValueError(self, *args, **kwargs):
        # add_argument must raise ValueError for these arguments
        parser = argparse.ArgumentParser()
        self.assertRaises(ValueError, parser.add_argument,
                          *args, **kwargs)

    def test_invalid_keyword_arguments(self):
        self.assertTypeError('-x', bar=None)
        # optparse-style callback keywords are not supported by argparse
        self.assertTypeError('-y', callback='foo')
        self.assertTypeError('-y', callback_args=())
        self.assertTypeError('-y', callback_kwargs={})

    def test_missing_destination(self):
        self.assertTypeError()
        for action in ['append', 'store']:
            self.assertTypeError(action=action)

    def test_invalid_option_strings(self):
        # option strings consisting only of prefix characters are invalid
        self.assertValueError('--')
        self.assertValueError('---')

    def test_invalid_type(self):
        # type must be a callable, not the *name* of a type
        self.assertValueError('--foo', type='int')

    def test_invalid_action(self):
        self.assertValueError('-x', action='foo')
        self.assertValueError('foo', action='baz')
        parser = argparse.ArgumentParser()
        try:
            parser.add_argument("--foo", action="store-true")
        except ValueError:
            # sys.exc_info() keeps this compatible with Python 2 and 3
            e = sys.exc_info()[1]
            expected = 'unknown action'
            msg = 'expected %r, found %r' % (expected, e)
            self.assertTrue(expected in str(e), msg)

    def test_multiple_dest(self):
        parser = argparse.ArgumentParser()
        parser.add_argument(dest='foo')
        try:
            # positional name and explicit dest= together are ambiguous
            parser.add_argument('bar', dest='baz')
        except ValueError:
            e = sys.exc_info()[1]
            expected = 'dest supplied twice for positional argument'
            msg = 'expected %r, found %r' % (expected, e)
            self.assertTrue(expected in str(e), msg)

    def test_no_argument_actions(self):
        # zero-argument actions reject type/nargs/choices
        for action in ['store_const', 'store_true', 'store_false',
                       'append_const', 'count']:
            for attrs in [dict(type=int), dict(nargs='+'),
                          dict(choices='ab')]:
                self.assertTypeError('-x', action=action, **attrs)

    def test_no_argument_no_const_actions(self):
        # options with zero arguments
        for action in ['store_true', 'store_false', 'count']:
            # const is always disallowed
            self.assertTypeError('-x', const='foo', action=action)
            # nargs is always disallowed
            self.assertTypeError('-x', nargs='*', action=action)

    def test_more_than_one_argument_actions(self):
        for action in ['store', 'append']:
            # nargs=0 is disallowed
            self.assertValueError('-x', nargs=0, action=action)
            self.assertValueError('spam', nargs=0, action=action)
            # const is disallowed with non-optional arguments
            for nargs in [1, '*', '+']:
                self.assertValueError('-x', const='foo',
                                      nargs=nargs, action=action)
                self.assertValueError('spam', const='foo',
                                      nargs=nargs, action=action)

    def test_required_const_actions(self):
        for action in ['store_const', 'append_const']:
            # nargs is always disallowed
            self.assertTypeError('-x', nargs='+', action=action)

    def test_parsers_action_missing_params(self):
        # the 'parsers' action needs prog AND parser_class
        self.assertTypeError('command', action='parsers')
        self.assertTypeError('command', action='parsers', prog='PROG')
        self.assertTypeError('command', action='parsers',
                             parser_class=argparse.ArgumentParser)

    def test_required_positional(self):
        # required= only makes sense for optionals
        self.assertTypeError('foo', required=True)

    def test_user_defined_action(self):

        class Success(Exception):
            pass

        class Action(object):

            def __init__(self,
                         option_strings,
                         dest,
                         const,
                         default,
                         required=False):
                # raise Success only if all constructor args came through
                if dest == 'spam':
                    if const is Success:
                        if default is Success:
                            raise Success()

            def __call__(self, *args, **kwargs):
                pass

        parser = argparse.ArgumentParser()
        self.assertRaises(Success, parser.add_argument, '--spam',
                          action=Action, default=Success, const=Success)
        self.assertRaises(Success, parser.add_argument, 'spam',
                          action=Action, default=Success, const=Success)
# ================================
# Actions returned by add_argument
# ================================
class TestActionsReturned(TestCase):
    """Check the Action objects handed back by add_argument()."""

    def test_dest(self):
        parser = argparse.ArgumentParser()
        # dest comes from the first long option if any, else the first short
        cases = [(('--foo',), 'foo'),
                 (('-b', '--bar'), 'bar'),
                 (('-x', '-y'), 'x')]
        for option_strings, expected_dest in cases:
            returned = parser.add_argument(*option_strings)
            self.assertEqual(returned.dest, expected_dest)

    def test_misc(self):
        parser = argparse.ArgumentParser()
        returned = parser.add_argument(
            '--foo', nargs='?', const=42, default=84, type=int,
            choices=[1, 2], help='FOO', metavar='BAR', dest='baz')
        # every keyword must be stored verbatim on the returned Action
        expected = dict(nargs='?', const=42, default=84, type=int,
                        choices=[1, 2], help='FOO', metavar='BAR', dest='baz')
        for attribute, value in expected.items():
            self.assertEqual(getattr(returned, attribute), value)
# ================================
# Argument conflict handling tests
# ================================
class TestConflictHandling(TestCase):
    """Conflicting option strings: default 'error' handler vs 'resolve'."""

    def test_bad_type(self):
        # unknown conflict_handler names are rejected at construction time
        self.assertRaises(ValueError, argparse.ArgumentParser,
                          conflict_handler='foo')

    def test_conflict_error(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-x')
        self.assertRaises(argparse.ArgumentError,
                          parser.add_argument, '-x')
        parser.add_argument('--spam')
        self.assertRaises(argparse.ArgumentError,
                          parser.add_argument, '--spam')

    def test_resolve_error(self):
        # 'resolve' silently replaces the earlier conflicting definition
        get_parser = argparse.ArgumentParser
        parser = get_parser(prog='PROG', conflict_handler='resolve')

        # NOTE(review): the dedent'ed expected strings below appear to have
        # lost their original leading indentation in this copy.
        parser.add_argument('-x', help='OLD X')
        parser.add_argument('-x', help='NEW X')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
'''))

        parser.add_argument('--spam', metavar='OLD_SPAM')
        parser.add_argument('--spam', metavar='NEW_SPAM')
        self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X] [--spam NEW_SPAM]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
--spam NEW_SPAM
'''))
# =============================
# Help and Version option tests
# =============================
class TestOptionalsHelpVersionActions(TestCase):
    """Test the help and version actions"""

    def _get_error(self, func, *args, **kwargs):
        # Return the ArgumentParserError that func raises; fail if it doesn't.
        try:
            func(*args, **kwargs)
        except ArgumentParserError:
            return sys.exc_info()[1]
        else:
            self.assertRaises(ArgumentParserError, func, *args, **kwargs)

    def assertPrintHelpExit(self, parser, args_str):
        # help output goes to stdout
        self.assertEqual(
            parser.format_help(),
            self._get_error(parser.parse_args, args_str.split()).stdout)

    def assertPrintVersionExit(self, parser, args_str):
        # version output goes to stderr
        self.assertEqual(
            parser.format_version(),
            self._get_error(parser.parse_args, args_str.split()).stderr)

    def assertArgumentParserError(self, parser, *args):
        self.assertRaises(ArgumentParserError, parser.parse_args, args)

    def test_version(self):
        parser = ErrorRaisingArgumentParser(version='1.0')
        self.assertPrintHelpExit(parser, '-h')
        self.assertPrintHelpExit(parser, '--help')
        self.assertPrintVersionExit(parser, '-v')
        self.assertPrintVersionExit(parser, '--version')

    def test_version_format(self):
        parser = ErrorRaisingArgumentParser(prog='PPP', version='%(prog)s 3.5')
        msg = self._get_error(parser.parse_args, ['-v']).stderr
        self.assertEqual('PPP 3.5\n', msg)

    def test_version_no_help(self):
        parser = ErrorRaisingArgumentParser(add_help=False, version='1.0')
        self.assertArgumentParserError(parser, '-h')
        self.assertArgumentParserError(parser, '--help')
        self.assertPrintVersionExit(parser, '-v')
        self.assertPrintVersionExit(parser, '--version')

    def test_version_action(self):
        parser = ErrorRaisingArgumentParser(prog='XXX')
        parser.add_argument('-V', action='version', version='%(prog)s 3.7')
        msg = self._get_error(parser.parse_args, ['-V']).stderr
        self.assertEqual('XXX 3.7\n', msg)

    def test_no_help(self):
        parser = ErrorRaisingArgumentParser(add_help=False)
        self.assertArgumentParserError(parser, '-h')
        self.assertArgumentParserError(parser, '--help')
        self.assertArgumentParserError(parser, '-v')
        self.assertArgumentParserError(parser, '--version')

    def test_alternate_help_version(self):
        # help/version actions attached to custom option strings
        parser = ErrorRaisingArgumentParser()
        parser.add_argument('-x', action='help')
        parser.add_argument('-y', action='version')
        self.assertPrintHelpExit(parser, '-x')
        self.assertPrintVersionExit(parser, '-y')
        self.assertArgumentParserError(parser, '-v')
        self.assertArgumentParserError(parser, '--version')

    def test_help_version_extra_arguments(self):
        parser = ErrorRaisingArgumentParser(version='1.0')
        parser.add_argument('-x', action='store_true')
        parser.add_argument('y')

        # try all combinations of valid prefixes and suffixes
        valid_prefixes = ['', '-x', 'foo', '-x bar', 'baz -x']
        valid_suffixes = valid_prefixes + ['--bad-option', 'foo bar baz']
        for prefix in valid_prefixes:
            for suffix in valid_suffixes:
                format = '%s %%s %s' % (prefix, suffix)
                self.assertPrintHelpExit(parser, format % '-h')
                self.assertPrintHelpExit(parser, format % '--help')
                self.assertPrintVersionExit(parser, format % '-v')
                self.assertPrintVersionExit(parser, format % '--version')
# ======================
# str() and repr() tests
# ======================
class TestStrings(TestCase):
    """Test str() and repr() on Optionals and Positionals"""

    def assertStringEqual(self, obj, result_string):
        # str() and repr() are expected to produce identical output
        for func in [str, repr]:
            self.assertEqual(func(obj), result_string)

    def test_optional(self):
        option = argparse.Action(
            option_strings=['--foo', '-a', '-b'],
            dest='b',
            type='int',
            nargs='+',
            default=42,
            choices=[1, 2, 3],
            help='HELP',
            metavar='METAVAR')
        string = (
            "Action(option_strings=['--foo', '-a', '-b'], dest='b', "
            "nargs='+', const=None, default=42, type='int', "
            "choices=[1, 2, 3], help='HELP', metavar='METAVAR')")
        self.assertStringEqual(option, string)

    def test_argument(self):
        argument = argparse.Action(
            option_strings=[],
            dest='x',
            type=float,
            nargs='?',
            default=2.5,
            choices=[0.5, 1.5, 2.5],
            help='H HH H',
            metavar='MV MV MV')
        string = (
            "Action(option_strings=[], dest='x', nargs='?', "
            "const=None, default=2.5, type=%r, choices=[0.5, 1.5, 2.5], "
            "help='H HH H', metavar='MV MV MV')" % float)
        self.assertStringEqual(argument, string)

    def test_namespace(self):
        # attributes are shown sorted by name
        ns = argparse.Namespace(foo=42, bar='spam')
        string = "Namespace(bar='spam', foo=42)"
        self.assertStringEqual(ns, string)

    def test_parser(self):
        parser = argparse.ArgumentParser(prog='PROG')
        string = (
            "ArgumentParser(prog='PROG', usage=None, description=None, "
            "version=None, formatter_class=%r, conflict_handler='error', "
            "add_help=True)" % argparse.HelpFormatter)
        self.assertStringEqual(parser, string)
# ===============
# Namespace tests
# ===============
class TestNamespace(TestCase):
    """Behaviour of the plain attribute-bag Namespace class."""

    def test_constructor(self):
        empty = argparse.Namespace()
        # unset attributes raise AttributeError, not return None
        self.assertRaises(AttributeError, getattr, empty, 'x')
        filled = argparse.Namespace(a=42, b='spam')
        self.assertEqual(filled.a, 42)
        self.assertEqual(filled.b, 'spam')

    def test_equality(self):
        ns1 = argparse.Namespace(a=1, b=2)
        ns2 = argparse.Namespace(b=2, a=1)
        ns3 = argparse.Namespace(a=1)
        ns4 = argparse.Namespace(b=2)
        # keyword order does not matter for equality
        self.assertEqual(ns1, ns2)
        # namespaces with differing attribute sets never compare equal,
        # via both assertNotEqual and the raw != operator
        for left in (ns1, ns2):
            for right in (ns3, ns4):
                self.assertNotEqual(left, right)
                self.assertTrue(left != right)
# ===================
# File encoding tests
# ===================
class TestEncoding(TestCase):
    """The argparse sources (and this test file) must decode as UTF-8."""

    def _test_module_encoding(self, path):
        # normalise a possible .pyc/.pyo path back to the .py source
        source_path = os.path.splitext(path)[0] + ".py"
        with codecs.open(source_path, 'r', 'utf8') as source_file:
            source_file.read()

    def test_argparse_module_encoding(self):
        self._test_module_encoding(argparse.__file__)

    def test_test_argparse_module_encoding(self):
        self._test_module_encoding(__file__)
# ===================
# ArgumentError tests
# ===================
class TestArgumentError(TestCase):
    """str() of an ArgumentError without an argument is just the message."""

    def test_argument_error(self):
        message = "my error here"
        error = argparse.ArgumentError(None, message)
        self.assertEqual(str(error), message)
# =======================
# ArgumentTypeError tests
# =======================
class TestArgumentTypeError(TestCase):
    """An ArgumentTypeError raised by a type callable becomes a parse error.

    Renamed from a second ``TestArgumentError``: the duplicate class name
    shadowed the first definition, so its str()-of-ArgumentError test was
    silently never collected or run.
    """

    def test_argument_type_error(self):

        def spam(string):
            # the message raised here must surface verbatim in the error
            raise argparse.ArgumentTypeError('spam!')

        parser = ErrorRaisingArgumentParser(prog='PROG', add_help=False)
        parser.add_argument('x', type=spam)
        try:
            parser.parse_args(['XXX'])
        except ArgumentParserError:
            expected = 'usage: PROG x\nPROG: error: argument x: spam!\n'
            msg = sys.exc_info()[1].stderr
            self.assertEqual(expected, msg)
        else:
            self.fail()
# ======================
# parse_known_args tests
# ======================
class TestParseKnownArgs(TestCase):
    """parse_known_args() returns leftover argv instead of erroring out."""

    def test_optionals(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--foo')
        namespace, leftovers = parser.parse_known_args(
            '--foo F --bar --baz'.split())
        self.assertEqual(NS(foo='F'), namespace)
        self.assertEqual(['--bar', '--baz'], leftovers)

    def test_mixed(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-v', nargs='?', const=1, type=int)
        parser.add_argument('--spam', action='store_false')
        parser.add_argument('badger')
        namespace, leftovers = parser.parse_known_args(
            ["B", "C", "--foo", "-v", "3", "4"])
        self.assertEqual(NS(v=3, spam=True, badger="B"), namespace)
        self.assertEqual(["C", "--foo", "4"], leftovers)
# ============================
# from argparse import * tests
# ============================
class TestImportStar(TestCase):
    """Sanity-check argparse.__all__ against the module's contents."""

    def test(self):
        # every advertised name must actually exist on the module
        for name in argparse.__all__:
            self.assertTrue(hasattr(argparse, name))

    def test_all_exports_everything_but_modules(self):
        # __all__ should list exactly the public, non-module names
        exported = sorted(
            name for name, value in vars(argparse).items()
            if not name.startswith("_") and not inspect.ismodule(value))
        self.assertEqual(exported, sorted(argparse.__all__))
def test_main():
    # silence warnings about version argument - these are expected
    with test_support.check_warnings(
            ('The "version" argument to ArgumentParser is deprecated.',
             DeprecationWarning),
            ('The (format|print)_version method is deprecated',
             DeprecationWarning)):
        test_support.run_unittest(__name__)
    # Remove global references to avoid looking like we have refleaks.
    RFile.seen = {}
    WFile.seen = set()
# Run the whole suite when executed directly.
if __name__ == '__main__':
    test_main()
| gpl-3.0 |
MSM8226-Samsung/kernel_samsung_msm8226 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys

# Python 2 script: uses print statements and long()/0L literals below.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# allow overriding the readelf binary via the READELF env var
readelf = os.getenv("READELF", "readelf")

# matches "readelf -u" function headers: "<name>: [0xstart-0xend]"
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# matches per-region "rlen=N" annotations
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Report an error if a function's unwind-region lengths (rlen_sum)
    # don't add up to its instruction-slot count (slots).
    # Relies on module globals: num_errors (incremented here) and
    # start/end (set by the caller's regex match) for the fallback label.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Walk "readelf -u" output; each function header line starts a new
# accumulation, and rlen= lines add to the current region-length sum.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # new function: first validate the previous function's totals
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 bundles hold 3 instruction slots per 16 bytes
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# don't forget the final function in the file
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/nltk/tokenize/treebank.py | 3 | 4749 | # Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Michael Heilman <mheilman@cmu.edu> (re-port from http://www.cis.upenn.edu/~treebank/tokenizer.sed)
#
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
r"""
Penn Treebank Tokenizer
The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank.
This implementation is a port of the tokenizer sed script written by Robert McIntyre
and available at http://www.cis.upenn.edu/~treebank/tokenizer.sed.
"""
import re
from nltk.tokenize.api import TokenizerI
class TreebankWordTokenizer(TokenizerI):
    """
    The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank.
    This is the method that is invoked by ``word_tokenize()``. It assumes that the
    text has already been segmented into sentences, e.g. using ``sent_tokenize()``.
    This tokenizer performs the following steps:
    - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll``
    - treat most punctuation characters as separate tokens
    - split off commas and single quotes, when followed by whitespace
    - separate periods that appear at the end of line

    >>> from nltk.tokenize import TreebankWordTokenizer
    >>> s = '''Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\nThanks.'''
    >>> TreebankWordTokenizer().tokenize(s)
    ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks', '.']
    >>> s = "They'll save and invest more."
    >>> TreebankWordTokenizer().tokenize(s)
    ['They', "'ll", 'save', 'and', 'invest', 'more', '.']
    >>> s = "hi, my name can't hello,"
    >>> TreebankWordTokenizer().tokenize(s)
    ['hi', ',', 'my', 'name', 'ca', "n't", 'hello', ',']
    """

    # starting quotes: straight double quotes become Penn-style `` tokens
    STARTING_QUOTES = [
        (re.compile(r'^\"'), r'``'),
        (re.compile(r'(``)'), r' \1 '),
        (re.compile(r'([ (\[{<])"'), r'\1 `` '),
    ]

    # punctuation (each pattern is applied with its replacement, in order)
    PUNCTUATION = [
        (re.compile(r'([:,])([^\d])'), r' \1 \2'),
        (re.compile(r'([:,])$'), r' \1 '),
        (re.compile(r'\.\.\.'), r' ... '),
        (re.compile(r'[;@#$%&]'), r' \g<0> '),
        # sentence-final period, possibly followed by closing brackets/quotes
        (re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'), r'\1 \2\3 '),
        (re.compile(r'[?!]'), r' \g<0> '),
        (re.compile(r"([^'])' "), r"\1 ' "),
    ]

    # parens, brackets, etc.
    PARENS_BRACKETS = [
        (re.compile(r'[\]\[\(\)\{\}\<\>]'), r' \g<0> '),
        (re.compile(r'--'), r' -- '),
    ]

    # ending quotes: straight double quotes become Penn-style '' tokens,
    # and possessives/clitics ('s, 'll, n't, ...) are split off
    ENDING_QUOTES = [
        (re.compile(r'"'), " '' "),
        (re.compile(r'(\S)(\'\')'), r'\1 \2 '),
        (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "),
        (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "),
    ]

    # List of contractions adapted from Robert MacIntyre's tokenizer.
    CONTRACTIONS2 = [re.compile(r"(?i)\b(can)(not)\b"),
                     re.compile(r"(?i)\b(d)('ye)\b"),
                     re.compile(r"(?i)\b(gim)(me)\b"),
                     re.compile(r"(?i)\b(gon)(na)\b"),
                     re.compile(r"(?i)\b(got)(ta)\b"),
                     re.compile(r"(?i)\b(lem)(me)\b"),
                     re.compile(r"(?i)\b(mor)('n)\b"),
                     re.compile(r"(?i)\b(wan)(na) ")]
    CONTRACTIONS3 = [re.compile(r"(?i) ('t)(is)\b"),
                     re.compile(r"(?i) ('t)(was)\b")]
    # CONTRACTIONS4 is defined but deliberately unused (see tokenize below)
    CONTRACTIONS4 = [re.compile(r"(?i)\b(whad)(dd)(ya)\b"),
                     re.compile(r"(?i)\b(wha)(t)(cha)\b")]

    def tokenize(self, text):
        """Split *text* into Treebank-style tokens and return them as a list."""
        for regexp, substitution in self.STARTING_QUOTES:
            text = regexp.sub(substitution, text)

        for regexp, substitution in self.PUNCTUATION:
            text = regexp.sub(substitution, text)

        for regexp, substitution in self.PARENS_BRACKETS:
            text = regexp.sub(substitution, text)

        # add extra space to make things easier
        text = " " + text + " "

        for regexp, substitution in self.ENDING_QUOTES:
            text = regexp.sub(substitution, text)

        for regexp in self.CONTRACTIONS2:
            text = regexp.sub(r' \1 \2 ', text)
        for regexp in self.CONTRACTIONS3:
            text = regexp.sub(r' \1 \2 ', text)

        # We are not using CONTRACTIONS4 since
        # they are also commented out in the SED scripts
        # for regexp in self.CONTRACTIONS4:
        #     text = regexp.sub(r' \1 \2 \3 ', text)

        return text.split()
| mit |
fbradyirl/home-assistant | homeassistant/components/lcn/sensor.py | 7 | 3835 | """Support for LCN sensors."""
import pypck
from homeassistant.const import CONF_ADDRESS, CONF_UNIT_OF_MEASUREMENT
from . import LcnDevice
from .const import (
CONF_CONNECTIONS,
CONF_SOURCE,
DATA_LCN,
LED_PORTS,
S0_INPUTS,
SETPOINTS,
THRESHOLDS,
VARIABLES,
)
from .helpers import get_connection
async def async_setup_platform(
    hass, hass_config, async_add_entities, discovery_info=None
):
    """Set up the LCN sensor platform."""
    if discovery_info is None:
        return

    devices = []
    for config in discovery_info:
        address, connection_id = config[CONF_ADDRESS]
        addr = pypck.lcn_addr.LcnAddr(*address)
        connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
        connection = get_connection(connections, connection_id)
        address_connection = connection.get_address_conn(addr)

        # variables/setpoints/thresholds/S0 counters become numeric sensors;
        # everything else (LED / logic-op ports) reports a state name
        if config[CONF_SOURCE] in VARIABLES + SETPOINTS + THRESHOLDS + S0_INPUTS:
            device = LcnVariableSensor(config, address_connection)
        else:  # in LED_PORTS + LOGICOP_PORTS
            device = LcnLedLogicSensor(config, address_connection)
        devices.append(device)

    async_add_entities(devices)
class LcnVariableSensor(LcnDevice):
    """Representation of a LCN sensor for variables."""

    def __init__(self, config, address_connection):
        """Initialize the LCN sensor."""
        super().__init__(config, address_connection)

        self.variable = pypck.lcn_defs.Var[config[CONF_SOURCE]]
        self.unit = pypck.lcn_defs.VarUnit.parse(config[CONF_UNIT_OF_MEASUREMENT])

        # last value received from the module; None until a status reply arrives
        self._value = None

    async def async_added_to_hass(self):
        """Run when entity about to be added to hass."""
        await super().async_added_to_hass()
        # subscribe to status updates for this variable
        await self.address_connection.activate_status_request_handler(self.variable)

    @property
    def state(self):
        """Return the state of the entity."""
        return self._value

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self.unit.value

    def input_received(self, input_obj):
        """Set sensor value when LCN input object (command) is received."""
        # ignore anything that isn't a status reply for *this* variable
        if (
            not isinstance(input_obj, pypck.inputs.ModStatusVar)
            or input_obj.get_var() != self.variable
        ):
            return

        self._value = input_obj.get_value().to_var_unit(self.unit)
        self.async_schedule_update_ha_state()
class LcnLedLogicSensor(LcnDevice):
    """Representation of a LCN sensor for leds and logicops."""

    def __init__(self, config, address_connection):
        """Initialize the LCN sensor."""
        super().__init__(config, address_connection)

        # the configured source is either a LED port or a logic-op port
        if config[CONF_SOURCE] in LED_PORTS:
            self.source = pypck.lcn_defs.LedPort[config[CONF_SOURCE]]
        else:
            self.source = pypck.lcn_defs.LogicOpPort[config[CONF_SOURCE]]

        # last reported state name; None until a status reply arrives
        self._value = None

    async def async_added_to_hass(self):
        """Run when entity about to be added to hass."""
        await super().async_added_to_hass()
        await self.address_connection.activate_status_request_handler(self.source)

    @property
    def state(self):
        """Return the state of the entity."""
        return self._value

    def input_received(self, input_obj):
        """Set sensor value when LCN input object (command) is received."""
        if not isinstance(input_obj, pypck.inputs.ModStatusLedsAndLogicOps):
            return

        # report the led/logic-op state enum name in lowercase
        if self.source in pypck.lcn_defs.LedPort:
            self._value = input_obj.get_led_state(self.source.value).name.lower()
        elif self.source in pypck.lcn_defs.LogicOpPort:
            self._value = input_obj.get_logic_op_state(self.source.value).name.lower()
        self.async_schedule_update_ha_state()
| apache-2.0 |
htautau/hhntup | higgstautau/filters.py | 1 | 29573 | import ROOT
from rootpy.tree.filtering import *
from rootpy.extern.hep import pdg
from rootpy import stl
VectorTLorentzVector = stl.vector("TLorentzVector")
Vector = stl.vector('float')
from itertools import ifilter
from math import *
from array import array as carray
from xaod import TOOLS
from . import datasets
# from .corrections import reweight_ggf
from .units import GeV
from .tautools import TauDecay
from . import utils
from . import store_helper
from . import log; log = log[__name__]
from goodruns import GRL
BCH_TOOLS = []
class GRLFilter(EventFilter):
    """Keep only events whose (run, lumi block) is in the good runs list."""

    def __init__(self, grl, **kwargs):
        super(GRLFilter, self).__init__(**kwargs)
        # Accept either an already-built GRL or anything the GRL
        # constructor understands (e.g. an XML file path).
        self.grl = grl if isinstance(grl, GRL) else GRL(grl)

    def passes(self, event):
        if not self.grl:
            return False
        run = int(event.EventInfo.runNumber())
        lb = int(event.EventInfo.lumiBlock())
        return (run, lb) in self.grl
class GRLFilterOfficial(EventFilter):
    # ALTERNATIVE USING OFFICIAL TOOL
    """GRL filter based on the official TGoodRunsListReader tool."""

    def __init__(self, xml, **kwargs):
        super(GRLFilterOfficial, self).__init__(**kwargs)
        from ROOT import Root
        reader = Root.TGoodRunsListReader(xml)
        reader.Interpret()
        self.grl = reader.GetMergedGRLCollection()
        self.grl.Summary()

    def passes(self, event):
        info = event.EventInfo
        return self.grl.HasRunLumiBlock(
            int(info.runNumber()), int(info.lumiBlock()))
def primary_vertex_selection(vxp):
    """Return True for a primary vertex (type 1) with at least 4 tracks."""
    if vxp.vertexType() != 1:
        return False
    return vxp.nTrackParticles() >= 4
def pileup_vertex_selection(vxp):
    """Return True for a pileup vertex (type 3) with at least 2 tracks."""
    if vxp.vertexType() != 3:
        return False
    return vxp.nTrackParticles() >= 2
def vertex_selection(vxp):
    """ Does the full primary and pileup vertex selection """
    if primary_vertex_selection(vxp):
        return True
    return pileup_vertex_selection(vxp)
class PriVertex(EventFilter):
    """Select good vertices and require at least one primary vertex."""

    def passes(self, event):
        event.vertices.select(vertex_selection)
        for vxp in event.vertices:
            if primary_vertex_selection(vxp):
                return True
        return False
class CoreFlags(EventFilter):
    """Reject events flagged with a Core error."""

    def passes(self, event):
        info = event.EventInfo
        return info.errorState(info.Core) == 0
class NvtxJets(EventFilter):
    """Count vertices relevant for soft-MET and jet systematics.

    Always passes; only fills tree.nvtxsoftmet and tree.nvtxjets.
    """

    def __init__(self, tree, **kwargs):
        super(NvtxJets, self).__init__(**kwargs)
        self.tree = tree

    def passes(self, event):
        # A good primary vertex is needed for jet and soft term systematics.
        good_pv = any(
            vertex.vertexType() == 1 and
            vertex.nTrackParticles() > 2 and
            abs(vertex.z()) < 200
            for vertex in event.vertices)
        nvtxsoftmet = 0
        nvtxjets = 0
        if good_pv:
            for vertex in event.vertices:
                ntrk = vertex.nTrackParticles()
                if ntrk > 2:
                    nvtxsoftmet += 1
                if ntrk > 1:
                    nvtxjets += 1
        self.tree.nvtxsoftmet = nvtxsoftmet
        self.tree.nvtxjets = nvtxjets
        return True
class BCHCleaning(EventFilter):
    # NOT CONVERTED TO XAOD YET
    """
    Flag jets and taus affected by masked Tile calorimeter modules.

    https://twiki.cern.ch/twiki/bin/view/AtlasProtected/BCHCleaningTool

    Always passes; each jet/tau is decorated with BCHMedium and BCHTight
    flags.  For embedded samples (data/MC mixture) the tool is chosen per
    object based on truth matching.
    """
    def __init__(self, tree, passthrough, datatype, **kwargs):
        if not passthrough:
            # from externaltools import TileTripReader
            # from externaltools import BCHCleaningTool
            from ROOT import Root
            from ROOT import BCHTool
            self.tree = tree
            self.datatype = datatype
            self.tiletool = Root.TTileTripReader()
            self.tiletool.setTripFile(TileTripReader.get_resource("CompleteTripList_2011-2012.root"))
            self.bchtool_data = BCHTool.BCHCleaningToolRoot()
            self.bchtool_mc = BCHTool.BCHCleaningToolRoot()
            self.bchtool_data.InitializeTool(
                True, self.tiletool, BCHCleaningTool.get_resource("FractionsRejectedJetsMC.root"))
            self.bchtool_mc.InitializeTool(
                False, self.tiletool, BCHCleaningTool.get_resource("FractionsRejectedJetsMC.root"))
            # Keep references alive at module level (PyROOT ownership).
            BCH_TOOLS.append(self.bchtool_data)
            BCH_TOOLS.append(self.bchtool_mc)
        super(BCHCleaning, self).__init__(passthrough=passthrough, **kwargs)

    @staticmethod
    def _flag_jet(tool, runnumber, lbn, jet):
        """Set BCHMedium/BCHTight on a jet from its eta/phi/cell info."""
        args = (runnumber, lbn, jet.eta(), jet.phi(),
                jet.auxdataConst('float')('BchCorrCell'),
                jet.auxdataConst('float')('EMFrac'), jet.pt())
        jet.BCHMedium = tool.IsBadMediumBCH(*args)
        jet.BCHTight = tool.IsBadTightBCH(*args)

    @staticmethod
    def _flag_tau(tool, runnumber, lbn, tau):
        """Set BCHMedium/BCHTight on a tau using its seed jet."""
        jet = tau.jet()
        args = (runnumber, lbn, jet.eta(), jet.phi(),
                jet.auxdataConst('float')('BchCorrCell'),
                jet.auxdataConst('float')('EMFrac'), jet.pt())
        tau.BCHMedium = tool.IsBadMediumBCH(*args)
        tau.BCHTight = tool.IsBadTightBCH(*args)

    def passes(self, event):
        if self.datatype in (datasets.DATA, datasets.MC, datasets.MCEMBED):
            if self.datatype == datasets.DATA:
                jet_tool = self.bchtool_data
                runnumber = event.EventInfo.runNumber()
                lbn = event.EventInfo.lumiBlock()
            else:
                # NOTE(review): MC reads tree.RunNumber/tree.lbn while the
                # EMBED branch below reads event.RunNumber/event.lbn --
                # confirm this asymmetry is intended.
                jet_tool = self.bchtool_mc
                runnumber = self.tree.RunNumber
                lbn = self.tree.lbn
            #jet_tool.SetSeed(314159 + event.EventNumber * 2)
            for jet in event.jets:
                self._flag_jet(jet_tool, runnumber, lbn, jet)
            for tau in event.taus:
                self._flag_tau(jet_tool, runnumber, lbn, tau)
        elif self.datatype == datasets.EMBED:
            # Embedded samples mix data and MC: pick the tool per object
            # from its truth-matching flag.
            #self.bchtool_data.SetSeed(314159 + event.EventNumber * 2)
            #self.bchtool_mc.SetSeed(314159 + event.EventNumber * 3)
            runnumber = event.RunNumber
            lbn = event.lbn
            for jet in event.jets:
                tool = self.bchtool_mc if jet.matched else self.bchtool_data
                self._flag_jet(tool, runnumber, lbn, jet)
            for tau in event.taus:
                tool = self.bchtool_mc if tau.matched else self.bchtool_data
                self._flag_tau(tool, runnumber, lbn, tau)
        return True
class TileTrips(EventFilter):
    """
    Reject events corrupted by Tile calorimeter trips.

    https://twiki.cern.ch/twiki/bin/viewauth/Atlas/DataPreparationCheckListForPhysicsAnalysis#Rejection_of_bad_corrupted_event
    """
    def __init__(self, passthrough=False, **kwargs):
        if not passthrough:
            #from externaltools import TileTripReader
            from ROOT import Root
            self.tool = Root.TTileTripReader()
        super(TileTrips, self).__init__(passthrough=passthrough, **kwargs)

    def passes(self, event):
        info = event.EventInfo
        return self.tool.checkEvent(
            info.runNumber(), info.lumiBlock(), info.eventNumber())
class JetCleaning(EventFilter):
    """Veto events containing a bad-quality jet.

    Checks LC jets above ``pt_thresh`` and within ``eta_max`` with the
    JetCleaningTool (LooseBad working point).  For 2012 data/embedding,
    additionally vetoes events with a jet pointing at the hot Tile
    calorimeter module in periods B1/B2.
    """

    # Runs affected by the hot Tile module (2012 periods B1 and B2).
    BAD_TILE = [
        202660, 202668, 202712, 202740, 202965, 202987, 202991, 203027, 203169
    ]

    def __init__(self,
                 datatype,
                 year,
                 pt_thresh=20*GeV,
                 eta_max=4.5,
                 **kwargs):
        super(JetCleaning, self).__init__(**kwargs)
        self.year = year
        self.datatype = datatype
        self.pt_thresh = pt_thresh
        self.eta_max = eta_max
        from ROOT import JetCleaningTool
        self.tool = JetCleaningTool(JetCleaningTool.LooseBad)

    def passes(self, event):
        # using LC jets
        for jet in event.jets:
            if jet.pt() <= self.pt_thresh or abs(jet.eta()) >= self.eta_max:
                continue
            # NOTE(review): the event is vetoed when accept() returns true;
            # confirm the tool's accept() convention here flags a *bad* jet
            # rather than a clean one -- otherwise this test is inverted.
            if self.tool.accept(jet):
                return False
        if (self.datatype in (datasets.DATA, datasets.EMBED)) and self.year == 2012:
            # https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/HowToCleanJets2012
            # Hot Tile calorimeter in period B1 and B2
            if event.EventInfo.runNumber() in JetCleaning.BAD_TILE:
                # recommendation is to use EM jets
                for jet in event.jets_EM:
                    # Jet pointing at the affected eta/phi region...
                    _etaphi28 = (
                        -0.2 < jet.eta() < -0.1 and
                        2.65 < jet.phi() < 2.75)
                    FracSamplingMax = jet.auxdataConst('float')('FracSamplingMax')
                    SamplingMax = jet.auxdataConst('int')('FracSamplingMaxIndex')
                    # ...and dominated by sampling index 13.
                    if FracSamplingMax > 0.6 and SamplingMax == 13 and _etaphi28:
                        return False
        return True
class LArError(EventFilter):
    """Reject events flagged with a LAr calorimeter error."""

    def passes(self, event):
        info = event.EventInfo
        return info.errorState(info.LAr) == 0
class TileError(EventFilter):
    """Reject events flagged with a Tile calorimeter error."""

    def passes(self, event):
        info = event.EventInfo
        return info.errorState(info.Tile) == 0
class JetCrackVeto(EventFilter):
    """Veto events with a jet above 20 GeV in the crack region."""

    def passes(self, event):
        for jet in event.jets:
            if jet.pt() <= 20 * GeV:
                continue
            abseta = abs(jet.eta())
            if 1.3 < abseta < 1.7:
                return False
        return True
class TauElectronVeto(EventFilter):
    """Apply the electron veto to 1-prong taus.

    https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/TauRecommendationsWinterConf2013#Electron_veto
    The eta selection is assumed to have been applied already (TauEta).
    """

    def __init__(self, min_taus, **kwargs):
        super(TauElectronVeto, self).__init__(**kwargs)
        self.min_taus = min_taus

    def passes(self, event):
        def keep(tau):
            # Multi-prong taus are kept; 1-prong taus must fail EleBDTLoose.
            if tau.nTracks() > 1:
                return True
            return tau.isTau(ROOT.xAOD.TauJetParameters.EleBDTLoose) == 0
        event.taus.select(keep)
        return len(event.taus) >= self.min_taus
class TauMuonVeto(EventFilter):
    """Remove taus failing the muon veto."""

    def __init__(self, min_taus, **kwargs):
        super(TauMuonVeto, self).__init__(**kwargs)
        self.min_taus = min_taus

    def passes(self, event):
        def keep(tau):
            return tau.isTau(ROOT.xAOD.TauJetParameters.MuonVeto) == 0
        event.taus.select(keep)
        return len(event.taus) >= self.min_taus
class TauHasTrack(EventFilter):
    """Require taus to have at least one track."""

    def __init__(self, min_taus, **kwargs):
        super(TauHasTrack, self).__init__(**kwargs)
        self.min_taus = min_taus

    def passes(self, event):
        def has_track(tau):
            return tau.nTracks() > 0
        event.taus.select(has_track)
        return len(event.taus) >= self.min_taus
class TauPT(EventFilter):
    """Require taus above a pT threshold (default 20 GeV)."""

    def __init__(self, min_taus, thresh=20 * GeV, **kwargs):
        self.min_taus = min_taus
        self.thresh = thresh
        super(TauPT, self).__init__(**kwargs)

    def passes(self, event):
        thresh = self.thresh
        event.taus.select(lambda tau: tau.pt() > thresh)
        return len(event.taus) >= self.min_taus
class TauEta(EventFilter):
    """Require both calo and leading-track eta within 2.47."""

    def __init__(self, min_taus, **kwargs):
        self.min_taus = min_taus
        super(TauEta, self).__init__(**kwargs)

    def passes(self, event):
        def in_acceptance(tau):
            if abs(tau.eta()) >= 2.47:
                return False
            return abs(tau.track(0).eta()) < 2.47
        event.taus.select(in_acceptance)
        return len(event.taus) >= self.min_taus
def jvf_selection(tau):
    """Jet-vertex-fraction selection on the tau's seed jet.

    Taus whose leading track has |eta| > 2.1 are accepted outright;
    otherwise the first JVF entry of the seed jet (0 if the vector is
    empty) must exceed 0.5.
    """
    if abs(tau.track(0).eta()) > 2.1:
        return True
    jvf_vec = tau.jet().auxdataConst('std::vector<float, std::allocator<float> >')('JVF')
    jvf = jvf_vec[0] if not jvf_vec.empty() else 0
    return jvf > .5
class TauJVF(EventFilter):
    """Apply the JVF selection (jvf_selection) to each tau."""

    def __init__(self, min_taus, **kwargs):
        self.min_taus = min_taus
        # Kept as an attribute so it can be swapped out externally.
        self.filter_func = jvf_selection
        super(TauJVF, self).__init__(**kwargs)

    def passes(self, event):
        event.taus.select(self.filter_func)
        return len(event.taus) >= self.min_taus
class Tau1Track3Track(EventFilter):
    """Keep only 1-prong and 3-prong taus."""

    def __init__(self, min_taus, **kwargs):
        self.min_taus = min_taus
        super(Tau1Track3Track, self).__init__(**kwargs)

    def passes(self, event):
        def prongs_ok(tau):
            return tau.nTracks() in (1, 3)
        event.taus.select(prongs_ok)
        return len(event.taus) >= self.min_taus
class Tau1P3P(EventFilter):
    """
    Only keep 1P + 3P and 3P + 3P
    """
    def passes(self, event):
        assert len(event.taus) == 2
        tau1, tau2 = event.taus
        prongs = {tau1.nTracks(), tau2.nTracks()}
        # {1, 3} covers 1P+3P in either order; {3} covers 3P+3P.
        return prongs in ({1, 3}, {3})
class TauCharge(EventFilter):
    """Require taus to have unit charge."""

    def __init__(self, min_taus, **kwargs):
        self.min_taus = min_taus
        super(TauCharge, self).__init__(**kwargs)

    def passes(self, event):
        def unit_charge(tau):
            return abs(tau.charge()) == 1
        event.taus.select(unit_charge)
        return len(event.taus) >= self.min_taus
class TauIDLoose(EventFilter):
    """Require taus to pass the loose BDT jet discriminant."""

    def __init__(self, min_taus, **kwargs):
        self.min_taus = min_taus
        super(TauIDLoose, self).__init__(**kwargs)

    def passes(self, event):
        def is_loose(tau):
            return tau.isTau(ROOT.xAOD.TauJetParameters.JetBDTSigLoose) == 1
        event.taus.select(is_loose)
        return len(event.taus) >= self.min_taus
class TauIDMedium(EventFilter):
    """Require taus to pass the medium BDT jet discriminant."""

    def __init__(self, min_taus, **kwargs):
        self.min_taus = min_taus
        super(TauIDMedium, self).__init__(**kwargs)

    def passes(self, event):
        def is_medium(tau):
            return tau.isTau(ROOT.xAOD.TauJetParameters.JetBDTSigMedium) == 1
        event.taus.select(is_medium)
        return len(event.taus) >= self.min_taus
class TauCrack(EventFilter):
    """Remove taus whose leading track points into the crack region."""

    def __init__(self, min_taus, **kwargs):
        self.min_taus = min_taus
        super(TauCrack, self).__init__(**kwargs)

    def passes(self, event):
        def outside_crack(tau):
            return not (1.37 <= abs(tau.track(0).eta()) <= 1.52)
        event.taus.select(outside_crack)
        return len(event.taus) >= self.min_taus
class TrueTauSelection(EventFilter):
    """
    True tau selection from the truth particle container
    using the official tool (does it work for all the generators ?)
    """
    def __init__(self, passthrough=False, **kwargs):
        super(TrueTauSelection, self).__init__(
            passthrough=passthrough, **kwargs)
        if not passthrough:
            from ROOT.TauAnalysisTools import TauTruthMatchingTool
            self.tau_truth_tool = TauTruthMatchingTool('tau_truth_tool')
            # Should add an argument for the sample type
            self.tau_truth_tool.initialize()
            # NOTE: removed a dead no-op local assignment
            # (truth_matching_tool = self.tau_truth_tool) that was here;
            # TruthMatching looks the tool up via TOOLS.get('tau_truth_tool'),
            # presumably populated by the ASG tool registry -- confirm.

    def passes(self, event):
        # Build the official truth-tau container and swap it into the event.
        self.tau_truth_tool.setTruthParticleContainer(event.truetaus.collection)
        self.tau_truth_tool.createTruthTauContainer()
        truth_taus = self.tau_truth_tool.getTruthTauContainer()
        truth_taus_aux = self.tau_truth_tool.getTruthTauAuxContainer()
        # Attach the aux store so decorations on the truth taus are readable.
        truth_taus.setNonConstStore(truth_taus_aux)
        event.truetaus.collection = truth_taus
        # OLD METHOD using the edm itself
        # event.truetaus.select(lambda p: p.isTau() and p.status() in (2,))
        return True
class TruthMatching(EventFilter):
    """Decorate each reco tau with its truth-match information."""

    def __init__(self, passthrough=False, **kwargs):
        super(TruthMatching, self).__init__(
            passthrough=passthrough, **kwargs)
        if not passthrough:
            # This implies that this filter is ALWAYS applied after
            # the TrueTauSelection
            self.tau_truth_tool = TOOLS.get('tau_truth_tool')

    def passes(self, event):
        tool = self.tau_truth_tool
        for tau in event.taus:
            # Defaults for unmatched taus.
            tau.matched_dr = 1111.
            tau.matched_object = None
            tool.setTruthParticleContainer(event.mc.collection)
            true_tau = tool.applyTruthMatch(tau)
            tau.matched = tau.auxdataConst('bool')('IsTruthMatched')
            if not tau.matched:
                continue
            tau.matched_object = true_tau
            tau.matched_dr = utils.dR(
                tau.eta(), tau.phi(),
                true_tau.auxdataConst('double')('eta_vis'),
                true_tau.auxdataConst('double')('phi_vis'))
        return True
class RecoJetTrueTauMatching(EventFilter):
    """
    Match reco jets to true taus within dR < 0.2.

    To use after the TrueTauSelection filter.  Always passes; each jet
    is decorated with matched / matched_dr / matched_object.
    """
    def passes(self, event):
        for jet in event.jets:
            jet.matched = False
            jet.matched_dr = 1111.
            jet.matched_object = None
            for p in event.truetaus:
                dr = utils.dR(
                    jet.eta(), jet.phi(),
                    p.auxdataConst('double')('eta_vis'),
                    p.auxdataConst('double')('phi_vis'))
                if dr < 0.2:
                    # TODO: handle possible collision!
                    jet.matched = True
                    jet.matched_dr = dr
                    # Bug fix: was `truetau` (an undefined name), which
                    # raised NameError whenever a match was found.
                    jet.matched_object = p
                    break
        return True
class TauCalibration(EventFilter):
    """
    Apply Energy shift in data and
    systematic variation in MC (Not yet)
    """
    def __init__(self, datatype, **kwargs):
        super(TauCalibration, self).__init__(**kwargs)
        self.datatype = datatype
        from ROOT.TauAnalysisTools import TauSmearingTool
        self.tool = TauSmearingTool('tau_smearing_tool')
        is_data = self.datatype == datasets.DATA
        self.tool.setProperty('bool')('IsData', is_data)

    def passes(self, event):
        # Work on a shallow copy so the original container is untouched.
        calibrated = store_helper.shallowCopyTauJetContainer(event.taus.collection)
        for tau in calibrated:
            self.tool.applyCorrection(tau)
        event.taus.collection = calibrated
        return True
class TauJetOverlapRemoval(EventFilter):
    """
    Remove jets overlapping with any tau within a dR cone.

    Precedence: taus > jets.  Always passes; only prunes event.jets.
    """
    def __init__(self, dr=.2, **kwargs):
        super(TauJetOverlapRemoval, self).__init__(**kwargs)
        self.dr = dr

    def passes(self, event):
        # remove overlap with taus; the generator short-circuits on the
        # first overlapping tau (the original built a full intermediate
        # list for every jet).
        event.jets.select(lambda jet:
            not any(
                utils.dR(jet.eta(), jet.phi(), tau.eta(), tau.phi()) < self.dr
                for tau in event.taus))
        return True
class NumJets25(EventFilter):
    """Count jets with pT > 25 GeV and |eta| < 4.5 into the tree."""

    def __init__(self, tree, **kwargs):
        super(NumJets25, self).__init__(**kwargs)
        self.tree = tree

    def passes(self, event):
        count = 0
        for jet in event.jets:
            if jet.pt() > 25 * GeV and abs(jet.eta()) < 4.5:
                count += 1
        self.tree.numJets25 = count
        return True
class NonIsolatedJet(EventFilter):
    """
    Flag events with a jet in the 0.4 < dR < 1.0 annulus around a tau.

    https://indico.cern.ch/getFile.py/access?contribId=1&resId=0&materialId=slides&confId=200403

    Only writes the flag instead of vetoing the event so the cut can be
    turned on and off downstream.
    """
    def __init__(self, tree, **kwargs):
        super(NonIsolatedJet, self).__init__(**kwargs)
        self.tree = tree

    def passes(self, event):
        # any() short-circuits on the first offending tau/jet pair; the
        # original always scanned the full cross product.  Final flag
        # value is identical.
        self.tree.nonisolatedjet = any(
            0.4 < utils.dR(tau.eta(), tau.phi(), jet.eta(), jet.phi()) < 1.0
            for tau in event.taus
            for jet in event.jets)
        return True
def jet_selection_2011(jet):
    """ Finalizes the jet selection """
    pt = jet.pt()
    abseta = abs(jet.eta())
    if not pt > 25 * GeV:
        return False
    if not abseta < 4.5:
        return False
    # suppress forward jets
    if abseta > 2.4 and not pt > 30 * GeV:
        return False
    # JVF cut on central jets
    #if (abs(jet.eta) < 2.4) and not (abs(jet.jvtxf) > 0.75):
    #    return False
    # NO JVFUncertaintyTool for 2011!
    return True
def jet_selection_2012(jet):
    """ Finalizes the jet selection
    https://cds.cern.ch/record/1472547/files/ATL-COM-PHYS-2012-1202.pdf
    """
    pt = jet.pt()
    abseta = abs(jet.eta())
    if not pt > 30 * GeV:
        return False
    if not abseta < 4.5:
        return False
    # suppress forward jets
    if abseta > 2.4 and not pt > 35 * GeV:
        return False
    # JVF cut on central jets below 50 GeV
    if pt < 50 * GeV and abseta < 2.4:
        jvf_vec = jet.auxdataConst('std::vector<float, std::allocator<float> >')('JVF')
        jvf = 0 if jvf_vec.empty() else jvf_vec[0]
        if not abs(jvf) > 0.5:
            return False
    return True
class JetSelection(EventFilter):
    """Selects jets of good quality, keep event in any case"""

    # Per-year jet selection functions.
    _SELECTIONS = {2011: jet_selection_2011, 2012: jet_selection_2012}

    def __init__(self, year, **kwargs):
        try:
            self.filter_func = self._SELECTIONS[year]
        except KeyError:
            raise ValueError("No jet selection defined for year %d" % year)
        super(JetSelection, self).__init__(**kwargs)

    def passes(self, event):
        event.jets.select(self.filter_func)
        return True
class JetPreselection(EventFilter):
    """Keep only jets above 20 GeV; always passes."""

    def passes(self, event):
        def above_threshold(jet):
            return jet.pt() > 20 * GeV
        event.jets.select(above_threshold)
        return True
class MCWeight(EventFilter):
    # NOT FULLY CONVERTED TO XAOD YET
    """Record the MC event weight and PDF info on the output tree.

    Always passes.  For MC it also stores the PDF ids, x values and
    scale from the first truth event record.
    """

    def __init__(self, datatype, tree, **kwargs):
        self.datatype = datatype
        self.tree = tree
        super(MCWeight, self).__init__(**kwargs)

    def passes(self, event):
        # set the event weights
        if self.datatype == datasets.MC:
            truth_event = event.TruthEvent[0]
            self.tree.mc_weight = event.EventInfo.mcEventWeight()
            # pdfInfoParameter fills its first argument by reference;
            # ROOT.Long and an array('f') act as mutable output buffers
            # (Python 2 / PyROOT idiom).
            val_i = ROOT.Long(0)
            if truth_event.pdfInfoParameter(val_i, truth_event.PDFID1):
                self.tree.mcevent_pdf_id1_0 = val_i
            if truth_event.pdfInfoParameter(val_i, truth_event.PDFID2):
                self.tree.mcevent_pdf_id2_0 = val_i
            val_f = carray('f', [0.])
            if truth_event.pdfInfoParameter(val_f, truth_event.X1):
                self.tree.mcevent_pdf_x1_0 = val_f[0]
            if truth_event.pdfInfoParameter(val_f, truth_event.X2):
                self.tree.mcevent_pdf_x2_0 = val_f[0]
            if truth_event.pdfInfoParameter(val_f, truth_event.scalePDF):
                self.tree.mcevent_pdf_scale_0 = val_f[0]
        elif self.datatype == datasets.EMBED:
            # Embedded samples only carry the plain MC event weight.
            self.tree.mc_weight = event.EventInfo.mcEventWeight()
        return True
class ggFReweighting(EventFilter):
    # NOT CONVERTED TO XAOD YET
    """Placeholder for the ggF pT reweighting (currently a no-op)."""

    def __init__(self, dsname, tree, **kwargs):
        self.dsname = dsname
        self.tree = tree
        super(ggFReweighting, self).__init__(**kwargs)

    def passes(self, event):
        # Reweighting disabled until converted to xAOD:
        # self.tree.ggf_weight = reweight_ggf(event, self.dsname)
        return True
class JetIsPileup(EventFilter):
    # NOT converted to XAOD yet
    """
    must be applied before any jet selection
    """
    def __init__(self, **kwargs):
        super(JetIsPileup, self).__init__(**kwargs)
        if not self.passthrough:
            # from externaltools import JVFUncertaintyTool as JVFUncertaintyTool2012
            from ROOT import JVFUncertaintyTool
            self.tool = JVFUncertaintyTool("AntiKt4LCTopo")

    def passes(self, event):
        # collect truth jets above 10 GeV
        true_p4s = VectorTLorentzVector()
        for truejet in event.truejets:
            if truejet.pt() > 10e3:
                true_p4s.push_back(truejet.p4())
        # decorate each reco jet with the pileup decision
        for jet in event.jets:
            jet.ispileup = self.tool.isPileUpJet(jet.p4(), true_p4s)
        return True
class JetCopy(EventFilter):
    # NOT CONVERTED TO XAOD YET
    # NOT NEEDED ANYMORE
    """Snapshot the original jet kinematics into the output tree."""

    # Jet attributes mirrored into jet_<attr>_original branches.
    _BRANCHES = ('E', 'm', 'pt', 'eta', 'phi')

    def __init__(self, tree, **kwargs):
        super(JetCopy, self).__init__(**kwargs)
        self.tree = tree

    def passes(self, event):
        tree = self.tree
        for attr in self._BRANCHES:
            getattr(tree, 'jet_%s_original' % attr).clear()
        for jet in event.jets:
            for attr in self._BRANCHES:
                getattr(tree, 'jet_%s_original' % attr).push_back(
                    getattr(jet, attr))
        return True
class HiggsPT(EventFilter):
    # NOT CONVERTED TO XAOD YET
    """Store the true Higgs pT and non-overlapping truth-jet variables.

    Always passes; raises RuntimeError if no Higgs (pdgId 25) with one
    of the per-year status codes is found in the truth record.
    """

    def __init__(self, year, tree, **kwargs):
        super(HiggsPT, self).__init__(**kwargs)
        self.tree = tree
        # Generator status codes identifying the Higgs differ by year
        # (different generator setups).
        if year == 2011:
            self.status = (2, 10902, 62)
        elif year == 2012:
            self.status = (62, 195)
        else:
            raise ValueError("No HiggsPT defined for year {0}".format(year))

    def passes(self, event):
        pt = 0
        higgs = None
        status = self.status
        # find the Higgs
        for mc in event.mc:
            if mc.pdgId() == 25 and mc.status() in status:
                pt = mc.pt()
                higgs = mc
                break
        if higgs is None:
            raise RuntimeError("Higgs not found!")
        self.tree.true_resonance_pt = pt
        # Only consider taus here since there are very soft photons radiated
        # off the taus but included as children of the Higgs
        vertex = higgs.decayVtx()
        children = [
            vertex.outgoingParticle(i) for i in
            xrange(vertex.nOutgoingParticles())]
        true_taus = [TauDecay(mc).fourvect_visible
                     for mc in children
                     if mc.pdgId() in (pdg.tau_plus, pdg.tau_minus)
                     and mc.status() in (2, 11, 195)]
        # The number of anti kt R = 0.4 truth jets with pT>25 GeV, not
        # originating from the decay products of the Higgs boson.
        # Start from the AntiKt4Truth collection. Reject any jet with pT<25
        # GeV. Reject any jet withing dR < 0.4 of any electron, tau, photon or
        # parton (directly) produced in the Higgs decay.
        jets = [jet for jet in event.truejets if jet.pt() >= 25 * GeV
                and not any([tau for tau in true_taus if
                             utils.dR(jet.eta(), jet.phi(),
                                      tau.Eta(), tau.Phi()) < 0.4])]
        # Count the number of remaining jets
        self.tree.num_true_jets_no_overlap = len(jets)
        # Dijet kinematics of the two leading non-overlapping truth jets.
        if len(jets) >=2:
            jet1, jet2 = jets[:2]
            self.tree.true_jet1_no_overlap_pt = jet1.pt()
            self.tree.true_jet2_no_overlap_pt = jet2.pt()
            self.tree.true_dEta_jet1_jet2_no_overlap = abs(jet1.eta() - jet2.eta())
            self.tree.true_mass_jet1_jet2_no_overlap = (jet1.p4() + jet2.p4()).M()
            self.tree.true_dphi_jj_higgs_no_overlap = abs(utils.dphi(higgs.phi(), (jet1.p4() + jet2.p4()).Phi()))
        return True
class BCHSampleRunNumber(EventFilter):
    # NOT CONVERTED TO XAOD YET
    """
    d3pd.RunNumber=195848 tells the tool that the sample was made with mc12b
    pileup conditions. Our BCH samples were made with identical pileup
    conditions, but for reasons unclear, they were assigned
    d3pd.RunNumber=212399, and the pileup tool does not know what to do with
    this RunNumber.
    """
    # mc12b-equivalent run number understood by the pileup tool
    MC12B_RUN_NUMBER = 195848

    def passes(self, event):
        event.RunNumber = self.MC12B_RUN_NUMBER
        return True
class ClassifyInclusiveHiggsSample(EventFilter):
    # NOT CONVERTED TO XAOD YET
    """Classify inclusive Higgs samples by decay channel.

    Currently only locates the Higgs (raising RuntimeError if absent);
    the child-based classification below is disabled pending the xAOD
    conversion.
    """

    UNKNOWN, TAUTAU, WW, ZZ, BB = range(5)

    def __init__(self, tree, **kwargs):
        super(ClassifyInclusiveHiggsSample, self).__init__(**kwargs)
        self.tree = tree

    def passes(self, event):
        higgs = None
        # find the Higgs (removed an unused `pt = mc.pt()` assignment here)
        for mc in event.mc:
            if mc.pdgId() == 25 and mc.status() == 62:
                higgs = mc
                break
        if higgs is None:
            raise RuntimeError("Higgs not found!")
        decay_type = self.UNKNOWN
        # check pdg id of children
        # for mc in higgs.iter_children():
        #     if mc.pdgId in (pdg.tau_minus, pdg.tau_plus):
        #         decay_type = self.TAUTAU
        #         break
        #     elif mc.pdgId in (pdg.W_minus, pdg.W_plus):
        #         decay_type = self.WW
        #         break
        #     elif mc.pdgId == pdg.Z0:
        #         decay_type = self.ZZ
        #         break
        #     elif mc.pdgId in (pdg.b, pdg.anti_b):
        #         decay_type = self.BB
        #         break
        # self.tree.higgs_decay_channel = decay_type
        return True
| gpl-3.0 |
renesugar/arrow | python/pyarrow/tests/test_strategies.py | 3 | 1609 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hypothesis as h
import pyarrow as pa
import pyarrow.tests.strategies as past
@h.given(past.all_types)
def test_types(data_type):
    """Generated types are pyarrow DataType instances."""
    assert isinstance(data_type, pa.lib.DataType)
@h.given(past.all_fields)
def test_fields(generated_field):
    """Generated fields are pyarrow Field instances."""
    assert isinstance(generated_field, pa.lib.Field)
@h.given(past.all_schemas)
def test_schemas(generated_schema):
    """Generated schemas are pyarrow Schema instances."""
    assert isinstance(generated_schema, pa.lib.Schema)
@h.given(past.all_arrays)
def test_arrays(generated_array):
    """Generated arrays are pyarrow Array instances."""
    assert isinstance(generated_array, pa.lib.Array)
@h.given(past.all_chunked_arrays)
def test_chunked_arrays(generated_chunked_array):
    """Generated chunked arrays are pyarrow ChunkedArray instances."""
    assert isinstance(generated_chunked_array, pa.lib.ChunkedArray)
@h.given(past.all_record_batches)
def test_record_batches(record_batch):
    """Generated record batches are pyarrow RecordBatch instances."""
    # Fixed misspelled parameter name (was `record_bath`).
    assert isinstance(record_batch, pa.lib.RecordBatch)
@h.given(past.all_tables)
def test_tables(generated_table):
    """Generated tables are pyarrow Table instances."""
    assert isinstance(generated_table, pa.lib.Table)
| apache-2.0 |
johnbachman/belpy | indra/explanation/model_checker/model_checker.py | 1 | 24407 | import logging
import textwrap
from copy import deepcopy
import numpy as np
import networkx as nx
from indra.explanation.pathfinding import get_path_iter, find_sources
try:
import paths_graph as pg
has_pg = True
except ImportError:
has_pg = False
logger = logging.getLogger(__name__)
class PathMetric(object):
    """Describes results of simple path search (path existence).

    Attributes
    ----------
    source_node : str
        The source node of the path
    target_node : str
        The target node of the path
    length : int
        The length of the path
    """
    def __init__(self, source_node, target_node, length):
        self.source_node = source_node
        self.target_node = target_node
        self.length = length

    def __str__(self):
        return 'source_node: %s, target_node: %s, length: %d' % (
            self.source_node, self.target_node, self.length)

    def __repr__(self):
        # repr mirrors str for readable container printing.
        return str(self)
class PathResult(object):
    """Describes results of running the ModelChecker on a single Statement.

    Attributes
    ----------
    path_found : bool
        True if a path was found, False otherwise.
    result_code : string
        - *STATEMENT_TYPE_NOT_HANDLED* - The provided statement type is not
          handled
        - *SUBJECT_MONOMERS_NOT_FOUND* or *SUBJECT_NOT_FOUND* -
          Statement subject not found in model
        - *OBSERVABLES_NOT_FOUND* or *OBJECT_NOT_FOUND* -
          Statement has no associated observable
        - *NO_PATHS_FOUND* - Statement has no path for any observable
        - *MAX_PATH_LENGTH_EXCEEDED* - Statement has no path len <=
          MAX_PATH_LENGTH
        - *PATHS_FOUND* - Statement has path len <= MAX_PATH_LENGTH
        - *INPUT_RULES_NOT_FOUND* - No rules with Statement subject found
        - *MAX_PATHS_ZERO* - Path found but MAX_PATHS is set to zero
    max_paths : int
        The maximum number of specific paths to return for each Statement
        to be explained.
    max_path_length : int
        The maximum length of specific paths to return.
    path_metrics : list[:py:class:`indra.explanation.model_checker.PathMetric`]
        A list of PathMetric objects, each describing the results of a simple
        path search (path existence).
    paths : list[list[tuple[str, int]]]
        A list of paths obtained from path finding. Each path is a list of
        tuples (which are edges in the path), with the first element of the
        tuple the name of a rule, and the second element its polarity in the
        path.
    """
    def __init__(self, path_found, result_code, max_paths, max_path_length):
        self.path_found = path_found
        self.result_code = result_code
        self.max_paths = max_paths
        self.max_path_length = max_path_length
        self.path_metrics = []
        self.paths = []

    def add_path(self, path):
        """Append a path (list of (rule, polarity) tuples) to the result."""
        self.paths.append(path)

    def add_metric(self, path_metric):
        """Append a PathMetric to the result."""
        self.path_metrics.append(path_metric)

    def __str__(self):
        summary = textwrap.dedent("""
            PathResult:
                path_found: {path_found}
                result_code: {result_code}
                path_metrics: {path_metrics}
                paths: {paths}
                max_paths: {max_paths}
                max_path_length: {max_path_length}""")
        ws = '\n    '
        # String representation of path metrics
        if not self.path_metrics:
            pm_str = str(self.path_metrics)
        else:
            pm_str = ws + ws.join(['%d: %s' % (pm_ix, pm) for pm_ix, pm in
                                   enumerate(self.path_metrics)])

        # Indent every edge of a path under its index in the listing.
        def format_path(path, num_spaces=11):
            path_ws = '\n' + (' ' * num_spaces)
            return path_ws.join([str(p) for p in path])

        # String representation of paths
        if not self.paths:
            path_str = str(self.paths)
        else:
            path_str = ws + ws.join(['%d: %s' % (p_ix, format_path(p))
                                     for p_ix, p in enumerate(self.paths)])
        return summary.format(path_found=self.path_found,
                              result_code=self.result_code,
                              max_paths=self.max_paths,
                              max_path_length=self.max_path_length,
                              path_metrics=pm_str, paths=path_str)

    def __repr__(self):
        # repr mirrors str for readable container printing.
        return str(self)
class ModelChecker(object):
"""The parent class of all ModelCheckers.
Parameters
----------
model : pysb.Model or indra.assemblers.indranet.IndraNet or PyBEL.Model
Depending on the ModelChecker class, can be different type.
statements : Optional[list[indra.statements.Statement]]
A list of INDRA Statements to check the model against.
do_sampling : bool
Whether to use breadth-first search or weighted sampling to
generate paths. Default is False (breadth-first search).
seed : int
Random seed for sampling (optional, default is None).
nodes_to_agents : dict
A dictionary mapping nodes of intermediate signed edges graph to INDRA
agents.
Attributes
----------
graph : nx.Digraph
A DiGraph with signed nodes to find paths in.
"""
def __init__(self, model, statements=None, do_sampling=False, seed=None,
nodes_to_agents=None):
self.model = model
if statements:
self.statements = statements
else:
self.statements = []
if seed is not None:
np.random.seed(seed)
self.nodes_to_agents = nodes_to_agents if nodes_to_agents else {}
# Whether to do sampling
self.do_sampling = do_sampling
self.graph = None
def add_statements(self, stmts):
"""Add to the list of statements to check against the model.
Parameters
----------
stmts : list[indra.statements.Statement]
The list of Statements to be added for checking.
"""
self.statements += stmts
def check_model(self, max_paths=1, max_path_length=5,
agent_filter_func=None):
"""Check all the statements added to the ModelChecker.
Parameters
----------
max_paths : Optional[int]
The maximum number of specific paths to return for each Statement
to be explained. Default: 1
max_path_length : Optional[int]
The maximum length of specific paths to return. Default: 5
agent_filter_func : Optional[function]
A function to constrain the intermediate nodes in the path. A
function should take an agent as a parameter and return True if the
agent is allowed to be in a path and False otherwise.
Returns
-------
list of (Statement, PathResult)
Each tuple contains the Statement checked against the model and
a PathResult object describing the results of model checking.
"""
results = []
# Convert agent filter function to node filter function once here
node_filter_func = self.update_filter_func(agent_filter_func)
for idx, stmt in enumerate(self.statements):
logger.info('---')
logger.info('Checking statement (%d/%d): %s' %
(idx + 1, len(self.statements), stmt))
result = self.check_statement(stmt, max_paths, max_path_length,
node_filter_func=node_filter_func)
results.append((stmt, result))
return results
def check_statement(self, stmt, max_paths=1, max_path_length=5,
                    agent_filter_func=None, node_filter_func=None):
    """Check a single Statement against the model.

    Parameters
    ----------
    stmt : indra.statements.Statement
        The Statement to check.
    max_paths : Optional[int]
        The maximum number of specific paths to return for each Statement
        to be explained. Default: 1
    max_path_length : Optional[int]
        The maximum length of specific paths to return. Default: 5
    agent_filter_func : Optional[function]
        A function to constrain the intermediate nodes in the path. A
        function should take an agent as a parameter and return True if the
        agent is allowed to be in a path and False otherwise.
    node_filter_func : Optional[function]
        Similar to agent_filter_func but it takes a node as a parameter
        instead of agent. If not provided, node_filter_func will be
        generated from agent_filter_func.

    Returns
    -------
    result : indra.explanation.modelchecker.PathResult
        A PathResult object containing the result of a test.
    """
    input_set, obj_list, result_code = self.get_all_subjects_objects(stmt)
    if result_code:
        return self.make_false_result(result_code, max_paths,
                                      max_path_length)
    # If source and target are the same, we need to handle a loop
    loop = False
    if (input_set and (len(input_set) == len(obj_list) == 1) and
            (list(input_set)[0] == list(obj_list)[0])):
        loop = True
    # Convert agent filter function to node filter function
    if agent_filter_func and not node_filter_func:
        node_filter_func = self.update_filter_func(agent_filter_func)
    # If we have several objects in obj_list or we have a loop, we add a
    # dummy target node as a child to all nodes in obj_list
    if len(obj_list) > 1 or loop:
        common_target = ('common_target', 0)
        self.graph.add_node(common_target)
        # This is the case when source and target are the same. NetworkX
        # does not allow loops in the paths, so we work around it by using
        # target predecessors as new targets
        if loop:
            for obj in self.graph.predecessors(list(obj_list)[0]):
                self.graph.add_edge(obj, common_target)
        else:
            for obj in obj_list:
                self.graph.add_edge(obj, common_target)
        result = self.find_paths(input_set, common_target, max_paths,
                                 max_path_length, loop, dummy_target=True,
                                 filter_func=node_filter_func)
        # Remove the temporary dummy node so the graph is left unchanged
        # for subsequent statement checks.
        self.graph.remove_node(common_target)
    else:
        result = self.find_paths(input_set, list(obj_list)[0], max_paths,
                                 max_path_length, loop, dummy_target=False,
                                 filter_func=node_filter_func)
    if result.path_found:
        logger.info('Found paths for %s' % stmt)
        return result
    # If we got here, then there was no path for any observable
    logger.info('No paths found for %s' % stmt)
    return self.make_false_result('NO_PATHS_FOUND',
                                  max_paths, max_path_length)
def get_all_subjects_objects(self, stmt):
    """Extract source and target information for a Statement.

    Returns
    -------
    tuple
        A (input_set, obj_list, result_code) triple. result_code is None
        on success; otherwise it is a PathResult code explaining why the
        subjects/objects could not be determined, and the first two
        elements are None.
    """
    # Make sure graph is created
    self.get_graph()
    # Extract subject and object info from test statement
    subj_list, obj_list, result_code = self.process_statement(stmt)
    if result_code:
        return None, None, result_code
    # This is the case if we are checking a Statement whose
    # subject is genuinely None
    if all(s is None for s in subj_list):
        input_set = None
    # This is the case where the Statement has an actual subject
    # but we may still run into issues with finding an input
    # set for it in which case a false result may be returned.
    else:
        logger.info('Subject list: %s' % str(subj_list))
        input_set = []
        meaningful_res_code = None
        # Each subject might produce a different input set and we need to
        # combine them
        for subj in subj_list:
            inp, res_code = self.process_subject(subj)
            if res_code:
                meaningful_res_code = res_code
                continue
            input_set += inp
        # Only fail if no subject at all produced an input set.
        if not input_set and meaningful_res_code:
            return None, None, meaningful_res_code
        logger.info('Input set: %s' % str(input_set))
    # Statement object is None
    if all(o is None for o in obj_list):
        obj_list = None
    return input_set, obj_list, None
def find_paths(self, input_set, target, max_paths=1, max_path_length=5,
               loop=False, dummy_target=False, filter_func=None):
    """Check for a source/target path in the model.

    Parameters
    ----------
    input_set : list or None
        A list of potential sources or None if the test statement subject
        is None.
    target : tuple
        Tuple representing the target node (usually common target node).
    max_paths : int
        The maximum number of specific paths to return.
    max_path_length : int
        The maximum length of specific paths to return.
    loop : bool
        Whether we are looking for a loop path.
    dummy_target : bool
        Whether the target is a dummy node.
    filter_func : function or None
        A function to constrain the search. A function should take a node
        as a parameter and return True if the node is allowed to be in a
        path and False otherwise. If None, then no filtering is done.

    Returns
    -------
    PathResult
        PathResult object indicating the results of the attempt to find
        a path.
    """
    # # -- Route to the path sampling function --
    # NOTE this is not generic at this point!
    # if self.do_sampling:
    #     if not has_pg:
    #         raise Exception('The paths_graph package could not be '
    #                         'imported.')
    #     return self._sample_paths(input_set, obj, target_polarity,
    #                               max_paths, max_path_length)

    # -- Do Breadth-First Enumeration --
    # Generate the predecessors to our observable and count the paths
    path_lengths = []
    path_metrics = []
    sources = []
    for source, path_length in find_sources(self.graph, target, input_set,
                                            filter_func):
        # If a dummy target is used, we need to subtract one edge.
        # In case of loops, we are already missing one edge, there's no
        # need to subtract one more.
        if dummy_target and not loop:
            path_length = path_length - 1
        # There might be a case when sources and targets contain the same
        # nodes (e.g. different agent state in PyBEL networks) that would
        # show up as paths of length 0. We only want to include meaningful
        # paths that contain at least one edge.
        if path_length > 0:
            pm = PathMetric(source, target, path_length)
            path_metrics.append(pm)
            path_lengths.append(path_length)
            # Keep unique sources but use a list, not set to preserve order
            if source not in sources:
                sources.append(source)
    # Now, look for paths
    if path_metrics and max_paths == 0:
        # Caller only wants to know that paths exist, not enumerate them.
        pr = PathResult(True, 'MAX_PATHS_ZERO',
                        max_paths, max_path_length)
        pr.path_metrics = path_metrics
        return pr
    elif path_metrics:
        if min(path_lengths) <= max_path_length:
            # Compensate for the extra dummy edge when picking the search
            # length (mirror of the subtraction above).
            if dummy_target and not loop:
                search_path_length = min(path_lengths) + 1
            else:
                search_path_length = min(path_lengths)
            pr = PathResult(True, 'PATHS_FOUND',
                            max_paths, max_path_length)
            pr.path_metrics = path_metrics
            # Get the first path
            # Try to find paths of fixed length using sources found above
            for source in sources:
                logger.info('Finding paths between %s and %s'
                            % (str(source), target))
                path_iter = get_path_iter(
                    self.graph, source, target, search_path_length, loop,
                    dummy_target, filter_func)
                for path in path_iter:
                    pr.add_path(tuple(path))
                    # Do not get next path if reached max_paths
                    if len(pr.paths) >= max_paths:
                        break
                # Do not check next source if reached max_paths
                if len(pr.paths) >= max_paths:
                    break
            return pr
        # There are no paths shorter than the max path length, so we
        # don't bother trying to get them
        else:
            pr = PathResult(True, 'MAX_PATH_LENGTH_EXCEEDED',
                            max_paths, max_path_length)
            pr.path_metrics = path_metrics
            return pr
    else:
        return PathResult(False, 'NO_PATHS_FOUND',
                          max_paths, max_path_length)
def make_false_result(self, result_code, max_paths, max_path_length):
    """Return a PathResult marked as failed with the given result code."""
    return PathResult(False, result_code, max_paths, max_path_length)
def update_filter_func(self, agent_filter_func):
    """Convert a function filtering agents to a function filtering nodes.

    Parameters
    ----------
    agent_filter_func : function or None
        A function to constrain the intermediate nodes in the path. A
        function should take an agent as a parameter and return True if the
        agent is allowed to be in a path and False otherwise.

    Returns
    -------
    node_filter_func : function or None
        A new filter function applying the logic from agent_filter_func to
        nodes instead of agents, or None if agent_filter_func is None.
    """
    if agent_filter_func is None:
        return None

    def node_filter_func(n):
        # We're using n[0] here because n is a signed node while
        # nodes_to_agents contains unsigned nodes (equivalent of n[0])
        ag = self.nodes_to_agents.get(n[0])
        if ag is None:
            logger.warning('Could not get agent for node %s' % n[0])
            # Do not filter the node if we can't map it to agent
            return True
        return agent_filter_func(ag)

    # Not every callable has __name__ (e.g. functools.partial objects),
    # so fall back to repr() for the log message instead of crashing.
    filter_name = getattr(agent_filter_func, '__name__',
                          repr(agent_filter_func))
    logger.info('Converted %s to node filter function' % filter_name)
    return node_filter_func
def get_nodes_to_agents(self, *args, **kwargs):
    """Return a dictionary mapping nodes of intermediate signed edges graph
    to INDRA agents.
    """
    # Abstract hook used by update_filter_func via self.nodes_to_agents.
    raise NotImplementedError("Method must be implemented in child class.")
def get_graph(self, **kwargs):
    """Return a graph with signed nodes to find the path.

    Abstract: subclasses must construct the graph used for path finding
    (stored as self.graph).
    """
    raise NotImplementedError("Method must be implemented in child class.")
def process_statement(self, stmt):
    """Extract subject/object data for model checking (abstract).

    This method processes the test statement to get the data about subject
    and object, according to the specific model requirements for model
    checking, e.g. PysbModelChecker gets subject monomer patterns and
    observables, while graph based ModelCheckers will return signed nodes
    corresponding to subject and object. If any of the requirements are not
    satisfied, result code is also returned to construct PathResult object.

    Parameters
    ----------
    stmt : indra.statements.Statement
        A statement to process.

    Returns
    -------
    subj_data : list or None
        Data about statement subject to be used as source nodes.
    obj_data : list or None
        Data about statement object to be used as target nodes.
    result_code : str or None
        Result code to construct PathResult.
    """
    raise NotImplementedError("Method must be implemented in child class.")
def process_subject(self, subj_data):
    """Processes the subject of the test statement and returns
    the necessary information to check the statement. In case of
    PysbModelChecker, method returns input_rule_set. If any of the
    requirements are not satisfied, result code is also returned to
    construct PathResult object.
    """
    # Abstract hook called per-subject from get_all_subjects_objects.
    raise NotImplementedError("Method must be implemented in child class.")
def _sample_paths(self, input_set, obj_name, target_polarity,
                  max_paths=1, max_path_length=5):
    # Hook for path-sampling-based checking (see the commented-out routing
    # in find_paths); not implemented in the base class.
    raise NotImplementedError("Method must be implemented in child class.")
def signed_edges_to_signed_nodes(graph, prune_nodes=True,
                                 edge_signs=None,
                                 copy_edge_data=False):
    """Convert a graph with signed edges to a graph with signed nodes.

    Each pair of nodes linked by an edge in an input graph are represented
    as four nodes and two edges in the new graph. For example, an edge (a,
    b, 0), where a and b are nodes and 0 is a sign of an edge (positive),
    will be represented as edges ((a, 0), (b, 0)) and ((a, 1), (b, 1)),
    where (a, 0), (a, 1), (b, 0), (b, 1) are signed nodes. An edge (a, b,
    1) with sign 1 (negative) will be represented as edges ((a, 0), (b,
    1)) and ((a, 1), (b, 0)).

    Parameters
    ----------
    graph : networkx.MultiDiGraph
        Graph with signed edges to convert. Can have multiple edges between
        a pair of nodes.
    prune_nodes : Optional[bool]
        If True, iteratively prunes negative (with sign 1) nodes without
        predecessors.
    edge_signs : Optional[dict]
        A dictionary representing the signing policy of incoming graph. The
        dictionary should have strings 'pos' and 'neg' as keys and integers
        as values. Default: {'pos': 0, 'neg': 1}.
    copy_edge_data : bool|set(keys)
        Option for copying edge data as well from graph. If False (default),
        no edge data is copied (except sign). If True, all edge data is
        copied. If a set of keys is provided, only the keys appearing in the
        set will be copied, assuming the key is part of a nested dictionary.

    Returns
    -------
    signed_nodes_graph : networkx.DiGraph
    """
    # Avoid a mutable default argument; fall back to the standard signing
    # policy (0 = positive, 1 = negative).
    if edge_signs is None:
        edge_signs = {'pos': 0, 'neg': 1}
    signed_nodes_graph = nx.DiGraph()
    # Every input node appears twice: once per sign.
    nodes = []
    for node, node_data in graph.nodes(data=True):
        nodes.append(((node, 0), node_data))
        nodes.append(((node, 1), node_data))
    signed_nodes_graph.add_nodes_from(nodes)
    edges = []
    for u, v, edge_data in graph.edges(data=True):
        copy_dict = deepcopy(edge_data)
        edge_sign = copy_dict.pop('sign', None)
        if edge_sign is None:
            # Unsigned edges cannot be represented; skip them.
            continue
        # Decide which edge attributes to carry over.
        if copy_edge_data is True:
            edge_dict = copy_dict
        elif isinstance(copy_edge_data, set):
            edge_dict = {key: val for key, val in copy_dict.items()
                         if key in copy_edge_data}
        else:
            edge_dict = {}
        if edge_sign == edge_signs['pos']:
            # Positive edges preserve the sign of the source node.
            edges.append(((u, 0), (v, 0), edge_dict))
            edges.append(((u, 1), (v, 1), edge_dict))
        elif edge_sign == edge_signs['neg']:
            # Negative edges flip the sign.
            edges.append(((u, 0), (v, 1), edge_dict))
            edges.append(((u, 1), (v, 0), edge_dict))
    signed_nodes_graph.add_edges_from(edges)
    if prune_nodes:
        signed_nodes_graph = prune_signed_nodes(signed_nodes_graph)
    return signed_nodes_graph
def prune_signed_nodes(graph):
    """Iteratively remove negative-signed nodes (node[1] == 1) that have no
    predecessors, repeating until no such nodes remain.

    Parameters
    ----------
    graph : networkx.DiGraph
        Graph with signed (name, sign) nodes; modified in place.

    Returns
    -------
    networkx.DiGraph
        The same graph object after pruning.
    """
    while True:
        # Negative nodes with in-degree 0 can never be reached by a path.
        nodes_to_prune = [node for node, in_deg in graph.in_degree()
                          if in_deg == 0 and node[1] == 1]
        if not nodes_to_prune:
            break
        # Removing nodes may expose new zero-in-degree nodes; loop again.
        graph.remove_nodes_from(nodes_to_prune)
    return graph
brentp/pedagree | peddy/king.py | 2 | 3278 | from .utils import which
import os
import subprocess
def run_king(vcf_path, ped_obj):
    """Convert a VCF with plink, run KING kinship estimation, and report
    pedigree/kinship discrepancies via kingped().

    Parameters
    ----------
    vcf_path : str
        Path to the input VCF (optionally .gz).
    ped_obj : Ped
        Pedigree object with a `families` dict whose values expose
        `sib_pairs` and `parent_child` sample pairs.

    Raises
    ------
    Exception
        If the `king` or `plink` executable is not on PATH.
    """
    king = which('king')
    if king is None:
        raise Exception("requested king analysis, but king executable not found")
    plink = which('plink')
    if plink is None:
        raise Exception("requested king analysis, but plink executable not found")
    # Derive an output prefix from the VCF file name.
    out = os.path.basename(vcf_path)
    if out.endswith(".gz"):
        out = out[:-3]
    if out.endswith(".vcf"):
        out = out[:-4]
    # BUG FIX: the original used `elog.close() and olog.close()`, which never
    # closed olog because close() returns None and `and` short-circuits.
    # Context managers guarantee both log handles are closed.
    with open(out + ".plink.err", "w") as elog, \
            open(out + ".plink.log", "w") as olog:
        subprocess.check_call([plink, "--vcf", vcf_path, "--make-bed",
                               "--noweb",
                               "--out", out, "--biallelic-only", "--geno",
                               "0.95", "--vcf-half-call", "m"], stderr=elog,
                              stdout=olog)
    with open(out + ".plink.err", "w") as elog, \
            open(out + ".plink.log", "w") as olog:
        subprocess.check_call([king, "-b", out + ".bed", "--kinship",
                               "--prefix", out], stderr=elog, stdout=olog)
    # Collect the declared relationships from the pedigree.
    sib_pairs, parent_child = [], []
    for fid, fam in ped_obj.families.items():
        sib_pairs.extend((x.sample_id, y.sample_id) for x, y in fam.sib_pairs)
        parent_child.extend((x.sample_id, y.sample_id)
                            for x, y in fam.parent_child)
    king_pairs = read_king(out + ".kin0")
    kingped(ped_obj, king_pairs, sib_pairs, parent_child)
def read_king(king_file):
    """Parse KING --kinship output (.kin0) into a dict mapping each sample
    pair, in both orders, to its kinship coefficient."""
    import toolshed as ts
    kinships = {}
    for row in ts.reader(king_file):
        value = float(row['Kinship'])
        # Store both orientations so lookups are order-independent.
        kinships[(row['ID1'], row['ID2'])] = value
        kinships[(row['ID2'], row['ID1'])] = value
    return kinships
def kingped(ped_obj, king_pairs, sib_pairs, parent_child, cutoff=0.13):
    """Print a TSV report of disagreements between pedigree and KING kinship.

    Reports, one line per pair (sample_a, sample_b, error, kinship):
    - declared sib pairs with kinship below `cutoff`
    - declared parent-child pairs with kinship below `cutoff`
    - cross-family pairs with kinship above `cutoff`
    - pairs with kinship > 0.42, flagged as possible duplicates

    Parameters
    ----------
    ped_obj : mapping
        Maps sample_id to a sample object with a `family_id` attribute.
    king_pairs : dict
        (id_a, id_b) -> kinship, in both orders (see read_king).
    sib_pairs, parent_child : list of tuple
        Declared relationships from the pedigree.
    cutoff : float
        Kinship threshold separating related from unrelated pairs.
    """
    seen = set()
    print("sample_a\tsample_b\terror\tkinship")
    fmt = "%s\t%s\t%s\t%.3f"

    def _report_low(pairs, label):
        # Report declared relatives whose observed kinship is below cutoff.
        for pair in pairs:
            if pair not in king_pairs:
                continue
            seen.add(pair)
            v = king_pairs[pair]
            if v < cutoff:
                print(fmt % (pair[0], pair[1], label, v))

    _report_low(sib_pairs, "sibs low kingship")
    _report_low(parent_child, "parent-offspring low kinship")
    # Pairs with high kinship that were not declared as related.
    high = [((a, b), v) for (a, b), v in king_pairs.items()
            if v > cutoff and not ((a, b) in seen or (b, a) in seen)]
    pair_seen = {}
    for (a, b), v in high:
        if (a, b) in pair_seen:
            continue
        if (b, a) in pair_seen:
            continue
        try:
            ao, bo = ped_obj[a], ped_obj[b]
        except KeyError:
            # Sample missing from the pedigree; nothing to compare against.
            continue
        if ao.family_id != bo.family_id:
            print(fmt % (a, b, "high kinship", v))
        pair_seen[(b, a)] = True
    # Kinship near 0.5 indicates the same individual sampled twice.
    pair_seen = {}
    for k, v in king_pairs.items():
        if k in pair_seen:
            continue
        if v > 0.42:
            print(fmt % (k[0], k[1], "possible duplicates", v))
            pair_seen[(k[1], k[0])] = True
if __name__ == "__main__":
    import sys
    # NOTE(review): kingped() requires (ped_obj, king_pairs, sib_pairs,
    # parent_child) but only two positional arguments are passed here, so
    # running this module directly raises TypeError — confirm the intended
    # CLI usage before relying on this entry point.
    kingped(sys.argv[1], sys.argv[2])
| mit |
Maccimo/intellij-community | python/helpers/py3only/docutils/parsers/rst/directives/images.py | 44 | 6933 | # $Id: images.py 7753 2014-06-24 14:52:59Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Directives for figures and simple images.
"""
__docformat__ = 'reStructuredText'
import sys
import urllib.error
import urllib.parse
import urllib.request
from docutils import nodes
from docutils.nodes import fully_normalize_name, whitespace_normalize_name
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives, states
from docutils.parsers.rst.roles import set_classes
try: # check for the Python Imaging Library
import PIL.Image
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
class Image(Directive):
    """reStructuredText ``image`` directive: builds an image node, optionally
    wrapped in a reference (link) node when the ``target`` option is given."""

    align_h_values = ('left', 'center', 'right')
    align_v_values = ('top', 'middle', 'bottom')
    align_values = align_v_values + align_h_values

    def align(argument):
        # This is not callable as self.align. We cannot make it a
        # staticmethod because we're saving an unbound method in
        # option_spec below.
        return directives.choice(argument, Image.align_values)

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'alt': directives.unchanged,
                   'height': directives.length_or_unitless,
                   'width': directives.length_or_percentage_or_unitless,
                   'scale': directives.percentage,
                   'align': align,
                   'name': directives.unchanged,
                   'target': directives.unchanged_required,
                   'class': directives.class_option}

    def run(self):
        """Validate options and return the image (and any system messages)."""
        if 'align' in self.options:
            if isinstance(self.state, states.SubstitutionDef):
                # Check for align_v_values.
                if self.options['align'] not in self.align_v_values:
                    raise self.error(
                        'Error in "%s" directive: "%s" is not a valid value '
                        'for the "align" option within a substitution '
                        'definition. Valid values for "align" are: "%s".'
                        % (self.name, self.options['align'],
                           '", "'.join(self.align_v_values)))
            elif self.options['align'] not in self.align_h_values:
                raise self.error(
                    'Error in "%s" directive: "%s" is not a valid value for '
                    'the "align" option. Valid values for "align" are: "%s".'
                    % (self.name, self.options['align'],
                       '", "'.join(self.align_h_values)))
        messages = []
        reference = directives.uri(self.arguments[0])
        self.options['uri'] = reference
        reference_node = None
        if 'target' in self.options:
            block = states.escape2null(
                self.options['target']).splitlines()
            block = [line for line in block]
            target_type, data = self.state.parse_target(
                block, self.block_text, self.lineno)
            if target_type == 'refuri':
                reference_node = nodes.reference(refuri=data)
            elif target_type == 'refname':
                reference_node = nodes.reference(
                    refname=fully_normalize_name(data),
                    name=whitespace_normalize_name(data))
                reference_node.indirect_reference_name = data
                self.state.document.note_refname(reference_node)
            else:  # malformed target
                messages.append(data)  # data is a system message
            del self.options['target']
        set_classes(self.options)
        image_node = nodes.image(self.block_text, **self.options)
        self.add_name(image_node)
        if reference_node:
            # Wrap the image inside the reference so it renders as a link.
            reference_node += image_node
            return messages + [reference_node]
        else:
            return messages + [image_node]
class Figure(Image):
    """reStructuredText ``figure`` directive: an image wrapped in a figure
    node with optional caption (first paragraph) and legend (remainder)."""

    def align(argument):
        # Figures only accept horizontal alignment values.
        return directives.choice(argument, Figure.align_h_values)

    def figwidth_value(argument):
        # 'image' means: take the width from the image file itself (via PIL).
        if argument.lower() == 'image':
            return 'image'
        else:
            return directives.length_or_percentage_or_unitless(argument, 'px')

    option_spec = Image.option_spec.copy()
    option_spec['figwidth'] = figwidth_value
    option_spec['figclass'] = directives.class_option
    option_spec['align'] = align
    has_content = True

    def run(self):
        """Build the figure node around the image, caption and legend."""
        figwidth = self.options.pop('figwidth', None)
        figclasses = self.options.pop('figclass', None)
        align = self.options.pop('align', None)
        (image_node,) = Image.run(self)
        if isinstance(image_node, nodes.system_message):
            return [image_node]
        figure_node = nodes.figure('', image_node)
        if figwidth == 'image':
            if PIL and self.state.document.settings.file_insertion_enabled:
                imagepath = urllib.request.url2pathname(image_node['uri'])
                try:
                    img = PIL.Image.open(
                        imagepath.encode(sys.getfilesystemencoding()))
                except (IOError, UnicodeEncodeError):
                    pass  # TODO: warn?
                else:
                    self.state.document.settings.record_dependencies.add(
                        imagepath.replace('\\', '/'))
                    figure_node['width'] = '%dpx' % img.size[0]
                    del img
        elif figwidth is not None:
            figure_node['width'] = figwidth
        if figclasses:
            figure_node['classes'] += figclasses
        if align:
            figure_node['align'] = align
        if self.content:
            node = nodes.Element()  # anonymous container for parsing
            self.state.nested_parse(self.content, self.content_offset, node)
            first_node = node[0]
            if isinstance(first_node, nodes.paragraph):
                caption = nodes.caption(first_node.rawsource, '',
                                        *first_node.children)
                caption.source = first_node.source
                caption.line = first_node.line
                figure_node += caption
            elif not (isinstance(first_node, nodes.comment)
                      and len(first_node) == 0):
                error = self.state_machine.reporter.error(
                    'Figure caption must be a paragraph or empty comment.',
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                return [figure_node, error]
            if len(node) > 1:
                figure_node += nodes.legend('', *node[1:])
        return [figure_node]
| apache-2.0 |
wildjan/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
    """Raised when a certificate fails to match the requested hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    # Collect every name we tried, for the failure message.
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
| apache-2.0 |
40023255/w16b_test | static/Brython3.1.3-20150514-095342/Lib/_sre.py | 622 | 51369 | # NOT_RPYTHON
"""
A pure Python reimplementation of the _sre module from CPython 2.4
Copyright 2005 Nik Haldimann, licensed under the MIT license
This code is based on material licensed under CNRI's Python 1.6 license and
copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
"""
MAXREPEAT = 2147483648
#import array
import operator, sys
from sre_constants import ATCODES, OPCODES, CHCODES
from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL
from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE
import sys
# Identifying as _sre from Python 2.3 or 2.4
#if sys.version_info[:2] >= (2, 4):
MAGIC = 20031017
#else:
# MAGIC = 20030419
# In _sre.c this is bytesize of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large
# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python
# we only see re bytecodes as Python longs, we shouldn't have to care about the
# codesize. But sre_compile will compile some stuff differently depending on the
# codesize (e.g., charsets).
# starting with python 3.3 CODESIZE is 4
#if sys.maxunicode == 65535:
# CODESIZE = 2
#else:
CODESIZE = 4
copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann"
def getcodesize():
    """Return CODESIZE, the byte size of one regex code word (4 here)."""
    return CODESIZE
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
    """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern
    object. Actual compilation to opcodes happens in sre_compile."""
    # NOTE(review): groupindex/indexgroup are mutable defaults; they are only
    # passed through here, but callers must not mutate the default objects.
    return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
def getlower(char_ord, flags):
    """Return the lowercase ordinal for char_ord, honoring regex flags.

    ASCII characters are always lowered; other characters are lowered only
    under the UNICODE flag, or under the LOCALE flag when below 256.
    """
    is_ascii = char_ord < 128
    unicode_mode = bool(flags & SRE_FLAG_UNICODE)
    locale_latin1 = bool(flags & SRE_FLAG_LOCALE) and char_ord < 256
    if is_ascii or unicode_mode or locale_latin1:
        return ord(chr(char_ord).lower())
    return char_ord
class SRE_Pattern:
    """Pure-Python compiled pattern object, mirroring CPython's _sre pattern
    type. Matching is delegated to a _State object driven by the opcode
    sequence in self._code."""

    def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
        self.pattern = pattern
        self.flags = flags
        self.groups = groups
        self.groupindex = groupindex # Maps group names to group indices
        self._indexgroup = indexgroup # Maps indices to group names
        self._code = code

    def match(self, string, pos=0, endpos=sys.maxsize):
        """If zero or more characters at the beginning of string match this
        regular expression, return a corresponding MatchObject instance. Return
        None if the string does not match the pattern."""
        state = _State(string, pos, endpos, self.flags)
        if state.match(self._code):
            return SRE_Match(self, state)
        return None

    def search(self, string, pos=0, endpos=sys.maxsize):
        """Scan through string looking for a location where this regular
        expression produces a match, and return a corresponding MatchObject
        instance. Return None if no position in the string matches the
        pattern."""
        state = _State(string, pos, endpos, self.flags)
        if state.search(self._code):
            return SRE_Match(self, state)
        else:
            return None

    def findall(self, string, pos=0, endpos=sys.maxsize):
        """Return a list of all non-overlapping matches of pattern in string."""
        matchlist = []
        state = _State(string, pos, endpos, self.flags)
        while state.start <= state.end:
            state.reset()
            state.string_position = state.start
            if not state.search(self._code):
                break
            match = SRE_Match(self, state)
            if self.groups == 0 or self.groups == 1:
                item = match.group(self.groups)
            else:
                item = match.groups("")
            matchlist.append(item)
            # Step past zero-width matches to avoid an infinite loop.
            if state.string_position == state.start:
                state.start += 1
            else:
                state.start = state.string_position
        return matchlist

    def _subx(self, template, string, count=0, subn=False):
        """Shared implementation for sub()/subn(); returns the substituted
        string, plus the substitution count when subn is True."""
        filter = template
        if not callable(template) and "\\" in template:
            # handle non-literal strings ; hand it over to the template compiler
            #import sre #sre was renamed to re
            #fix me brython
            #print("possible issue at _sre.py line 116")
            import re as sre
            filter = sre._subx(self, template)
        state = _State(string, 0, sys.maxsize, self.flags)
        sublist = []
        n = last_pos = 0
        while not count or n < count:
            state.reset()
            state.string_position = state.start
            if not state.search(self._code):
                break
            if last_pos < state.start:
                sublist.append(string[last_pos:state.start])
            if not (last_pos == state.start and
                    last_pos == state.string_position and n > 0):
                # the above ignores empty matches on latest position
                if callable(filter):
                    sublist.append(filter(SRE_Match(self, state)))
                else:
                    sublist.append(filter)
                last_pos = state.string_position
                n += 1
            # Step past zero-width matches to avoid an infinite loop.
            if state.string_position == state.start:
                state.start += 1
            else:
                state.start = state.string_position
        if last_pos < state.end:
            sublist.append(string[last_pos:state.end])
        item = "".join(sublist)
        if subn:
            return item, n
        else:
            return item

    def sub(self, repl, string, count=0):
        """Return the string obtained by replacing the leftmost non-overlapping
        occurrences of pattern in string by the replacement repl."""
        return self._subx(repl, string, count, False)

    def subn(self, repl, string, count=0):
        """Return the tuple (new_string, number_of_subs_made) found by replacing
        the leftmost non-overlapping occurrences of pattern with the replacement
        repl."""
        return self._subx(repl, string, count, True)

    def split(self, string, maxsplit=0):
        """Split string by the occurrences of pattern."""
        splitlist = []
        state = _State(string, 0, sys.maxsize, self.flags)
        n = 0
        last = state.start
        while not maxsplit or n < maxsplit:
            state.reset()
            state.string_position = state.start
            if not state.search(self._code):
                break
            if state.start == state.string_position: # zero-width match
                if last == state.end: # or end of string
                    break
                state.start += 1
                continue
            splitlist.append(string[last:state.start])
            # add groups (if any)
            if self.groups:
                match = SRE_Match(self, state)
                splitlist.extend(list(match.groups(None)))
            n += 1
            last = state.start = state.string_position
        splitlist.append(string[last:state.end])
        return splitlist

    def finditer(self, string, pos=0, endpos=sys.maxsize):
        """Return a list of all non-overlapping matches of pattern in string."""
        # NOTE(review): unlike CPython's re, this returns a list of match
        # objects rather than a lazy iterator (see commented-out lines).
        #scanner = self.scanner(string, pos, endpos)
        _list=[]
        _m=self.scanner(string, pos, endpos)
        _re=SRE_Scanner(self, string, pos, endpos)
        _m=_re.search()
        while _m:
            _list.append(_m)
            _m=_re.search()
        return _list
        #return iter(scanner.search, None)

    def scanner(self, string, start=0, end=sys.maxsize):
        return SRE_Scanner(self, string, start, end)

    def __copy__(self):
        raise TypeError("cannot copy this pattern object")

    def __deepcopy__(self):
        # NOTE(review): the deepcopy protocol passes a memo argument, which
        # this signature does not accept; the call still fails with a
        # TypeError either way, which appears to be the intent.
        raise TypeError("cannot copy this pattern object")
class SRE_Scanner:
    """Undocumented scanner interface of sre."""

    def __init__(self, pattern, string, start, end):
        self.pattern = pattern
        self._state = _State(string, start, end, self.pattern.flags)

    def _match_search(self, matcher):
        # Run the given matcher at the current position, then advance the
        # scan start, stepping by one past zero-width matches so repeated
        # calls make progress.
        state = self._state
        state.reset()
        state.string_position = state.start
        match = None
        if matcher(self.pattern._code):
            match = SRE_Match(self.pattern, state)
        if match is None or state.string_position == state.start:
            state.start += 1
        else:
            state.start = state.string_position
        return match

    def match(self):
        # Anchored match at the current scan position.
        return self._match_search(self._state.match)

    def search(self):
        # Unanchored search from the current scan position.
        return self._match_search(self._state.search)
class SRE_Match:
    """Result object for a single successful match.

    Mirrors the interface of CPython's C-level match object: group access,
    spans, group dictionaries and template expansion.
    """

    def __init__(self, pattern, state):
        self.re = pattern
        self.string = state.string
        self.pos = state.pos
        self.endpos = state.end
        self.lastindex = state.lastindex
        if self.lastindex < 0:
            # -1 is the engine's sentinel for "no group matched".
            self.lastindex = None
        self.regs = self._create_regs(state)
        #statement below is not valid under python3 ( 0 <= None)
        #if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
        if self.lastindex is not None and pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
            # The above upper-bound check should not be necessary, as the re
            # compiler is supposed to always provide an _indexgroup list long
            # enough. But the re.Scanner class seems to screw up something
            # there, test_scanner in test_re won't work without upper-bound
            # checking. XXX investigate this and report bug to CPython.
            self.lastgroup = pattern._indexgroup[self.lastindex]
        else:
            self.lastgroup = None

    def _create_regs(self, state):
        """Creates a tuple of index pairs representing matched groups."""
        regs = [(state.start, state.string_position)]
        for group in range(self.re.groups):
            mark_index = 2 * group
            if mark_index + 1 < len(state.marks) \
                    and state.marks[mark_index] is not None \
                    and state.marks[mark_index + 1] is not None:
                regs.append((state.marks[mark_index], state.marks[mark_index + 1]))
            else:
                # Group did not participate in the match.
                regs.append((-1, -1))
        return tuple(regs)

    def _get_index(self, group):
        """Map a group number or name to its numeric index; raise IndexError."""
        if isinstance(group, int):
            if group >= 0 and group <= self.re.groups:
                return group
        else:
            if group in self.re.groupindex:
                return self.re.groupindex[group]
        raise IndexError("no such group")

    def _get_slice(self, group, default):
        """Return the substring for *group*, or *default* if it didn't match."""
        group_indices = self.regs[group]
        if group_indices[0] >= 0:
            return self.string[group_indices[0]:group_indices[1]]
        else:
            return default

    def start(self, group=0):
        """Returns the indices of the start of the substring matched by group;
        group defaults to zero (meaning the whole matched substring). Returns -1
        if group exists but did not contribute to the match."""
        return self.regs[self._get_index(group)][0]

    def end(self, group=0):
        """Returns the indices of the end of the substring matched by group;
        group defaults to zero (meaning the whole matched substring). Returns -1
        if group exists but did not contribute to the match."""
        return self.regs[self._get_index(group)][1]

    def span(self, group=0):
        """Returns the 2-tuple (m.start(group), m.end(group))."""
        return self.start(group), self.end(group)

    def expand(self, template):
        """Return the string obtained by doing backslash substitution and
        resolving group references on template."""
        # NOTE(review): relies on a module named `sre` providing _expand —
        # presumably Brython's pure-Python re layer; confirm in that package.
        import sre
        return sre._expand(self.re, self, template)

    def groups(self, default=None):
        """Returns a tuple containing all the subgroups of the match. The
        default argument is used for groups that did not participate in the
        match (defaults to None)."""
        groups = []
        for indices in self.regs[1:]:
            if indices[0] >= 0:
                groups.append(self.string[indices[0]:indices[1]])
            else:
                groups.append(default)
        return tuple(groups)

    def groupdict(self, default=None):
        """Return a dictionary containing all the named subgroups of the match.
        The default argument is used for groups that did not participate in the
        match (defaults to None)."""
        groupdict = {}
        for key, value in self.re.groupindex.items():
            groupdict[key] = self._get_slice(value, default)
        return groupdict

    def group(self, *args):
        """Returns one or more subgroups of the match. Each argument is either a
        group index or a group name."""
        if len(args) == 0:
            args = (0,)
        grouplist = []
        for group in args:
            grouplist.append(self._get_slice(self._get_index(group), None))
        if len(grouplist) == 1:
            return grouplist[0]
        else:
            return tuple(grouplist)

    def __copy__(self):
        # Bug fix: the original omitted ``self``, so copy.copy() raised a
        # confusing argument-count TypeError instead of this deliberate one.
        raise TypeError("cannot copy this pattern object")

    def __deepcopy__(self, memo):
        # Bug fix: the original omitted ``self`` and ``memo`` (the copy
        # protocol always passes a memo dict to __deepcopy__).
        raise TypeError("cannot copy this pattern object")
class _State:
    """Mutable matching state shared by every context of one match attempt.

    Holds the subject string, the [start, end) window, the group mark list,
    and the stacks that drive the iterative backtracking interpreter.
    """

    def __init__(self, string, start, end, flags):
        self.string = string
        # Clamp the requested window to the actual string bounds.
        if start < 0:
            start = 0
        if end > len(string):
            end = len(string)
        self.start = start
        self.string_position = self.start
        self.end = end
        self.pos = start
        self.flags = flags
        self.reset()

    def reset(self):
        """Clear all per-attempt state (marks, stacks, repeat chain)."""
        self.marks = []
        self.lastindex = -1
        self.marks_stack = []
        self.context_stack = []
        self.repeat = None

    def match(self, pattern_codes):
        """Try to match *pattern_codes* at the current position.

        Drives the interpreter iteratively: a context whose dispatch returns
        None is waiting on a child and stays on the stack until it finishes.
        Returns the final context's has_matched value.
        """
        # Optimization: Check string length. pattern_codes[3] contains the
        # minimum length for a string to possibly match.
        # brython.. the optimization doesn't work
        #if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]:
        #    if self.end - self.string_position < pattern_codes[3]:
        #        #_log("reject (got %d chars, need %d)"
        #        #    % (self.end - self.string_position, pattern_codes[3]))
        #        return False
        dispatcher = _OpcodeDispatcher()
        self.context_stack.append(_MatchContext(self, pattern_codes))
        has_matched = None
        while len(self.context_stack) > 0:
            context = self.context_stack[-1]
            has_matched = dispatcher.match(context)
            if has_matched is not None: # don't pop if context isn't done
                self.context_stack.pop()
        return has_matched

    def search(self, pattern_codes):
        """Scan forward, attempting a match at each successive position."""
        flags = 0
        if pattern_codes[0] == OPCODES["info"]:
            # optimization info block
            # <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info>
            if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1:
                return self.fast_search(pattern_codes)
            flags = pattern_codes[2]
            pattern_codes = pattern_codes[pattern_codes[1] + 1:]
        string_position = self.start
        if pattern_codes[0] == OPCODES["literal"]:
            # Special case: Pattern starts with a literal character. This is
            # used for short prefixes
            character = pattern_codes[1]
            while True:
                # Skip ahead to the next occurrence of the first literal.
                while string_position < self.end \
                        and ord(self.string[string_position]) != character:
                    string_position += 1
                if string_position >= self.end:
                    return False
                self.start = string_position
                string_position += 1
                self.string_position = string_position
                if flags & SRE_INFO_LITERAL:
                    # Pattern is the single literal: finding it is a match.
                    return True
                if self.match(pattern_codes[2:]):
                    return True
            return False
        # General case
        while string_position <= self.end:
            self.reset()
            self.start = self.string_position = string_position
            if self.match(pattern_codes):
                return True
            string_position += 1
        return False

    def fast_search(self, pattern_codes):
        """Skips forward in a string as fast as possible using information from
        an optimization info block."""
        # pattern starts with a known prefix
        # <5=length> <6=skip> <7=prefix data> <overlap data>
        flags = pattern_codes[2]
        prefix_len = pattern_codes[5]
        prefix_skip = pattern_codes[6] # don't really know what this is good for
        prefix = pattern_codes[7:7 + prefix_len]
        # overlap is a KMP-style failure table for the prefix.
        overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1]
        pattern_codes = pattern_codes[pattern_codes[1] + 1:]
        i = 0
        string_position = self.string_position
        while string_position < self.end:
            while True:
                if ord(self.string[string_position]) != prefix[i]:
                    if i == 0:
                        break
                    else:
                        i = overlap[i]
                else:
                    i += 1
                    if i == prefix_len:
                        # found a potential match
                        self.start = string_position + 1 - prefix_len
                        self.string_position = string_position + 1 \
                                - prefix_len + prefix_skip
                        if flags & SRE_INFO_LITERAL:
                            return True # matched all of pure literal pattern
                        if self.match(pattern_codes[2 * prefix_skip:]):
                            return True
                        i = overlap[i]
                    break
            string_position += 1
        return False

    def set_mark(self, mark_nr, position):
        """Record *position* for mark slot *mark_nr*, growing the list as needed."""
        if mark_nr & 1:
            # This id marks the end of a group.
            # fix python 3 division incompatability
            #self.lastindex = mark_nr / 2 + 1
            self.lastindex = mark_nr // 2 + 1
        if mark_nr >= len(self.marks):
            self.marks.extend([None] * (mark_nr - len(self.marks) + 1))
        self.marks[mark_nr] = position

    def get_marks(self, group_index):
        """Return (start, end) marks for a group, or (None, None) if unset."""
        marks_index = 2 * group_index
        if len(self.marks) > marks_index + 1:
            return self.marks[marks_index], self.marks[marks_index + 1]
        else:
            return None, None

    def marks_push(self):
        # Snapshot marks + lastindex so backtracking can restore them.
        self.marks_stack.append((self.marks[:], self.lastindex))

    def marks_pop(self):
        self.marks, self.lastindex = self.marks_stack.pop()

    def marks_pop_keep(self):
        # Restore the snapshot but leave it on the stack for reuse.
        self.marks, self.lastindex = self.marks_stack[-1]

    def marks_pop_discard(self):
        self.marks_stack.pop()

    def lower(self, char_ord):
        """Lowercase an ordinal according to the pattern's flags."""
        return getlower(char_ord, self.flags)
class _MatchContext:
def __init__(self, state, pattern_codes):
self.state = state
self.pattern_codes = pattern_codes
self.string_position = state.string_position
self.code_position = 0
self.has_matched = None
def push_new_context(self, pattern_offset):
"""Creates a new child context of this context and pushes it on the
stack. pattern_offset is the offset off the current code position to
start interpreting from."""
child_context = _MatchContext(self.state,
self.pattern_codes[self.code_position + pattern_offset:])
#print("_sre.py:517:pushing new context") #, child_context.has_matched)
#print(self.state.string_position)
#print(self.pattern_codes[self.code_position + pattern_offset:])
#print(pattern_offset)
self.state.context_stack.append(child_context)
return child_context
def peek_char(self, peek=0):
return self.state.string[self.string_position + peek]
def skip_char(self, skip_count):
self.string_position += skip_count
def remaining_chars(self):
return self.state.end - self.string_position
def peek_code(self, peek=0):
return self.pattern_codes[self.code_position + peek]
def skip_code(self, skip_count):
self.code_position += skip_count
def remaining_codes(self):
return len(self.pattern_codes) - self.code_position
def at_beginning(self):
return self.string_position == 0
def at_end(self):
return self.string_position == self.state.end
def at_linebreak(self):
return not self.at_end() and _is_linebreak(self.peek_char())
def at_boundary(self, word_checker):
if self.at_beginning() and self.at_end():
return False
that = not self.at_beginning() and word_checker(self.peek_char(-1))
this = not self.at_end() and word_checker(self.peek_char())
return this != that
class _RepeatContext(_MatchContext):
    """Bookkeeping context for REPEAT/UNTIL opcodes: tracks the iteration
    count and links to the enclosing repeat so it can be restored on exit."""

    def __init__(self, context):
        super().__init__(context.state,
                         context.pattern_codes[context.code_position:])
        self.count = -1
        self.previous = context.state.repeat
        self.last_position = None
class _Dispatcher:
DISPATCH_TABLE = None
def dispatch(self, code, context):
method = self.DISPATCH_TABLE.get(code, self.__class__.unknown)
return method(self, context)
def unknown(self, code, ctx):
raise NotImplementedError()
def build_dispatch_table(cls, code_dict, method_prefix):
if cls.DISPATCH_TABLE is not None:
return
table = {}
for key, value in code_dict.items():
if hasattr(cls, "%s%s" % (method_prefix, key)):
table[value] = getattr(cls, "%s%s" % (method_prefix, key))
cls.DISPATCH_TABLE = table
build_dispatch_table = classmethod(build_dispatch_table)
class _OpcodeDispatcher(_Dispatcher):
    """Interprets compiled pattern opcodes against a _MatchContext.

    Handlers that must recurse into a sub-pattern are written as generators:
    they yield False while a child context is pending and True when they are
    finished, which lets _State.match() drive the whole interpretation
    iteratively (the suspended generator is parked in executing_contexts).
    """

    def __init__(self):
        # Generators suspended while a child context runs, keyed by id(context).
        self.executing_contexts = {}
        self.at_dispatcher = _AtcodeDispatcher()
        self.ch_dispatcher = _ChcodeDispatcher()
        self.set_dispatcher = _CharsetDispatcher()

    def match(self, context):
        """Returns True if the current context matches, False if it doesn't and
        None if matching is not finished, ie must be resumed after child
        contexts have been matched."""
        while context.remaining_codes() > 0 and context.has_matched is None:
            opcode = context.peek_code()
            if not self.dispatch(opcode, context):
                return None
        if context.has_matched is None:
            context.has_matched = False
        return context.has_matched

    def dispatch(self, opcode, context):
        """Dispatches a context on a given opcode. Returns True if the context
        is done matching, False if it must be resumed when next encountered."""
        #if self.executing_contexts.has_key(id(context)):
        if id(context) in self.executing_contexts:
            # Resume the generator that was suspended on this context.
            generator = self.executing_contexts[id(context)]
            del self.executing_contexts[id(context)]
            has_finished = next(generator)
        else:
            method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
            has_finished = method(self, context)
            if hasattr(has_finished, "__next__"): # avoid using the types module
                # The handler is a generator; run its first step now.
                generator = has_finished
                has_finished = next(generator)
        if not has_finished:
            self.executing_contexts[id(context)] = generator
        return has_finished

    def op_success(self, ctx):
        # end of pattern
        #self._log(ctx, "SUCCESS")
        ctx.state.string_position = ctx.string_position
        ctx.has_matched = True
        return True

    def op_failure(self, ctx):
        # immediate failure
        #self._log(ctx, "FAILURE")
        ctx.has_matched = False
        return True

    def general_op_literal(self, ctx, compare, decorate=lambda x: x):
        # Shared body for the four LITERAL variants; *compare* is eq/ne and
        # *decorate* optionally lowercases for the IGNORE forms.
        #print(ctx.peek_char())
        if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())),
                decorate(ctx.peek_code(1))):
            ctx.has_matched = False
        ctx.skip_code(2)
        ctx.skip_char(1)

    def op_literal(self, ctx):
        # match literal string
        # <LITERAL> <code>
        #self._log(ctx, "LITERAL", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.eq)
        return True

    def op_not_literal(self, ctx):
        # match anything that is not the given literal character
        # <NOT_LITERAL> <code>
        #self._log(ctx, "NOT_LITERAL", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.ne)
        return True

    def op_literal_ignore(self, ctx):
        # match literal regardless of case
        # <LITERAL_IGNORE> <code>
        #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.eq, ctx.state.lower)
        return True

    def op_not_literal_ignore(self, ctx):
        # match literal regardless of case
        # <LITERAL_IGNORE> <code>
        #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.ne, ctx.state.lower)
        return True

    def op_at(self, ctx):
        # match at given position
        # <AT> <code>
        #self._log(ctx, "AT", ctx.peek_code(1))
        if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx):
            ctx.has_matched = False
            #print('_sre.py:line693, update context.has_matched variable')
            return True
        ctx.skip_code(2)
        return True

    def op_category(self, ctx):
        # match at given category
        # <CATEGORY> <code>
        #self._log(ctx, "CATEGORY", ctx.peek_code(1))
        if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
            ctx.has_matched = False
            #print('_sre.py:line703, update context.has_matched variable')
            return True
        ctx.skip_code(2)
        ctx.skip_char(1)
        return True

    def op_any(self, ctx):
        # match anything (except a newline)
        # <ANY>
        #self._log(ctx, "ANY")
        if ctx.at_end() or ctx.at_linebreak():
            ctx.has_matched = False
            #print('_sre.py:line714, update context.has_matched variable')
            return True
        ctx.skip_code(1)
        ctx.skip_char(1)
        return True

    def op_any_all(self, ctx):
        # match anything
        # <ANY_ALL>
        #self._log(ctx, "ANY_ALL")
        if ctx.at_end():
            ctx.has_matched = False
            #print('_sre.py:line725, update context.has_matched variable')
            return True
        ctx.skip_code(1)
        ctx.skip_char(1)
        return True

    def general_op_in(self, ctx, decorate=lambda x: x):
        # Shared body for IN / IN_IGNORE: test one char against a charset.
        #self._log(ctx, "OP_IN")
        #print('general_op_in')
        if ctx.at_end():
            ctx.has_matched = False
            #print('_sre.py:line734, update context.has_matched variable')
            return
        skip = ctx.peek_code(1)
        ctx.skip_code(2) # set op pointer to the set code
        #print(ctx.peek_char(), ord(ctx.peek_char()),
        #    decorate(ord(ctx.peek_char())))
        if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))):
            #print('_sre.py:line738, update context.has_matched variable')
            ctx.has_matched = False
            return
        ctx.skip_code(skip - 1)
        ctx.skip_char(1)
        #print('end:general_op_in')

    def op_in(self, ctx):
        # match set member (or non_member)
        # <IN> <skip> <set>
        #self._log(ctx, "OP_IN")
        self.general_op_in(ctx)
        return True

    def op_in_ignore(self, ctx):
        # match set member (or non_member), disregarding case of current char
        # <IN_IGNORE> <skip> <set>
        #self._log(ctx, "OP_IN_IGNORE")
        self.general_op_in(ctx, ctx.state.lower)
        return True

    def op_jump(self, ctx):
        # jump forward
        # <JUMP> <offset>
        #self._log(ctx, "JUMP", ctx.peek_code(1))
        ctx.skip_code(ctx.peek_code(1) + 1)
        return True

    # skip info
    # <INFO> <skip>
    op_info = op_jump

    def op_mark(self, ctx):
        # set mark
        # <MARK> <gid>
        #self._log(ctx, "OP_MARK", ctx.peek_code(1))
        ctx.state.set_mark(ctx.peek_code(1), ctx.string_position)
        ctx.skip_code(2)
        return True

    def op_branch(self, ctx):
        # alternation
        # <BRANCH> <0=skip> code <JUMP> ... <NULL>
        #self._log(ctx, "BRANCH")
        ctx.state.marks_push()
        ctx.skip_code(1)
        current_branch_length = ctx.peek_code(0)
        while current_branch_length:
            # The following tries to shortcut branches starting with a
            # (unmatched) literal. _sre.c also shortcuts charsets here.
            if not (ctx.peek_code(1) == OPCODES["literal"] and \
                    (ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))):
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(1)
                #print("_sre.py:803:op_branch")
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.state.marks_pop_keep()
            ctx.skip_code(current_branch_length)
            current_branch_length = ctx.peek_code(0)
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        #print('_sre.py:line805, update context.has_matched variable')
        yield True

    def op_repeat_one(self, ctx):
        # match repeated sequence (maximizing).
        # this operator only works if the repeated item is exactly one character
        # wide, and we're not already collecting backtracking points.
        # <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        #print("repeat one", mincount, maxcount)
        #self._log(ctx, "REPEAT_ONE", mincount, maxcount)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = False
            yield True
        ctx.state.string_position = ctx.string_position
        # Greedily consume as many single-item repetitions as possible.
        count = self.count_repetitions(ctx, maxcount)
        ctx.skip_char(count)
        if count < mincount:
            ctx.has_matched = False
            yield True
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
            # tail is empty. we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = True
            yield True
        ctx.state.marks_push()
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]:
            # Special case: Tail starts with a literal. Skip positions where
            # the rest of the pattern cannot possibly match.
            char = ctx.peek_code(ctx.peek_code(1) + 2)
            while True:
                while count >= mincount and \
                        (ctx.at_end() or ord(ctx.peek_char()) != char):
                    ctx.skip_char(-1)
                    count -= 1
                if count < mincount:
                    break
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
                #print("_sre.py:856:push_new_context")
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.skip_char(-1)
                count -= 1
                ctx.state.marks_pop_keep()
        else:
            # General case: backtracking
            while count >= mincount:
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.skip_char(-1)
                count -= 1
                ctx.state.marks_pop_keep()
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        #ctx.has_matched = True # <== this should be True (so match object gets returned to program)
        yield True

    def op_min_repeat_one(self, ctx):
        # match repeated sequence (minimizing)
        # <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        #self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = False
            yield True
        ctx.state.string_position = ctx.string_position
        if mincount == 0:
            count = 0
        else:
            count = self.count_repetitions(ctx, mincount)
            if count < mincount:
                ctx.has_matched = False
                #print('_sre.py:line891, update context.has_matched variable')
                yield True
            ctx.skip_char(count)
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
            # tail is empty. we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = True
            yield True
        ctx.state.marks_push()
        # Lazily grow the repetition count until the tail matches.
        while maxcount == MAXREPEAT or count <= maxcount:
            ctx.state.string_position = ctx.string_position
            child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
            #print('_sre.py:916:push new context')
            yield False
            if child_context.has_matched:
                ctx.has_matched = True
                yield True
            ctx.state.string_position = ctx.string_position
            if self.count_repetitions(ctx, 1) == 0:
                break
            ctx.skip_char(1)
            count += 1
            ctx.state.marks_pop_keep()
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        yield True

    def op_repeat(self, ctx):
        # create repeat context. all the hard work is done by the UNTIL
        # operator (MAX_UNTIL, MIN_UNTIL)
        # <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
        #self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3))
        #if ctx.state.repeat is None:
        #    print("951:ctx.state.repeat is None")
        #    #ctx.state.repeat=_RepeatContext(ctx)
        repeat = _RepeatContext(ctx)
        ctx.state.repeat = repeat
        ctx.state.string_position = ctx.string_position
        child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
        #print("_sre.py:941:push new context", id(child_context))
        #print(child_context.state.repeat)
        #print(ctx.state.repeat)
        # are these two yields causing the issue?
        yield False
        ctx.state.repeat = repeat.previous
        ctx.has_matched = child_context.has_matched
        yield True

    def op_max_until(self, ctx):
        # maximizing repeat
        # <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
        repeat = ctx.state.repeat
        #print("op_max_until") #, id(ctx.state.repeat))
        if repeat is None:
            #print(id(ctx), id(ctx.state))
            raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        #self._log(ctx, "MAX_UNTIL", count)
        if count < mincount:
            # not enough matches
            repeat.count = count
            child_context = repeat.push_new_context(4)
            yield False
            ctx.has_matched = child_context.has_matched
            if not ctx.has_matched:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            yield True
        if (count < maxcount or maxcount == MAXREPEAT) \
                and ctx.state.string_position != repeat.last_position:
            # we may have enough matches, if we can match another item, do so
            repeat.count = count
            ctx.state.marks_push()
            save_last_position = repeat.last_position # zero-width match protection
            repeat.last_position = ctx.state.string_position
            child_context = repeat.push_new_context(4)
            yield False
            repeat.last_position = save_last_position
            if child_context.has_matched:
                ctx.state.marks_pop_discard()
                ctx.has_matched = True
                yield True
            ctx.state.marks_pop()
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        # cannot match more repeated items here. make sure the tail matches
        ctx.state.repeat = repeat.previous
        child_context = ctx.push_new_context(1)
        #print("_sre.py:987:op_max_until")
        yield False
        ctx.has_matched = child_context.has_matched
        if not ctx.has_matched:
            ctx.state.repeat = repeat
            ctx.state.string_position = ctx.string_position
        yield True

    def op_min_until(self, ctx):
        # minimizing repeat
        # <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail
        repeat = ctx.state.repeat
        if repeat is None:
            raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        #self._log(ctx, "MIN_UNTIL", count)
        if count < mincount:
            # not enough matches
            repeat.count = count
            child_context = repeat.push_new_context(4)
            yield False
            ctx.has_matched = child_context.has_matched
            if not ctx.has_matched:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            yield True
        # see if the tail matches
        ctx.state.marks_push()
        ctx.state.repeat = repeat.previous
        child_context = ctx.push_new_context(1)
        #print('_sre.py:1022:push new context')
        yield False
        if child_context.has_matched:
            ctx.has_matched = True
            yield True
        ctx.state.repeat = repeat
        ctx.state.string_position = ctx.string_position
        ctx.state.marks_pop()
        # match more until tail matches
        if count >= maxcount and maxcount != MAXREPEAT:
            ctx.has_matched = False
            #print('_sre.py:line1022, update context.has_matched variable')
            yield True
        repeat.count = count
        child_context = repeat.push_new_context(4)
        yield False
        ctx.has_matched = child_context.has_matched
        if not ctx.has_matched:
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        yield True

    def general_op_groupref(self, ctx, decorate=lambda x: x):
        # Shared body for GROUPREF / GROUPREF_IGNORE: compare the current
        # input against the text already captured by the referenced group.
        group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
        if group_start is None or group_end is None or group_end < group_start:
            ctx.has_matched = False
            return True
        while group_start < group_end:
            if ctx.at_end() or decorate(ord(ctx.peek_char())) \
                    != decorate(ord(ctx.state.string[group_start])):
                ctx.has_matched = False
                #print('_sre.py:line1042, update context.has_matched variable')
                return True
            group_start += 1
            ctx.skip_char(1)
        ctx.skip_code(2)
        return True

    def op_groupref(self, ctx):
        # match backreference
        # <GROUPREF> <zero-based group index>
        #self._log(ctx, "GROUPREF", ctx.peek_code(1))
        return self.general_op_groupref(ctx)

    def op_groupref_ignore(self, ctx):
        # match backreference case-insensitive
        # <GROUPREF_IGNORE> <zero-based group index>
        #self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1))
        return self.general_op_groupref(ctx, ctx.state.lower)

    def op_groupref_exists(self, ctx):
        # <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
        #self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1))
        group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
        if group_start is None or group_end is None or group_end < group_start:
            ctx.skip_code(ctx.peek_code(2) + 1)
        else:
            ctx.skip_code(3)
        return True

    def op_assert(self, ctx):
        # assert subpattern
        # <ASSERT> <skip> <back> <pattern>
        #self._log(ctx, "ASSERT", ctx.peek_code(2))
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position < 0:
            ctx.has_matched = False
            yield True
        child_context = ctx.push_new_context(3)
        yield False
        if child_context.has_matched:
            ctx.skip_code(ctx.peek_code(1) + 1)
        else:
            ctx.has_matched = False
        yield True

    def op_assert_not(self, ctx):
        # assert not subpattern
        # <ASSERT_NOT> <skip> <back> <pattern>
        #self._log(ctx, "ASSERT_NOT", ctx.peek_code(2))
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position >= 0:
            child_context = ctx.push_new_context(3)
            yield False
            if child_context.has_matched:
                ctx.has_matched = False
                yield True
        ctx.skip_code(ctx.peek_code(1) + 1)
        yield True

    def unknown(self, ctx):
        #self._log(ctx, "UNKNOWN", ctx.peek_code())
        raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code())

    def check_charset(self, ctx, char):
        """Checks whether a character matches set of arbitrary length. Assumes
        the code pointer is at the first member of the set."""
        self.set_dispatcher.reset(char)
        save_position = ctx.code_position
        result = None
        while result is None:
            result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)
        ctx.code_position = save_position
        #print("_sre.py:1123:check_charset", result)
        return result

    def count_repetitions(self, ctx, maxcount):
        """Returns the number of repetitions of a single item, starting from the
        current string position. The code pointer is expected to point to a
        REPEAT_ONE operation (with the repeated 4 ahead)."""
        count = 0
        real_maxcount = ctx.state.end - ctx.string_position
        if maxcount < real_maxcount and maxcount != MAXREPEAT:
            real_maxcount = maxcount
        # XXX could special case every single character pattern here, as in C.
        # This is a general solution, a bit hackisch, but works and should be
        # efficient.
        code_position = ctx.code_position
        string_position = ctx.string_position
        ctx.skip_code(4)
        reset_position = ctx.code_position
        while count < real_maxcount:
            # this works because the single character pattern is followed by
            # a success opcode
            ctx.code_position = reset_position
            self.dispatch(ctx.peek_code(), ctx)
            #print("count_repetitions", ctx.has_matched, count)
            if ctx.has_matched is False: # could be None as well
                break
            count += 1
        ctx.has_matched = None
        ctx.code_position = code_position
        ctx.string_position = string_position
        return count

    def _log(self, context, opname, *args):
        arg_string = ("%s " * len(args)) % args
        _log("|%s|%s|%s %s" % (context.pattern_codes,
            context.string_position, opname, arg_string))
# Populate the opcode -> handler table from the op_* methods defined above.
_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_")
class _CharsetDispatcher(_Dispatcher):
    """Evaluates charset member opcodes (<IN> ... sets) against one character.

    reset() must be called with the character's ordinal before dispatching;
    ``ok`` starts True and is inverted by NEGATE so every member test can
    simply return ``self.ok`` on a hit.
    """

    def __init__(self):
        self.ch_dispatcher = _ChcodeDispatcher()

    def reset(self, char):
        # char is an ordinal (int), not a 1-char string.
        self.char = char
        self.ok = True

    def set_failure(self, ctx):
        return not self.ok

    def set_literal(self, ctx):
        # <LITERAL> <code>
        if ctx.peek_code(1) == self.char:
            return self.ok
        else:
            ctx.skip_code(2)

    def set_category(self, ctx):
        # <CATEGORY> <code>
        if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
            return self.ok
        else:
            ctx.skip_code(2)

    def set_charset(self, ctx):
        # <CHARSET> <bitmap> (16 bits per code word)
        char_code = self.char
        ctx.skip_code(1) # point to beginning of bitmap
        if CODESIZE == 2:
            if char_code < 256 and ctx.peek_code(char_code >> 4) \
                    & (1 << (char_code & 15)):
                return self.ok
            ctx.skip_code(16) # skip bitmap
        else:
            if char_code < 256 and ctx.peek_code(char_code >> 5) \
                    & (1 << (char_code & 31)):
                return self.ok
            ctx.skip_code(8) # skip bitmap

    def set_range(self, ctx):
        # <RANGE> <lower> <upper>
        if ctx.peek_code(1) <= self.char <= ctx.peek_code(2):
            return self.ok
        ctx.skip_code(3)

    def set_negate(self, ctx):
        self.ok = not self.ok
        ctx.skip_code(1)

    #fixme brython. array module doesn't exist
    def set_bigcharset(self, ctx):
        # Bug fix: the original raised the non-existent name
        # "NotImplementationError", which surfaced as a confusing NameError
        # at runtime instead of the intended NotImplementedError.
        raise NotImplementedError("_sre.py: set_bigcharset, array not implemented")
        # Unreachable reference implementation, kept for future porting:
        # <BIGCHARSET> <blockcount> <256 blockindices> <blocks>
        char_code = self.char
        count = ctx.peek_code(1)
        ctx.skip_code(2)
        if char_code < 65536:
            block_index = char_code >> 8
            # NB: there are CODESIZE block indices per bytecode
            a = array.array("B")
            a.fromstring(array.array(CODESIZE == 2 and "H" or "I",
                    [ctx.peek_code(block_index // CODESIZE)]).tostring())
            block = a[block_index % CODESIZE]
            ctx.skip_code(256 // CODESIZE) # skip block indices
            block_value = ctx.peek_code(block * (32 // CODESIZE)
                    + ((char_code & 255) >> (CODESIZE == 2 and 4 or 5)))
            if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))):
                return self.ok
        else:
            ctx.skip_code(256 // CODESIZE) # skip block indices
            ctx.skip_code(count * (32 // CODESIZE)) # skip blocks

    def unknown(self, ctx):
        return False
# Populate the charset opcode -> handler table from the set_* methods.
_CharsetDispatcher.build_dispatch_table(OPCODES, "set_")
class _AtcodeDispatcher(_Dispatcher):
    """Evaluates positional assertions (^, $, \\A, \\Z, \\b, \\B, ...)."""

    def at_beginning(self, ctx):
        return ctx.at_beginning()
    at_beginning_string = at_beginning

    def at_beginning_line(self, ctx):
        if ctx.at_beginning():
            return True
        return _is_linebreak(ctx.peek_char(-1))

    def at_end(self, ctx):
        # End of string, or just before a single trailing newline.
        if ctx.at_end():
            return True
        return ctx.remaining_chars() == 1 and ctx.at_linebreak()

    def at_end_line(self, ctx):
        return ctx.at_end() or ctx.at_linebreak()

    def at_end_string(self, ctx):
        return ctx.at_end()

    def at_boundary(self, ctx):
        return ctx.at_boundary(_is_word)

    def at_non_boundary(self, ctx):
        return not ctx.at_boundary(_is_word)

    def at_loc_boundary(self, ctx):
        return ctx.at_boundary(_is_loc_word)

    def at_loc_non_boundary(self, ctx):
        return not ctx.at_boundary(_is_loc_word)

    def at_uni_boundary(self, ctx):
        return ctx.at_boundary(_is_uni_word)

    def at_uni_non_boundary(self, ctx):
        return not ctx.at_boundary(_is_uni_word)

    def unknown(self, ctx):
        return False
# Positional-assertion codes map directly to method names (no prefix).
_AtcodeDispatcher.build_dispatch_table(ATCODES, "")
class _ChcodeDispatcher(_Dispatcher):
    """Evaluates character-category opcodes (\\d, \\w, \\s and friends)
    against the character at the current position.

    Each negated category is expressed as the complement of its positive
    counterpart.
    """

    def category_digit(self, ctx):
        return _is_digit(ctx.peek_char())

    def category_not_digit(self, ctx):
        return not self.category_digit(ctx)

    def category_space(self, ctx):
        return _is_space(ctx.peek_char())

    def category_not_space(self, ctx):
        return not self.category_space(ctx)

    def category_word(self, ctx):
        return _is_word(ctx.peek_char())

    def category_not_word(self, ctx):
        return not self.category_word(ctx)

    def category_linebreak(self, ctx):
        return _is_linebreak(ctx.peek_char())

    def category_not_linebreak(self, ctx):
        return not self.category_linebreak(ctx)

    def category_loc_word(self, ctx):
        return _is_loc_word(ctx.peek_char())

    def category_loc_not_word(self, ctx):
        return not self.category_loc_word(ctx)

    def category_uni_digit(self, ctx):
        return ctx.peek_char().isdigit()

    def category_uni_not_digit(self, ctx):
        return not self.category_uni_digit(ctx)

    def category_uni_space(self, ctx):
        return ctx.peek_char().isspace()

    def category_uni_not_space(self, ctx):
        return not self.category_uni_space(ctx)

    def category_uni_word(self, ctx):
        return _is_uni_word(ctx.peek_char())

    def category_uni_not_word(self, ctx):
        return not self.category_uni_word(ctx)

    def category_uni_linebreak(self, ctx):
        return ord(ctx.peek_char()) in _uni_linebreaks

    def category_uni_not_linebreak(self, ctx):
        return not self.category_uni_linebreak(ctx)

    def unknown(self, ctx):
        return False
# Category codes map directly to the category_* method names (no prefix).
_ChcodeDispatcher.build_dispatch_table(CHCODES, "")
_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2,
2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0,
0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ]
def _is_digit(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 1
def _is_space(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 2
def _is_word(char):
# NB: non-ASCII chars aren't words according to _sre.c
code = ord(char)
return code < 128 and _ascii_char_info[code] & 16
def _is_loc_word(char):
return (not (ord(char) & ~255) and char.isalnum()) or char == '_'
def _is_uni_word(char):
# not valid in python 3
#return unichr(ord(char)).isalnum() or char == '_'
return chr(ord(char)).isalnum() or char == '_'
def _is_linebreak(char):
return char == "\n"
# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK.
_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233]
def _log(message):
if 0:
print(message)
| agpl-3.0 |
helldorado/ansible | test/units/modules/packaging/os/test_rhn_channel.py | 101 | 5661 | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Pierre-Louis Bonicoli <pierre-louis@libregerbil.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from itertools import product
import json
from ansible.modules.packaging.os import rhn_channel
import pytest
pytestmark = pytest.mark.usefixtures('patch_ansible_module')
@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
def test_without_required_parameters(capfd):
    # With an empty argument dict the module must exit (fail_json -> SystemExit)
    # and report the missing required arguments in its JSON output.
    with pytest.raises(SystemExit):
        rhn_channel.main()
    out, err = capfd.readouterr()
    results = json.loads(out)
    assert results['failed']
    assert 'missing required arguments' in results['msg']
TESTED_MODULE = rhn_channel.__name__
TEST_CASES = [
[
# add channel already added, check that result isn't changed
{
'name': 'rhel-x86_64-server-6',
'sysname': 'server01',
'url': 'https://rhn.redhat.com/rpc/api',
'user': 'user',
'password': 'pass',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.listUserSystems',
[[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
('channel.software.listSystemChannels',
[[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
('auth.logout', [1]),
],
'changed': False,
'msg': 'Channel rhel-x86_64-server-6 already exists',
}
],
[
# add channel, check that result is changed
{
'name': 'rhel-x86_64-server-6-debuginfo',
'sysname': 'server01',
'url': 'https://rhn.redhat.com/rpc/api',
'user': 'user',
'password': 'pass',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.listUserSystems',
[[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
('channel.software.listSystemChannels',
[[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
('channel.software.listSystemChannels',
[[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
('system.setChildChannels', [1]),
('auth.logout', [1]),
],
'changed': True,
'msg': 'Channel rhel-x86_64-server-6-debuginfo added',
}
],
[
# remove inexistent channel, check that result isn't changed
{
'name': 'rhel-x86_64-server-6-debuginfo',
'state': 'absent',
'sysname': 'server01',
'url': 'https://rhn.redhat.com/rpc/api',
'user': 'user',
'password': 'pass',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.listUserSystems',
[[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
('channel.software.listSystemChannels',
[[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
('auth.logout', [1]),
],
'changed': False,
'msg': 'Not subscribed to channel rhel-x86_64-server-6-debuginfo.',
}
],
[
# remove channel, check that result is changed
{
'name': 'rhel-x86_64-server-6-debuginfo',
'state': 'absent',
'sysname': 'server01',
'url': 'https://rhn.redhat.com/rpc/api',
'user': 'user',
'password': 'pass',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.listUserSystems',
[[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
('channel.software.listSystemChannels', [[
{'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'},
{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}
]]),
('channel.software.listSystemChannels', [[
{'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'},
{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}
]]),
('system.setChildChannels', [1]),
('auth.logout', [1]),
],
'changed': True,
'msg': 'Channel rhel-x86_64-server-6-debuginfo removed'
}
]
]
@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, indirect=['patch_ansible_module'])
def test_rhn_channel(capfd, mocker, testcase, mock_request):
    """Check 'msg' and 'changed' results"""
    # The module always exits via exit_json/fail_json, hence SystemExit.
    with pytest.raises(SystemExit):
        rhn_channel.main()

    out, err = capfd.readouterr()
    results = json.loads(out)
    assert results['changed'] == testcase['changed']
    assert results['msg'] == testcase['msg']
    # The mock_request fixture pops entries from testcase['calls'] as the
    # module issues XML-RPC calls; all expected calls must have been consumed.
    assert not testcase['calls']
| gpl-3.0 |
andreh7/deap | deap/creator.py | 9 | 6598 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""The :mod:`~deap.creator` is a meta-factory allowing to create classes that
will fulfill the needs of your evolutionary algorithms. In effect, new
classes can be built from any imaginable type, from :class:`list` to
:class:`set`, :class:`dict`, :class:`~deap.gp.PrimitiveTree` and more,
providing the possibility to implement genetic algorithms, genetic
programming, evolution strategies, particle swarm optimizers, and many more.
"""
import array
import copy
import warnings
class_replacers = {}
"""Some classes in Python's standard library as well as third party library
may be in part incompatible with the logic used in DEAP. To palliate
this problem, the method :func:`create` uses the dictionary
`class_replacers` to identify if the base type provided is problematic, and if
so the new class inherits from the replacement class instead of the
original base class.
`class_replacers` keys are classes to be replaced and the values are the
replacing classes.
"""
# Optional numpy support: when numpy is importable and exposes the expected
# attributes, register a deepcopy/pickle-friendly ndarray subclass in
# `class_replacers` so that `create()` substitutes it for numpy.ndarray.
try:
    import numpy
    # Probe that both attributes exist before defining the subclass.
    (numpy.ndarray, numpy.array)
except ImportError:
    # Numpy is not present, skip the definition of the replacement class.
    pass
except AttributeError:
    # Numpy is present, but there is either no ndarray or array in numpy,
    # also skip the definition of the replacement class.
    pass
else:
    class _numpy_array(numpy.ndarray):
        def __deepcopy__(self, memo):
            """Overrides the deepcopy from numpy.ndarray that does not copy
            the object's attributes. This one will deepcopy the array and its
            :attr:`__dict__` attribute.
            """
            copy_ = numpy.ndarray.copy(self)
            copy_.__dict__.update(copy.deepcopy(self.__dict__, memo))
            return copy_

        @staticmethod
        def __new__(cls, iterable):
            """Creates a new instance of a numpy.ndarray from a function call.
            Adds the possibility to instanciate from an iterable."""
            return numpy.array(list(iterable)).view(cls)

        def __setstate__(self, state):
            # Restore the instance attributes when unpickling.
            self.__dict__.update(state)

        def __reduce__(self):
            # Pickle as (class, constructor args, instance __dict__).
            return (self.__class__, (list(self),), self.__dict__)

    class_replacers[numpy.ndarray] = _numpy_array
class _array(array.array):
    # Replacement for array.array registered in `class_replacers`.
    # NOTE(review): __new__ reads `cls.typecode`, which is presumably set as a
    # class attribute on the subclass generated by create() -- confirm.
    @staticmethod
    def __new__(cls, seq=()):
        return super(_array, cls).__new__(cls, cls.typecode, seq)

    def __deepcopy__(self, memo):
        """Overrides the deepcopy from array.array that does not copy
        the object's attributes and class type.
        """
        cls = self.__class__
        copy_ = cls.__new__(cls, self)
        # Register the copy in memo early so reference cycles terminate.
        memo[id(self)] = copy_
        copy_.__dict__.update(copy.deepcopy(self.__dict__, memo))
        return copy_

    def __reduce__(self):
        # Pickle as (class, constructor args, instance __dict__).
        return (self.__class__, (list(self),), self.__dict__)

class_replacers[array.array] = _array
def create(name, base, **kargs):
    """Creates a new class named *name* inheriting from *base* in the
    :mod:`~deap.creator` module. The new class can have attributes defined by
    the subsequent keyword arguments passed to the function create. If the
    argument is a class (without the parenthesis), the __init__ function is
    called in the initialization of an instance of the new object and the
    returned instance is added as an attribute of the class' instance.
    Otherwise, if the argument is not a class, (for example an :class:`int`),
    it is added as a "static" attribute of the class.

    :param name: The name of the class to create.
    :param base: A base class from which to inherit.
    :param attribute: One or more attributes to add on instanciation of this
                      class, optional.

    The following is used to create a class :class:`Foo` inheriting from the
    standard :class:`list` and having an attribute :attr:`bar` being an empty
    dictionary and a static attribute :attr:`spam` initialized to 1. ::

        create("Foo", list, bar=dict, spam=1)

    This above line is exactly the same as defining in the :mod:`creator`
    module something like the following. ::

        class Foo(list):
            spam = 1
            def __init__(self):
                self.bar = dict()

    The :ref:`creating-types` tutorial gives more examples of the creator
    usage.

    .. warning::
       If your are inheriting from :class:`numpy.ndarray` see the
       :doc:`tutorials/advanced/numpy` tutorial and the
       :doc:`/examples/ga_onemax_numpy` example.
    """
    if name in globals():
        warnings.warn("A class named '{0}' has already been created and it "
                      "will be overwritten. Consider deleting previous "
                      "creation of that class or rename it.".format(name),
                      RuntimeWarning)

    dict_inst = {}
    dict_cls = {}
    # FIX: use items() instead of the Python-2-only iteritems() so the module
    # works on both Python 2 and Python 3.
    # Class-valued attributes are instantiated per instance in __init__;
    # everything else becomes a plain ("static") class attribute.
    for obj_name, obj in kargs.items():
        if isinstance(obj, type):
            dict_inst[obj_name] = obj
        else:
            dict_cls[obj_name] = obj

    # Check if the base class has to be replaced
    if base in class_replacers:
        base = class_replacers[base]

    # A DeprecationWarning is raised when the object inherits from the
    # class "object" which leave the option of passing arguments, but
    # raise a warning stating that it will eventually stop permitting
    # this option. Usually this happens when the base class does not
    # override the __init__ method from object.
    def initType(self, *args, **kargs):
        """Replace the __init__ function of the new type, in order to
        add attributes that were defined with **kargs to the instance.
        """
        for obj_name, obj in dict_inst.items():
            setattr(self, obj_name, obj())
        if base.__init__ is not object.__init__:
            base.__init__(self, *args, **kargs)

    objtype = type(str(name), (base,), dict_cls)
    objtype.__init__ = initType
    # Publish the new class as an attribute of this module.
    globals()[name] = objtype
| lgpl-3.0 |
yfried/ansible | test/units/modules/network/dellos10/test_dellos10_facts.py | 56 | 4738 | # (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from units.modules.utils import set_module_args
from .dellos10_module import TestDellos10Module, load_fixture
from ansible.modules.network.dellos10 import dellos10_facts
class TestDellos10Facts(TestDellos10Module):
    """Fixture-driven unit tests for the dellos10_facts module.

    Uses assertEqual throughout; the original used assertEquals, which is a
    deprecated unittest alias.
    """

    module = dellos10_facts

    def setUp(self):
        super(TestDellos10Facts, self).setUp()
        # Patch run_commands so no real device connection is attempted.
        self.mock_run_command = patch(
            'ansible.modules.network.dellos10.dellos10_facts.run_commands')
        self.run_command = self.mock_run_command.start()

    def tearDown(self):
        super(TestDellos10Facts, self).tearDown()
        self.mock_run_command.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            """Return fixture contents for each CLI command issued."""
            module, commands = args
            output = list()

            for item in commands:
                try:
                    obj = json.loads(item)
                    command = obj['command']
                except ValueError:
                    command = item
                if '|' in command:
                    command = str(command).replace('|', '')
                # Map the CLI command to a fixture filename: spaces -> '_',
                # '/' -> '7', ':' -> '_colon_'.
                filename = str(command).replace(' ', '_')
                filename = filename.replace('/', '7')
                filename = filename.replace(':', '_colon_')
                output.append(load_fixture(filename))
            return output

        self.run_command.side_effect = load_from_file

    def test_dellos10_facts_gather_subset_default(self):
        """Default subset gathers hardware, default and interfaces facts."""
        set_module_args(dict())
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual('os10', ansible_facts['ansible_net_hostname'])
        self.assertIn('ethernet1/1/8', ansible_facts['ansible_net_interfaces'].keys())
        self.assertEqual(7936, ansible_facts['ansible_net_memtotal_mb'])
        self.assertEqual(5693, ansible_facts['ansible_net_memfree_mb'])

    def test_dellos10_facts_gather_subset_config(self):
        """'config' subset adds the running configuration."""
        set_module_args({'gather_subset': 'config'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual('os10', ansible_facts['ansible_net_hostname'])
        self.assertIn('ansible_net_config', ansible_facts)

    def test_dellos10_facts_gather_subset_hardware(self):
        """'hardware' subset reports CPU architecture and memory figures."""
        set_module_args({'gather_subset': 'hardware'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual('x86_64', ansible_facts['ansible_net_cpu_arch'])
        self.assertEqual(7936, ansible_facts['ansible_net_memtotal_mb'])
        self.assertEqual(5693, ansible_facts['ansible_net_memfree_mb'])

    def test_dellos10_facts_gather_subset_interfaces(self):
        """'interfaces' subset reports interfaces and LLDP neighbors."""
        set_module_args({'gather_subset': 'interfaces'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('ethernet1/1/8', ansible_facts['ansible_net_interfaces'].keys())
        self.assertEqual(sorted(['mgmt1/1/1', 'ethernet1/1/4', 'ethernet1/1/2', 'ethernet1/1/3', 'ethernet1/1/1']),
                         sorted(list(ansible_facts['ansible_net_neighbors'].keys())))
        self.assertIn('ansible_net_interfaces', ansible_facts)
| gpl-3.0 |
unreal666/youtube-dl | youtube_dl/extractor/mgoon.py | 64 | 2696 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
qualities,
unified_strdate,
)
class MgoonIE(InfoExtractor):
    """Extractor for mgoon.com / m.mgoon.com / video.mgoon.com video pages."""

    # FIX: the original pattern used `(:?m\.)?` -- a *capturing* group whose
    # leading `:?` also matched a stray colon.  `(?:m\.)?` is the intended
    # optional non-capturing group.
    _VALID_URL = r'''(?x)https?://(?:www\.)?
    (?:(?:m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)|
    video\.mgoon\.com)/(?P<id>[0-9]+)'''
    _API_URL = 'http://mpos.mgoon.com/player/video?id={0:}'
    _TESTS = [
        {
            'url': 'http://m.mgoon.com/ch/hi6618/v/5582148',
            'md5': 'dd46bb66ab35cf6d51cc812fd82da79d',
            'info_dict': {
                'id': '5582148',
                'uploader_id': 'hi6618',
                'duration': 240.419,
                'upload_date': '20131220',
                'ext': 'mp4',
                'title': 'md5:543aa4c27a4931d371c3f433e8cebebc',
                'thumbnail': r're:^https?://.*\.jpg$',
            }
        },
        {
            'url': 'http://www.mgoon.com/play/view/5582148',
            'only_matching': True,
        },
        {
            'url': 'http://video.mgoon.com/5582148',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        data = self._download_json(self._API_URL.format(video_id), video_id)

        # The API signals errors through errorInfo even with HTTP 200.
        if data.get('errorInfo', {}).get('code') != 'NONE':
            raise ExtractorError('%s encountered an error: %s' % (
                self.IE_NAME, data['errorInfo']['message']), expected=True)

        v_info = data['videoInfo']
        title = v_info.get('v_title')
        thumbnail = v_info.get('v_thumbnail')
        duration = v_info.get('v_duration')
        upload_date = unified_strdate(v_info.get('v_reg_date'))
        uploader_id = data.get('userInfo', {}).get('u_alias')
        if duration:
            # API reports milliseconds; info dicts use seconds.
            duration /= 1000.0

        age_limit = None
        if data.get('accessInfo', {}).get('code') == 'VIDEO_STATUS_ADULT':
            age_limit = 18

        formats = []
        get_quality = qualities(['360p', '480p', '720p', '1080p'])
        for fmt in data['videoFiles']:
            formats.append({
                'format_id': fmt['label'],
                'quality': get_quality(fmt['label']),
                'url': fmt['url'],
                'ext': fmt['format'],
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'age_limit': age_limit,
        }
| unlicense |
HomuHomu/Kernel-SM-G935D-MM | tools/perf/scripts/python/check-perf-trace.py | 1997 | 2539 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before the first event is processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the last event; report unhandled-event counts.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, vec):
    # Handler for irq:softirq_entry -- prints the common fields plus the
    # softirq vector resolved to its symbolic name.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for kmem:kmalloc -- prints the common fields plus the
    # allocation details, with gfp_flags rendered as a flag string.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  On the first occurrence
    # the autodict entry is a fresh dict, so += raises TypeError and the
    # counter is seeded with 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Print the fields common to every event; the trailing comma suppresses
    # the newline (Python 2 print statement).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These fields are fetched back from perf via the context object rather
    # than passed as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Print a name/count table of events that had no dedicated handler;
    # silent when everything was handled.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
vaginessa/pyload | module/gui/AccountEdit.py | 41 | 3152 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from os.path import join
class AccountEdit(QWidget):
    """
    Account editor widget: lets the user choose an account type, login and
    (optionally) a new password, and emits the collected values through the
    "done" signal when Save is clicked.
    """
    def __init__(self):
        # FIX: the original called QMainWindow.__init__ although this class
        # derives from QWidget; initialize the actual base class.
        QWidget.__init__(self)
        self.setWindowTitle(_("Edit account"))
        self.setWindowIcon(QIcon(join(pypath, "icons","logo.png")))
        self.setLayout(QGridLayout())
        l = self.layout()

        typeLabel = QLabel(_("Type"))
        loginLabel = QLabel(_("Login"))
        passwordLabel = QLabel(_("New password"))

        changePw = QCheckBox()
        changePw.setChecked(False)
        self.changePw = changePw

        # Password entry stays disabled until "change password" is ticked.
        password = QLineEdit()
        password.setEnabled(False)
        password.setEchoMode(QLineEdit.Password)
        self.password = password

        login = QLineEdit()
        self.login = login

        acctype = QComboBox()
        self.acctype = acctype

        save = QPushButton(_("Save"))

        self.connect(changePw, SIGNAL("toggled(bool)"), password, SLOT("setEnabled(bool)"))

        l.addWidget(save, 3, 0, 1, 3)
        l.addWidget(acctype, 0, 1, 1, 2)
        l.addWidget(login, 1, 1, 1, 2)
        l.addWidget(password, 2, 2)
        l.addWidget(changePw, 2, 1)
        l.addWidget(passwordLabel, 2, 0)
        l.addWidget(loginLabel, 1, 0)
        l.addWidget(typeLabel, 0, 0)

        self.connect(save, SIGNAL("clicked()"), self.slotSave)

    def slotSave(self):
        """
        Collect the entered data and emit it through the "done" signal.
        The password entry is False when the user did not opt to change it.
        """
        data = {"login": str(self.login.text()), "acctype": str(self.acctype.currentText()), "password": False}
        if self.changePw.isChecked():
            data["password"] = str(self.password.text())
        self.emit(SIGNAL("done"), data)

    @staticmethod
    def newAccount(types):
        """
        Create an empty editor instance for adding a new account.
        """
        w = AccountEdit()
        w.setWindowTitle(_("Create account"))
        w.changePw.setChecked(True)
        w.password.setEnabled(True)
        w.acctype.addItems(types)
        return w

    @staticmethod
    def editAccount(types, base):
        """
        Create an editor instance pre-filled with the *base* account data.
        """
        w = AccountEdit()
        w.acctype.addItems(types)
        w.acctype.setCurrentIndex(types.index(base["type"]))
        w.login.setText(base["login"])
        return w
| gpl-3.0 |
keedio/hue | desktop/core/ext-py/Pygments-1.3.1/pygments/style.py | 75 | 3745 | # -*- coding: utf-8 -*-
"""
pygments.style
~~~~~~~~~~~~~~
Basic style object.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Token, STANDARD_TYPES
class StyleMeta(type):
    # Metaclass that compiles a Style subclass's `styles` mapping into the
    # internal `_styles` table at class-creation time.  Each entry is a list:
    # [color, bold, italic, underline, bgcolor, border, roman, sans, mono].

    def __new__(mcs, name, bases, dct):
        obj = type.__new__(mcs, name, bases, dct)
        # Every standard token type gets at least an empty style string.
        for token in STANDARD_TYPES:
            if token not in obj.styles:
                obj.styles[token] = ''

        def colorformat(text):
            # Normalize '#rrggbb'/'#rgb' (or '') to a bare 6-digit hex string.
            if text[0:1] == '#':
                col = text[1:]
                if len(col) == 6:
                    return col
                elif len(col) == 3:
                    # NOTE(review): expands '#abc' to 'a0b0c0', not the CSS
                    # convention 'aabbcc' -- confirm whether this is intended.
                    return col[0]+'0'+col[1]+'0'+col[2]+'0'
            elif text == '':
                return ''
            assert False, "wrong color format %r" % text

        _styles = obj._styles = {}

        for ttype in obj.styles:
            for token in ttype.split():
                if token in _styles:
                    continue
                # Inherit from the parent token's compiled style, if any.
                ndef = _styles.get(token.parent, None)
                styledefs = obj.styles.get(token, '').split()
                if not ndef or token is None:
                    ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
                elif 'noinherit' in styledefs and token is not Token:
                    # 'noinherit' resets to the root Token style instead.
                    ndef = _styles[Token][:]
                else:
                    ndef = ndef[:]
                _styles[token] = ndef
                for styledef in obj.styles.get(token, '').split():
                    if styledef == 'noinherit':
                        pass
                    elif styledef == 'bold':
                        ndef[1] = 1
                    elif styledef == 'nobold':
                        ndef[1] = 0
                    elif styledef == 'italic':
                        ndef[2] = 1
                    elif styledef == 'noitalic':
                        ndef[2] = 0
                    elif styledef == 'underline':
                        ndef[3] = 1
                    elif styledef == 'nounderline':
                        ndef[3] = 0
                    elif styledef[:3] == 'bg:':
                        ndef[4] = colorformat(styledef[3:])
                    elif styledef[:7] == 'border:':
                        ndef[5] = colorformat(styledef[7:])
                    elif styledef == 'roman':
                        ndef[6] = 1
                    elif styledef == 'sans':
                        ndef[7] = 1
                    elif styledef == 'mono':
                        ndef[8] = 1
                    else:
                        # Anything unrecognized is taken as the foreground color.
                        ndef[0] = colorformat(styledef)

        return obj

    def style_for_token(cls, token):
        # Expand the compact list form into a dict; empty strings become None.
        t = cls._styles[token]
        return {
            'color': t[0] or None,
            'bold': bool(t[1]),
            'italic': bool(t[2]),
            'underline': bool(t[3]),
            'bgcolor': t[4] or None,
            'border': t[5] or None,
            'roman': bool(t[6]) or None,
            'sans': bool(t[7]) or None,
            'mono': bool(t[8]) or None,
        }

    def list_styles(cls):
        # Materialize the (token, style-dict) pairs produced by __iter__.
        return list(cls)

    def styles_token(cls, ttype):
        return ttype in cls._styles

    def __iter__(cls):
        for token in cls._styles:
            yield token, cls.style_for_token(token)

    def __len__(cls):
        return len(cls._styles)
class Style(object):
    # Python 2 metaclass hook (this codebase predates the py3
    # `class Style(metaclass=StyleMeta)` syntax).
    __metaclass__ = StyleMeta

    #: overall background color (``None`` means transparent)
    background_color = '#ffffff'

    #: highlight background color
    highlight_color = '#ffffcc'

    #: Style definitions for individual token types.
    styles = {}
| apache-2.0 |
Glyfina-Fernando/pymtl | pisa/pisa_inst_or_test.py | 4 | 5460 | #=========================================================================
# pisa_or_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
    """Single OR with heavy nop padding: 0x0f0f0f0f | 0x00ff00ff = 0x0fff0fff."""
    return """
    mfc0 r1, mngr2proc < 0x0f0f0f0f
    mfc0 r2, mngr2proc < 0x00ff00ff
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    or r3, r1, r2
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mtc0 r3, proc2mngr > 0x0fff0fff
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
  """
#-------------------------------------------------------------------------
# gen_dest_byp_test
#-------------------------------------------------------------------------
def gen_dest_byp_test():
    """Destination-bypass tests with decreasing nop padding (5 down to 0)."""
    return [
        gen_rr_dest_byp_test( 5, "or", 0x00000f0f, 0x000000ff, 0x00000fff ),
        gen_rr_dest_byp_test( 4, "or", 0x0000f0f0, 0x00000ff0, 0x0000fff0 ),
        gen_rr_dest_byp_test( 3, "or", 0x000f0f00, 0x0000ff00, 0x000fff00 ),
        gen_rr_dest_byp_test( 2, "or", 0x00f0f000, 0x000ff000, 0x00fff000 ),
        gen_rr_dest_byp_test( 1, "or", 0x0f0f0000, 0x00ff0000, 0x0fff0000 ),
        gen_rr_dest_byp_test( 0, "or", 0xf0f00000, 0x0ff00000, 0xfff00000 ),
    ]
#-------------------------------------------------------------------------
# gen_src0_byp_test
#-------------------------------------------------------------------------
def gen_src0_byp_test():
    """First-source-operand bypass tests with decreasing nop padding."""
    return [
        gen_rr_src0_byp_test( 5, "or", 0x0f00000f, 0xff000000, 0xff00000f ),
        gen_rr_src0_byp_test( 4, "or", 0xf00000f0, 0xf000000f, 0xf00000ff ),
        gen_rr_src0_byp_test( 3, "or", 0x00000f0f, 0x000000ff, 0x00000fff ),
        gen_rr_src0_byp_test( 2, "or", 0x0000f0f0, 0x00000ff0, 0x0000fff0 ),
        gen_rr_src0_byp_test( 1, "or", 0x000f0f00, 0x0000ff00, 0x000fff00 ),
        gen_rr_src0_byp_test( 0, "or", 0x00f0f000, 0x000ff000, 0x00fff000 ),
    ]
#-------------------------------------------------------------------------
# gen_src1_byp_test
#-------------------------------------------------------------------------
def gen_src1_byp_test():
    """Second-source-operand bypass tests with decreasing nop padding."""
    return [
        gen_rr_src1_byp_test( 5, "or", 0x0f0f0000, 0x00ff0000, 0x0fff0000 ),
        gen_rr_src1_byp_test( 4, "or", 0xf0f00000, 0x0ff00000, 0xfff00000 ),
        gen_rr_src1_byp_test( 3, "or", 0x0f00000f, 0xff000000, 0xff00000f ),
        gen_rr_src1_byp_test( 2, "or", 0xf00000f0, 0xf000000f, 0xf00000ff ),
        gen_rr_src1_byp_test( 1, "or", 0x00000f0f, 0x000000ff, 0x00000fff ),
        gen_rr_src1_byp_test( 0, "or", 0x0000f0f0, 0x00000ff0, 0x0000fff0 ),
    ]
#-------------------------------------------------------------------------
# gen_srcs_byp_test
#-------------------------------------------------------------------------
def gen_srcs_byp_test():
    """Both-source-operand bypass tests with decreasing nop padding."""
    return [
        gen_rr_srcs_byp_test( 5, "or", 0x000f0f00, 0x0000ff00, 0x000fff00 ),
        gen_rr_srcs_byp_test( 4, "or", 0x00f0f000, 0x000ff000, 0x00fff000 ),
        gen_rr_srcs_byp_test( 3, "or", 0x0f0f0000, 0x00ff0000, 0x0fff0000 ),
        gen_rr_srcs_byp_test( 2, "or", 0xf0f00000, 0x0ff00000, 0xfff00000 ),
        gen_rr_srcs_byp_test( 1, "or", 0x0f00000f, 0xff000000, 0xff00000f ),
        gen_rr_srcs_byp_test( 0, "or", 0xf00000f0, 0xf000000f, 0xf00000ff ),
    ]
#-------------------------------------------------------------------------
# gen_srcs_dest_test
#-------------------------------------------------------------------------
def gen_srcs_dest_test():
    """Register-aliasing tests: source(s) overlapping the destination."""
    return [
        gen_rr_src0_eq_dest_test( "or", 0x00000f0f, 0x000000ff, 0x00000fff ),
        gen_rr_src1_eq_dest_test( "or", 0x0000f0f0, 0x00000ff0, 0x0000fff0 ),
        gen_rr_src0_eq_src1_test( "or", 0x000f0f00, 0x000f0f00 ),
        gen_rr_srcs_eq_dest_test( "or", 0x000f0f00, 0x000f0f00 ),
    ]
#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------
def gen_value_test():
    """Directed value tests for the OR truth table on mixed bit patterns."""
    return [
        gen_rr_value_test( "or", 0xff00ff00, 0x0f0f0f0f, 0xff0fff0f ),
        gen_rr_value_test( "or", 0x0ff00ff0, 0xf0f0f0f0, 0xfff0fff0 ),
        gen_rr_value_test( "or", 0x00ff00ff, 0x0f0f0f0f, 0x0fff0fff ),
        gen_rr_value_test( "or", 0xf00ff00f, 0xf0f0f0f0, 0xf0fff0ff ),
    ]
#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------
def gen_random_test():
    """100 random operand pairs; expected result computed with Bits OR.

    NOTE: uses Python-2 ``xrange`` -- this file targets Python 2.
    """
    asm_code = []
    for i in xrange(100):
        src0 = Bits( 32, random.randint(0,0xffffffff) )
        src1 = Bits( 32, random.randint(0,0xffffffff) )
        dest = src0 | src1
        asm_code.append( gen_rr_value_test( "or", src0.uint(), src1.uint(), dest.uint() ) )
    return asm_code
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
# Run every generator through the functional simulator.
@pytest.mark.parametrize( "name,test", [
    asm_test( gen_basic_test ),
    asm_test( gen_dest_byp_test ),
    asm_test( gen_src0_byp_test ),
    asm_test( gen_src1_byp_test ),
    asm_test( gen_srcs_byp_test ),
    asm_test( gen_srcs_dest_test ),
    asm_test( gen_value_test ),
    asm_test( gen_random_test ),
])
def test( name, test ):
    # Assemble the generated program, load it and run it to completion;
    # the proc2mngr checks inside the program do the verification.
    sim = PisaSim( trace_en=True )
    sim.load( pisa_encoding.assemble( test() ) )
    sim.run()
| bsd-3-clause |
bjodah/chempy | chempy/tests/test_chemistry.py | 1 | 20158 | # -*- coding: utf-8 -*-
from functools import reduce
from operator import attrgetter, add
import sys
import pytest
from ..util.arithmeticdict import ArithmeticDict
from ..util.testing import requires
from ..util.parsing import parsing_library
from ..units import default_units, units_library, to_unitless, allclose
from ..chemistry import (
equilibrium_quotient,
Substance,
Species,
Reaction,
Equilibrium,
balance_stoichiometry,
)
if sys.version_info < (3, 6, 0):
    # ModuleNotFoundError first appeared in Python 3.6; alias it to
    # ImportError on older interpreters so except-clauses keep working.
    class ModuleNotFoundError(ImportError):
        pass
@requires("numpy")
def test_equilibrium_quotient():
    # Q = prod(c_i ** nu_i); for nu = (-1, -1, 1) this is c3 / (c1 * c2).
    assert (
        abs(equilibrium_quotient([2.3, 3.7, 5.1], (-1, -1, 1)) - 5.1 / 2.3 / 3.7)
        < 1e-14
    )
@requires(parsing_library)
def test_Substance():
    # Formula parsing: composition key 0 holds the charge, other keys are
    # atomic numbers mapped to counts.
    s = Substance.from_formula("H+")
    assert s.composition == {0: 1, 1: 1}
    assert s.charge == 1
    assert abs(s.mass - 1.008) < 1e-3
def test_Substance__2():
    # Substances carry free-form metadata in `data` and sort by name.
    H2O = Substance(
        name="H2O", charge=0, latex_name=r"\mathrm{H_{2}O}", data={"pKa": 14}
    )  # will_be_missing_in='0.8.0', use data=...
    OH_m = Substance(name="OH-", charge=-1, latex_name=r"\mathrm{OH^{-}}")
    assert sorted([OH_m, H2O], key=attrgetter("name")) == [H2O, OH_m]
@requires(parsing_library)
def test_Substance__from_formula():
    # from_formula derives composition plus LaTeX/unicode/HTML renderings.
    H2O = Substance.from_formula("H2O")
    assert H2O.composition == {1: 2, 8: 1}
    assert H2O.latex_name == "H_{2}O"
    assert H2O.unicode_name == u"H₂O"
    assert H2O.html_name == u"H<sub>2</sub>O"
@requires(parsing_library)
def test_Species():
    # Species adds a phase index, derived from the formula suffix either via
    # the default phase mapping or an explicit one; phase_idx= overrides both.
    s = Species.from_formula("H2O")
    assert s.phase_idx == 0
    mapping = {"(aq)": 0, "(s)": 1, "(g)": 2}
    assert Species.from_formula("CO2(g)").phase_idx == 3
    assert Species.from_formula("CO2(g)", mapping).phase_idx == 2
    assert Species.from_formula("CO2(aq)", mapping).phase_idx == 0
    assert Species.from_formula("NaCl(s)").phase_idx == 1
    assert Species.from_formula("NaCl(s)", phase_idx=7).phase_idx == 7
    assert Species.from_formula("CO2(aq)", mapping, phase_idx=7).phase_idx == 7
    uranyl_ads = Species.from_formula("UO2+2(ads)", phases={"(aq)": 0, "(ads)": 1})
    assert uranyl_ads.composition == {0: 2, 92: 1, 8: 2}
    assert uranyl_ads.phase_idx == 1
def test_Solute():
    # The deprecated Solute alias still works but must emit a warning.
    from ..chemistry import Solute
    from ..util.pyutil import ChemPyDeprecationWarning

    with pytest.warns(ChemPyDeprecationWarning):
        w = Solute("H2O")
    assert w.name == "H2O"
def test_Reaction():
    """Exercise Reaction construction, balance checks, rate evaluation, copy
    and equality semantics."""
    substances = s_Hp, s_OHm, s_H2O = (
        Substance("H+", composition={0: 1, 1: 1}),
        Substance("OH-", composition={0: -1, 1: 1, 8: 1}),
        Substance("H2O", composition={0: 0, 1: 2, 8: 1}),
    )
    substance_names = Hp, OHm, H2O = [s.name for s in substances]
    substance_dict = {n: s for n, s in zip(substance_names, substances)}
    r1 = Reaction({Hp: 1, OHm: 1}, {H2O: 1})
    # a balanced reaction has zero violation for every composition key
    assert sum(r1.composition_violation(substance_dict)) == 0
    assert r1.composition_violation(substance_dict, ["H+"]) == [0]
    viol, cks = r1.composition_violation(substance_dict, True)
    assert viol == [0] * 3 and sorted(cks) == [0, 1, 8]
    assert r1.charge_neutrality_violation(substance_dict) == 0
    r2 = Reaction({Hp: 1, OHm: 1}, {H2O: 2})  # mass-unbalanced
    assert sum(r2.composition_violation(substance_dict)) != 0
    assert r2.charge_neutrality_violation(substance_dict) == 0
    r3 = Reaction({Hp: 2, OHm: 1}, {H2O: 2})  # mass- and charge-unbalanced
    assert sum(r3.composition_violation(substance_dict)) != 0
    assert r3.charge_neutrality_violation(substance_dict) != 0
    assert r3.keys() == {Hp, OHm, H2O}
    with pytest.raises(ValueError):
        Reaction({Hp: -1, OHm: -1}, {H2O: -1})  # negative coefficients rejected
    # set-style construction implies unit coefficients
    assert r1 == Reaction({"H+", "OH-"}, {"H2O"})
    r4 = Reaction({Hp, OHm}, {H2O}, 7)
    # rate = k * [H+] * [OH-] = 7 * 5 * 3; sign follows net stoichiometry
    ref = {Hp: -3 * 5 * 7, OHm: -3 * 5 * 7, H2O: 3 * 5 * 7}
    # BUG FIX: the comparison result was previously discarded (bare
    # ``r4.rate(...) == ref`` expression); assert it so the check is real.
    assert r4.rate({Hp: 5, OHm: 3}) == ref
    r5 = r4.copy()
    assert r5 == r4
    assert r5 != r1
    lhs5, rhs5 = {"H+": 1, "OH-": 1}, {"H2O": 1}
    r5 = Reaction(lhs5, rhs5)
    assert r5.reac == lhs5 and r5.prod == rhs5
def test_Reaction__copy():
    r1 = Reaction({"H2O"}, {"H2O"}, checks=())  # identity reaction; checks off
    r2 = r1.copy()
    assert r1 == r2
    # mutating the copy must not leak back into the original (deep copy)
    r2.reac["H2O2"] = r2.reac.pop("H2O")  # 1
    r2.prod["H2O2"] = r2.prod.pop("H2O")  # 1
    assert r1.reac == {"H2O": 1} and r1.prod == {"H2O": 1}


@requires(parsing_library)
def test_Reaction__from_string():
    # second argument is a whitelist of substance keys; unknown species raise
    r = Reaction.from_string("H2O -> H+ + OH-; 1e-4", "H2O H+ OH-".split())
    assert r.reac == {"H2O": 1} and r.prod == {"H+": 1, "OH-": 1}
    with pytest.raises(ValueError):
        Reaction.from_string("H2O -> H+ + OH-; 1e-4", "H2O H OH-".split())
    r2 = Reaction.from_string("H2O -> H+ + OH-; 1e-4; ref='important_paper'")
    assert r2.ref == "important_paper"
    # identity reactions are rejected unless checks are disabled
    with pytest.raises(ValueError):
        Reaction.from_string("H2O -> H2O")
    Reaction.from_string("H2O -> H2O; None; checks=()")
    with pytest.raises(ValueError):
        Reaction({"H2O": 2}, {"H2O2": 2, "O2": -2})  # negative coefficient
    # round-trip: str() output must parse back to an equal Reaction
    r4 = Reaction({"H+": 2, "OH-": 1}, {"H2O": 2}, 42.0)
    assert Reaction.from_string(str(r4), "H+ OH- H2O") == r4
    assert Reaction.from_string(str(r4), None) == r4
    # fractional coefficients allowed once the all_integral check is dropped
    r5 = Reaction.from_string(
        "H2O2 -> 0.5 O2 + H2O",
        checks=[c for c in Reaction.default_checks if c != "all_integral"],
    )
    r6 = r5.copy()
    assert r5 == r6
    # semicolons inside a quoted data value must not be treated as separators
    r7 = Reaction.from_string(
        "H2O -> H + OH; None; data=dict(ref='foo; bar; baz;') # foobar"
    )
    assert r7.data["ref"] == "foo; bar; baz;"
@requires(parsing_library, units_library)
def test_Reaction_from_string__units():
    r5 = Reaction.from_string("2 H2O2 -> O2 + 2 H2O; 1e-7/molar/second", "H2O O2 H2O2")
    assert to_unitless(r5.param, 1 / default_units.molar / default_units.second) == 1e-7
    r6 = Reaction.from_string("->", checks=())  # empty reaction parses with checks off
    assert r6.reac == {} and r6.prod == {}
    # arithmetic expressions are evaluated in the parameter field
    r7 = Reaction.from_string("2 A -> B; exp(log(2e-3))*metre**3/mol/hour", None)
    assert r7.reac == {"A": 2} and r7.prod == {"B": 1}
    assert allclose(
        r7.param,
        2e-3 * default_units.metre ** 3 / default_units.mol / default_units.hour,
    )
    with pytest.raises(ValueError):
        # units inconsistent with the reaction order are rejected
        Reaction.from_string("2 A -> B; 2e-3/hour", None)
    r8 = Reaction.from_string('A -> B; "k"')  # quoted name -> symbolic rate constant
    assert r8.rate_expr().args is None
    assert r8.rate_expr().unique_keys == ("k",)
    r9 = Reaction.from_string("A -> B; 42.0")  # literal -> numeric rate constant
    assert r9.rate_expr().args == [42.0]
    assert r9.rate_expr().unique_keys is None
    Reaction.from_string("H+ + OH- -> H2O; 1e10/M/s", "H2O H+ OH-".split())
    with pytest.raises(ValueError):
        Reaction.from_string("H2O -> H+ + OH-; 1e-4/M/s", "H2O H+ OH-".split())


@requires(parsing_library, units_library)
def test_Substance__molar_mass():
    mw_water = Substance.from_formula("H2O").molar_mass(default_units)
    # compare against O + 2*H standard atomic weights in g/mol
    q = mw_water / ((15.9994 + 2 * 1.008) * default_units.gram / default_units.mol)
    assert abs(q - 1) < 1e-3


@requires(units_library)
def test_Equilibrium__as_reactions():
    s = default_units.second
    M = default_units.molar
    H2O, Hp, OHm = map(Substance, "H2O H+ OH-".split())
    eq = Equilibrium({"H2O": 1}, {"H+": 1, "OH-": 1}, 1e-14)
    rate = 1.31e11 / M / s
    fw, bw = eq.as_reactions(kb=rate, units=default_units)
    # detailed balance: kb is taken verbatim and kf/kb must equal K (1e-14 M)
    assert abs((bw.param - rate) / rate) < 1e-15
    assert abs((fw.param / M) / bw.param - 1e-14) / 1e-14 < 1e-15
@requires(parsing_library)
def test_ReactioN__latex():
    # NOTE(review): "ReactioN" is a typo in the test name; left unchanged so the
    # set of collected tests is not silently altered by a rename.
    keys = "H2O H2 O2".split()
    subst = {k: Substance.from_formula(k) for k in keys}
    r2 = Reaction.from_string("2 H2O -> 2 H2 + O2", subst)
    assert r2.latex(subst) == r"2 H_{2}O \rightarrow 2 H_{2} + O_{2}"
    r3 = Reaction.from_string("2 H2O -> 2 H2 + O2; 42; name='split'", subst)
    # with_param / with_name toggle the "; 42" and "; split" suffixes
    assert (
        r3.latex(subst, with_param=True, with_name=True)
        == r"2 H_{2}O \rightarrow 2 H_{2} + O_{2}; 42; split"
    )
    assert (
        r3.latex(subst, with_name=True)
        == r"2 H_{2}O \rightarrow 2 H_{2} + O_{2}; split"
    )
    assert (
        r3.latex(subst, with_param=True) == r"2 H_{2}O \rightarrow 2 H_{2} + O_{2}; 42"
    )
    assert r3.latex(subst) == r"2 H_{2}O \rightarrow 2 H_{2} + O_{2}"


@requires(parsing_library)
def test_Reaction__unicode():
    keys = u"H2O H2 O2".split()
    subst = {k: Substance.from_formula(k) for k in keys}
    r2 = Reaction.from_string("2 H2O -> 2 H2 + O2", subst)
    assert r2.unicode(subst) == u"2 H₂O → 2 H₂ + O₂"
    r3 = Reaction.from_string("2 H2O -> 2 H2 + O2; 42; name='split'", subst)
    assert r3.unicode(subst) == u"2 H₂O → 2 H₂ + O₂"
    assert r3.unicode(subst, with_name=True) == u"2 H₂O → 2 H₂ + O₂; split"
    assert (
        r3.unicode(subst, with_name=True, with_param=True)
        == u"2 H₂O → 2 H₂ + O₂; 42; split"
    )
    assert r3.unicode(subst, with_param=True) == u"2 H₂O → 2 H₂ + O₂; 42"


@requires(parsing_library)
def test_Reaction__html():
    keys = "H2O H2 O2".split()
    subst = {k: Substance.from_formula(k) for k in keys}
    r2 = Reaction.from_string("2 H2O -> 2 H2 + O2", subst)
    assert r2.html(subst) == "2 H<sub>2</sub>O → 2 H<sub>2</sub> + O<sub>2</sub>"
    # custom formatter hooks for coefficients and formulas
    assert (
        r2.html(subst, Reaction_coeff_fmt=lambda s: "<b>{0}</b>".format(s))
        == "<b>2</b> H<sub>2</sub>O → <b>2</b> H<sub>2</sub> + O<sub>2</sub>"
    )
    assert (
        r2.html(subst, Reaction_formula_fmt=lambda s: "<b>{0}</b>".format(s))
        == "2 <b>H<sub>2</sub>O</b> → 2 <b>H<sub>2</sub></b> + <b>O<sub>2</sub></b>"
    )
def test_Reaction__idempotency():
    # reactions with no net change (directly or via inactive species) are rejected
    with pytest.raises(ValueError):
        Reaction({"A": 1}, {"A": 1})
    with pytest.raises(ValueError):
        Reaction({}, {})
    with pytest.raises(ValueError):
        Reaction({"A": 1}, {"B": 1}, inact_reac={"B": 1}, inact_prod={"A": 1})


@requires("sympy")
def test_Equilibrium__eliminate():
    e1 = Equilibrium({"A": 1, "B": 2}, {"C": 3})
    e2 = Equilibrium({"D": 5, "B": 7}, {"E": 11})
    # coefficients that cancel "B" in the linear combination 7*e1 - 2*e2
    coeff = Equilibrium.eliminate([e1, e2], "B")
    assert coeff == [7, -2]
    e3 = coeff[0] * e1 + coeff[1] * e2
    assert e3.net_stoich("B") == (0,)
    e4 = e1 * coeff[0] + coeff[1] * e2  # scalar multiplication commutes
    assert e4.net_stoich("B") == (0,)
    assert (-e1).reac == {"C": 3}  # negation swaps reactants and products
    assert (e2 * -3).reac == {"E": 33}


@requires(parsing_library, units_library)
def test_Equilibrium__from_string():
    assert Equilibrium.from_string("H2O = H+ + OH-").param is None
    assert Equilibrium.from_string("H2O = H+ + OH-; 1e-14").param == 1e-14
    assert Equilibrium.from_string("H2O = H+ + OH-; 1e-14*molar").param ** 0 == 1
    with pytest.raises(ValueError):
        # units of the constant inconsistent with this direction of the reaction
        Equilibrium.from_string("H+ + OH- = H2O; 1e-14*molar")
    eq5 = Equilibrium.from_string(
        "CO2(aq) = CO2(g);" "chempy.henry.HenryWithUnits(3.3e-4 * molar / Pa, 2400 * K)"
    )
    assert eq5.reac == {"CO2(aq)": 1}


def test_Equilibrium__cancel():
    # 2B + C -> E
    e1 = Equilibrium({"A": 26, "B": 20, "C": 7}, {"D": 4, "E": 7})
    e2 = Equilibrium({"A": 13, "B": 3}, {"D": 2})
    # multiplier for e2 such that e1 + coeff*e2 removes the shared species
    coeff = e1.cancel(e2)
    assert coeff == -2
@requires("sympy")
def test_balance_stoichiometry():
    # 4 NH4ClO4 -> 2 N2 + 4 HCl + 6H2O + 5O2
    # 4 Al + 3O2 -> 2Al2O3
    # ---------------------------------------
    # 6 NH4ClO4 + 10 Al + -> 3 N2 + 6 HCl + 9 H2O + 5 Al2O3
    reac, prod = balance_stoichiometry({"NH4ClO4", "Al"}, {"Al2O3", "HCl", "H2O", "N2"})
    assert reac == {"NH4ClO4": 6, "Al": 10}
    assert prod == {"Al2O3": 5, "HCl": 6, "H2O": 9, "N2": 3}
    r3, p3 = balance_stoichiometry({"C2H6", "O2"}, {"H2O", "CO2"})
    assert r3 == {"C2H6": 2, "O2": 7}
    assert p3 == {"CO2": 4, "H2O": 6}
    r4, p4 = balance_stoichiometry({"C7H5(NO2)3", "NH4NO3"}, {"CO", "H2O", "N2"})
    assert r4 == {"C7H5(NO2)3": 2, "NH4NO3": 7}
    assert p4 == {"CO": 14, "H2O": 19, "N2": 10}
    # verify elemental balance of the result by summing per-element
    # compositions (weighted by coefficient) on each side
    a5, b5 = {"C3H5NO", "CH4", "NH3", "H2O"}, {"C2H6", "CH4O", "CH5N", "CH3N"}
    formulas = list(set.union(a5, b5))
    substances = dict(zip(formulas, map(Substance.from_formula, formulas)))
    compositions = {k: ArithmeticDict(int, substances[k].composition) for k in formulas}
    r5, p5 = balance_stoichiometry(a5, b5)
    compo_reac = dict(reduce(add, [compositions[k] * v for k, v in r5.items()]))
    compo_prod = dict(reduce(add, [compositions[k] * v for k, v in p5.items()]))
    assert compo_reac == compo_prod
    a6, b6 = map(
        lambda x: set(x.split()), "CuSCN KIO3 HCl;CuSO4 KCl HCN ICl H2O".split(";")
    )
    r6, p6 = balance_stoichiometry(a6, b6)
    assert r6 == dict(CuSCN=4, KIO3=7, HCl=14)
    assert p6 == dict(CuSO4=4, KCl=7, HCN=4, ICl=7, H2O=5)
    # half-reactions: electrons ("e-") balance charge like any other species
    r7, p7 = balance_stoichiometry({"Zn+2", "e-"}, {"Zn"})
    assert r7 == {"Zn+2": 1, "e-": 2}
    assert p7 == {"Zn": 1}
    r8, p8 = balance_stoichiometry({"Zn"}, {"Zn+2", "e-"})
    assert r8 == {"Zn": 1}
    assert p8 == {"Zn+2": 1, "e-": 2}


@requires("sympy")
def test_balance_stoichiometry__ordering():
    reac, prod = "CuSCN KIO3 HCl".split(), "CuSO4 KCl HCN ICl H2O".split()
    rxn = Reaction(*balance_stoichiometry(reac, prod))
    res = rxn.string()
    # the rendered string must preserve the input ordering of reac/prod
    ref = "4 CuSCN + 7 KIO3 + 14 HCl -> 4 CuSO4 + 7 KCl + 4 HCN + 7 ICl + 5 H2O"
    assert res == ref


@requires("sympy")
def test_balance_stoichiometry__simple():
    r2, p2 = balance_stoichiometry({"Na2CO3"}, {"Na2O", "CO2"})
    assert r2 == {"Na2CO3": 1}
    assert p2 == {"Na2O": 1, "CO2": 1}
@requires("sympy", "pulp")
@pytest.mark.parametrize("underdet", [False, None, True])
def test_balance_stoichiometry__impossible(underdet):
    # PulpSolverError moved between PuLP versions; import from either location.
    try:
        from pulp import PulpSolverError
    except ModuleNotFoundError:
        from pulp.solvers import PulpSolverError  # older version of PuLP
    # CO -> CO2 cannot be balanced for any value of ``underdetermined``
    with pytest.raises((ValueError, PulpSolverError)):
        r1, p1 = balance_stoichiometry({"CO"}, {"CO2"}, underdetermined=underdet)


@requires("sympy", "pulp")
def test_balance_stoichiometry__underdetermined():
    try:
        from pulp import PulpSolverError
    except ModuleNotFoundError:
        from pulp.solvers import PulpSolverError  # older version of PuLP
    # underdetermined=False must refuse systems with infinitely many solutions
    with pytest.raises(ValueError):
        balance_stoichiometry(
            {"C2H6", "O2"}, {"H2O", "CO2", "CO"}, underdetermined=False
        )
    reac, prod = balance_stoichiometry({"C2H6", "O2"}, {"H2O", "CO2", "CO"})
    r1 = {"C7H5O3-", "O2", "C21H27N7O14P2-2", "H+"}
    p1 = {
        "C7H5O4-",
        "C21H26N7O14P2-",
        "H2O",
    }  # see https://github.com/bjodah/chempy/issues/67
    # underdetermined=None requests the canonical all-ones-feasible solution
    bal1 = balance_stoichiometry(r1, p1, underdetermined=None)
    assert bal1 == (
        {"C21H27N7O14P2-2": 1, "H+": 1, "C7H5O3-": 1, "O2": 1},
        {"C21H26N7O14P2-": 1, "H2O": 1, "C7H5O4-": 1},
    )
    with pytest.raises(ValueError):
        balance_stoichiometry({"C3H4O3", "H3PO4"}, {"C3H6O3"}, underdetermined=None)
    for underdet in [False, True, None]:
        with pytest.raises((ValueError, PulpSolverError)):
            balance_stoichiometry({"C3H6O3"}, {"C3H4O3"}, underdetermined=underdet)
    with pytest.raises(
        ValueError
    ):  # https://github.com/bjodah/chempy/pull/86#issuecomment-375421609
        balance_stoichiometry(
            {"C21H36N7O16P3S", "C3H4O3"}, {"H2O", "C5H8O3", "C24H38N7O18P3S"}
        )
@requires("sympy", "pulp")
def test_balance_stoichiometry__very_underdetermined():
    r3 = set("O2 Fe Al Cr".split())
    p3 = set("FeO Fe2O3 Fe3O4 Al2O3 Cr2O3 CrO3".split())
    bal3 = balance_stoichiometry(r3, p3, underdetermined=None)
    ref3 = {"Fe": 7, "Al": 2, "Cr": 3, "O2": 9}, {k: 2 if k == "FeO" else 1 for k in p3}
    substances = {k: Substance.from_formula(k) for k in r3 | p3}
    # the reference must itself be elementally balanced
    assert all(viol == 0 for viol in Reaction(*ref3).composition_violation(substances))
    # the solver's coefficient sum should not exceed the reference's
    assert sum(bal3[0].values()) + sum(bal3[1].values()) <= sum(ref3[0].values()) + sum(
        ref3[1].values()
    )
    assert bal3 == ref3


@requires("sympy", "pulp")
def test_balance_stoichiometry__underdetermined__canoncial():
    # This tests for canonical representation of the underdetermined system
    # where all coefficients are integer and >= 1. It is however of limited
    # practical use (and hence marked ``xfail``) since underdetermined systems
    # have infinite number of solutions.
    # NOTE(review): no ``xfail`` marker is actually applied here -- the comment
    # above appears stale; confirm against the project's test history.
    # It should however be possible to rewrite
    # the logic so that such canonical results are returned from balance_stoichiometry
    r2 = {"O2", "O3", "C", "NO", "N2O", "NO2", "N2O4"}
    p2 = {"CO", "CO2", "N2"}
    bal2 = balance_stoichiometry(r2, p2, underdetermined=None)
    ref2 = (
        {"O2": 1, "O3": 1, "C": 7, "NO": 1, "N2O": 1, "NO2": 1, "N2O4": 1},
        {"CO": 1, "CO2": 6, "N2": 3},
    )
    substances = {k: Substance.from_formula(k) for k in r2 | p2}
    assert all(viol == 0 for viol in Reaction(*ref2).composition_violation(substances))
    assert sum(bal2[0].values()) + sum(bal2[1].values()) <= sum(ref2[0].values()) + sum(
        ref2[1].values()
    )
    assert bal2 == ref2


@requires("sympy", "pulp")
def test_balance_stoichiometry__substances__underdetermined():
    # compositions need not be chemical elements: any conserved quantity works
    substances = {
        s.name: s
        for s in [
            Substance("eggs_6pack", composition=dict(eggs=6)),
            Substance("milk_carton", composition=dict(cups_of_milk=4)),
            Substance("flour_bag", composition=dict(spoons_of_flour=30)),
            Substance(
                "pancake", composition=dict(eggs=1, cups_of_milk=1, spoons_of_flour=2)
            ),
            Substance(
                "waffle", composition=dict(eggs=2, cups_of_milk=2, spoons_of_flour=3)
            ),
        ]
    }
    ur1 = {"eggs_6pack", "milk_carton", "flour_bag"}
    up1 = {"pancake", "waffle"}
    br1, bp1 = balance_stoichiometry(
        ur1, up1, substances=substances, underdetermined=None
    )
    ref_r1 = {"eggs_6pack": 6, "flour_bag": 2, "milk_carton": 9}
    ref_p1 = {"pancake": 12, "waffle": 12}
    assert all(
        viol == 0 for viol in Reaction(ref_r1, ref_p1).composition_violation(substances)
    )
    assert all(v > 0 for v in br1.values()) and all(v > 0 for v in bp1.values())
    assert bp1 == ref_p1
    assert br1 == ref_r1
@requires("sympy")
def test_balance_stoichiometry__missing_product_atom():
    with pytest.raises(ValueError):  # No Al on product side
        balance_stoichiometry({"C7H5(NO2)3", "Al", "NH4NO3"}, {"CO", "H2O", "N2"})


@requires("sympy")
def test_balance_stoichiometry__duplicates():
    # species appearing on both sides are only accepted with
    # allow_duplicates=True; the solver should drop the redundant entries
    cases = """
C + CO + CO2 -> C + CO # suggested solution: C + CO2 -> 2 CO
C + CO + CO2 -> C + CO2 # suggested solution: 2 CO -> C + CO2
C + CO + CO2 -> CO + CO2 # suggested solution: C + CO2 -> 2 CO
C + CO -> C + CO + CO2 # suggested solution: 2 CO -> C + CO2
C + CO2 -> C + CO + CO2 # suggested solution: C + CO2 -> 2 CO
CO + CO2 -> C + CO + CO2 # suggested solution: 2 CO -> C + CO2
"""
    for prob, sol in [line.split("#") for line in cases.strip().splitlines()]:
        tst_r = Reaction.from_string(prob)
        ref_r = Reaction.from_string(sol.split(":")[1])
        tst_bal = balance_stoichiometry(
            tst_r.reac, tst_r.prod, allow_duplicates=True, underdetermined=None
        )
        assert Reaction(*tst_bal) == ref_r
    # identical reactant and product sets can never be balanced
    with pytest.raises(ValueError):
        balance_stoichiometry(
            {"C", "CO", "CO2"},
            {"C", "CO", "CO2"},
            allow_duplicates=True,
            underdetermined=None,
        )
    gh120 = {"H4P2O7", "HPO3", "H2O"}, {"H4P2O7", "HPO3"}
    bal120 = balance_stoichiometry(*gh120, allow_duplicates=True, underdetermined=None)
    assert bal120 == ({"HPO3": 2, "H2O": 1}, {"H4P2O7": 1})
    with pytest.raises(ValueError):
        balance_stoichiometry(*gh120)
    # https://github.com/bjodah/chempy/issues/120#issuecomment-434453703
    bal_Mn = balance_stoichiometry(
        {"H2O2", "Mn1", "H1"},
        {"Mn1", "H2O1"},
        allow_duplicates=True,
        underdetermined=None,
    )
    assert bal_Mn == ({"H2O2": 1, "H1": 2}, {"H2O1": 2})
    bal_Mn_COx = balance_stoichiometry(
        {"C", "CO", "CO2", "Mn"},
        {"C", "CO2", "Mn"},
        allow_duplicates=True,
        underdetermined=None,
    )
    assert bal_Mn_COx == ({"CO": 2}, {"C": 1, "CO2": 1})
| bsd-2-clause |
jvansteirteghem/twunnel | twunnel/dns_resolver.py | 1 | 8895 | # Copyright (c) Jeroen Van Steirteghem
# See LICENSE
from twisted.internet import error, defer, protocol, reactor
from twisted.names import common, dns, hosts, resolve
from twisted.python import failure
import socket
import twunnel.logger
import twunnel.proxy_server
def setDefaultConfiguration(configuration, keys):
    """Fill in missing DNS_RESOLVER defaults on ``configuration`` in place.

    Only acts when "DNS_RESOLVER" is listed in ``keys``; existing values are
    never overwritten (``setdefault`` semantics throughout).
    """
    if "DNS_RESOLVER" not in keys:
        return
    dnsResolver = configuration.setdefault("DNS_RESOLVER", {})
    dnsResolver.setdefault("FILE", "")
    dnsResolver.setdefault("SERVERS", [])
    for server in dnsResolver["SERVERS"]:
        server.setdefault("ADDRESS", "")
        server.setdefault("PORT", 0)
class ResolverBase(common.ResolverBase):
    """Shared base: resolves a host name from a full DNS record set.

    Subclasses supply ``lookupAllRecords``; this class reduces the returned
    (answers, authority, additional) triple to one address string, following
    CNAME chains and NS referrals up to a fixed recursion budget (``effort``).
    """

    def __init__(self, configuration):
        twunnel.logger.log(3, "trace: ResolverBase.__init__")
        self.configuration = configuration
        common.ResolverBase.__init__(self)

    def getHostByName(self, name, timeout=None, effort=10):
        # ``effort`` bounds how many CNAME hops / NS referrals may be followed.
        twunnel.logger.log(3, "trace: ResolverBase.getHostByName")
        deferred = self.lookupAllRecords(name, timeout)
        deferred.addCallback(self._cbRecords, name, timeout, effort)
        return deferred

    def _cbRecords(self, records, name, timeout, effort):
        twunnel.logger.log(3, "trace: ResolverBase._cbRecords")
        (answers, authority, additional) = records
        result = self._extractRecord(answers + authority + additional, name, timeout, effort)
        if not result:
            raise error.DNSLookupError(name)
        return result

    def _extractRecord(self, records, name, timeout, effort):
        # Preference order: A (IPv4), then A6/AAAA (IPv6), then follow a CNAME,
        # finally re-query via an authoritative NS referral.
        twunnel.logger.log(3, "trace: ResolverBase._extractRecord")
        dnsName = dns.Name(name)
        if not effort:
            return None  # recursion budget exhausted
        for r in records:
            if r.name == dnsName and r.type == dns.A:
                return socket.inet_ntop(socket.AF_INET, r.payload.address)
        for r in records:
            if r.name == dnsName and r.type == dns.A6:
                return socket.inet_ntop(socket.AF_INET6, r.payload.address)
        for r in records:
            if r.name == dnsName and r.type == dns.AAAA:
                return socket.inet_ntop(socket.AF_INET6, r.payload.address)
        for r in records:
            if r.name == dnsName and r.type == dns.CNAME:
                # try to resolve the CNAME target from the same record set ...
                result = self._extractRecord(records, str(r.payload.name), timeout, effort - 1)
                if not result:
                    # ... otherwise issue a fresh lookup for the target.
                    # NOTE(review): this returns a Deferred where the other
                    # branches return a string; callers appear to tolerate both.
                    return self.getHostByName(str(r.payload.name), timeout, effort - 1)
                return result
        for r in records:
            if r.type == dns.NS:
                # Referral: build a one-server resolver aimed at the NS host
                # (reusing this resolver's proxy configuration).
                configuration = {}
                configuration["PROXY_SERVERS"] = self.configuration["PROXY_SERVERS"]
                configuration["DNS_RESOLVER"] = {}
                configuration["DNS_RESOLVER"]["SERVERS"] = []
                configuration["DNS_RESOLVER"]["SERVERS"].append({})
                configuration["DNS_RESOLVER"]["SERVERS"][0]["ADDRESS"] = str(r.payload.name)
                configuration["DNS_RESOLVER"]["SERVERS"][0]["PORT"] = dns.PORT
                resolver = ServerResolver(configuration)
                return resolver.getHostByName(name, timeout, effort - 1)
class FileResolver(ResolverBase, hosts.Resolver):
    """Resolver backed by a hosts(5)-style file (configuration key FILE)."""

    def __init__(self, configuration, ttl = 60 * 60):
        twunnel.logger.log(3, "trace: FileResolver.__init__")
        self.configuration = configuration
        ResolverBase.__init__(self, configuration)
        hosts.Resolver.__init__(self, self.configuration["DNS_RESOLVER"]["FILE"], ttl)

    def lookupAddress(self, name, timeout=None):
        # A records (IPv4) straight from the hosts file.
        twunnel.logger.log(3, "trace: FileResolver.lookupAddress")
        return hosts.Resolver.lookupAddress(self, name, timeout)

    def lookupIPV6Address(self, name, timeout=None):
        # AAAA records (IPv6) straight from the hosts file.
        twunnel.logger.log(3, "trace: FileResolver.lookupIPV6Address")
        return hosts.Resolver.lookupIPV6Address(self, name, timeout)

    def _a_aaaaRecords(self, name):
        # Combined IPv4 + IPv6 record list for ``name``.
        twunnel.logger.log(3, "trace: FileResolver._a_aaaaRecords")
        return self._aRecords(name) + self._aaaaRecords(name)

    def lookupAllRecords(self, name, timeout=None):
        twunnel.logger.log(3, "trace: FileResolver.lookupAllRecords")
        return self._respond(name, self._a_aaaaRecords(name))
class DNSProtocolClientFactory(protocol.ClientFactory):
    """Builds dns.DNSProtocol instances for a ServerResolver (the controller)."""

    def __init__(self, controller):
        twunnel.logger.log(3, "trace: DNSProtocolClientFactory.__init__")
        self.controller = controller

    def clientConnectionLost(self, connector, reason):
        twunnel.logger.log(3, "trace: DNSProtocolClientFactory.clientConnectionLost")

    def clientConnectionFailed(self, connector, reason):
        twunnel.logger.log(3, "trace: DNSProtocolClientFactory.clientConnectionFailed")
        # Fail every lookup that was queued while waiting for this connection.
        deferreds = self.controller.deferreds[:]
        del self.controller.deferreds[:]
        for deferred, name, type, cls, timeout in deferreds:
            deferred.errback(reason)

    def buildProtocol(self, addr):
        twunnel.logger.log(3, "trace: DNSProtocolClientFactory.buildProtocol")
        p = dns.DNSProtocol(self.controller)
        p.factory = self
        return p
class ServerResolver(ResolverBase):
    """Resolver that queries configured DNS servers over TCP, optionally
    through a proxy tunnel.

    Lookups issued while no connection exists are queued in ``deferreds`` and
    replayed from ``connectionMade``.
    """

    def __init__(self, configuration):
        twunnel.logger.log(3, "trace: ServerResolver.__init__")
        ResolverBase.__init__(self, configuration)
        self.configuration = configuration
        self.i = 0  # round-robin index into DNS_RESOLVER["SERVERS"]
        self.connections = []  # live DNSProtocol instances
        self.deferreds = []  # queued (deferred, name, type, cls, timeout)
        self.factory = DNSProtocolClientFactory(self)

    def connectionMade(self, connection):
        twunnel.logger.log(3, "trace: ServerResolver.connectionMade")
        self.connections.append(connection)
        # Replay every lookup that was queued while disconnected.
        deferreds = self.deferreds[:]
        del self.deferreds[:]
        for (deferred, name, type, cls, timeout) in deferreds:
            self._lookup(name, cls, type, timeout).chainDeferred(deferred)

    def connectionLost(self, connection):
        twunnel.logger.log(3, "trace: ServerResolver.connectionLost")
        self.connections.remove(connection)

    def messageReceived(self, message, protocol, address=None):
        # Unsolicited messages are ignored; replies to queries are delivered
        # through the Deferred returned by DNSProtocol.query instead.
        twunnel.logger.log(3, "trace: ServerResolver.messageReceived")

    def _lookup(self, name, cls, type, timeout=None):
        twunnel.logger.log(3, "trace: ServerResolver._lookup")
        if not len(self.connections):
            # No live connection: advance to the next configured server,
            # start connecting through the tunnel and queue this lookup.
            self.i = self.i + 1
            if self.i >= len(self.configuration["DNS_RESOLVER"]["SERVERS"]):
                self.i = 0
            tunnel = twunnel.proxy_server.createTunnel(self.configuration)
            tunnel.connect(self.configuration["DNS_RESOLVER"]["SERVERS"][self.i]["ADDRESS"], self.configuration["DNS_RESOLVER"]["SERVERS"][self.i]["PORT"], self.factory)
            deferred = defer.Deferred()
            self.deferreds.append((deferred, name, type, cls, timeout))
            return deferred
        else:
            # Always uses the first (oldest) connection.
            deferred = self.connections[0].query([dns.Query(name, type, cls)])
            deferred.addCallback(self._cbMessage)
            return deferred

    def _cbMessage(self, message):
        twunnel.logger.log(3, "trace: ServerResolver._cbMessage")
        if message.rCode != dns.OK:
            # Map the DNS response code onto the matching twisted exception.
            return failure.Failure(self.exceptionForCode(message.rCode)(message))
        return (message.answers, message.authority, message.additional)
class Resolver(ResolverBase, resolve.ResolverChain):
    """Chains a FileResolver and/or a ServerResolver, consulted in that order."""

    def __init__(self, configuration):
        twunnel.logger.log(3, "trace: Resolver.__init__")
        resolvers = []
        if configuration["DNS_RESOLVER"]["FILE"] != "":
            resolvers.append(FileResolver(configuration))
        if len(configuration["DNS_RESOLVER"]["SERVERS"]) != 0:
            resolvers.append(ServerResolver(configuration))
        ResolverBase.__init__(self, configuration)
        resolve.ResolverChain.__init__(self, resolvers)

    def _lookup(self, name, cls, type, timeout):
        twunnel.logger.log(3, "trace: Resolver._lookup")
        return resolve.ResolverChain._lookup(self, name, cls, type, timeout)
def createResolver(configuration):
    """Build a Resolver from ``configuration``.

    Defaults are filled in first; returns None when neither a hosts file nor
    any DNS server is configured.
    """
    setDefaultConfiguration(configuration, ["DNS_RESOLVER"])
    dnsResolver = configuration["DNS_RESOLVER"]
    if dnsResolver["FILE"] == "" and len(dnsResolver["SERVERS"]) == 0:
        return None
    return Resolver(configuration)
def getDefaultResolver():
    # The reactor's currently installed name resolver.
    return reactor.resolver
def setDefaultResolver(resolver):
    """Install ``resolver`` as the reactor's name resolver."""
    # BUG FIX: the assignment line had dataset-extraction junk ("| mit |")
    # fused onto it, which is a syntax error; restore the plain assignment.
    reactor.resolver = resolver
sahilshekhawat/sympy | sympy/geometry/ellipse.py | 7 | 41167 | """Elliptical geometrical entities.
Contains
* Ellipse
* Circle
"""
from __future__ import print_function, division
from sympy.core import S, sympify, pi
from sympy.core.logic import fuzzy_bool
from sympy.core.numbers import oo, Rational
from sympy.core.compatibility import range
from sympy.core.symbol import Dummy
from sympy.simplify import simplify, trigsimp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.geometry.exceptions import GeometryError
from sympy.polys import Poly, PolynomialError, DomainError
from sympy.polys.polyutils import _nsort, _not_a_coeff
from sympy.solvers import solve
from sympy.utilities.iterables import uniq
from sympy.utilities.misc import filldedent
from .entity import GeometryEntity
from .point import Point
from .line import LinearEntity, Line
from .util import _symbol, idiff
import random
from sympy.utilities.decorator import doctest_depends_on
class Ellipse(GeometryEntity):
"""An elliptical GeometryEntity.
Parameters
==========
center : Point, optional
Default value is Point(0, 0)
hradius : number or SymPy expression, optional
vradius : number or SymPy expression, optional
eccentricity : number or SymPy expression, optional
Two of `hradius`, `vradius` and `eccentricity` must be supplied to
create an Ellipse. The third is derived from the two supplied.
Attributes
==========
center
hradius
vradius
area
circumference
eccentricity
periapsis
apoapsis
focus_distance
foci
Raises
======
GeometryError
When `hradius`, `vradius` and `eccentricity` are incorrectly supplied
as parameters.
TypeError
When `center` is not a Point.
See Also
========
Circle
Notes
-----
Constructed from a center and two radii, the first being the horizontal
radius (along the x-axis) and the second being the vertical radius (along
the y-axis).
When symbolic value for hradius and vradius are used, any calculation that
refers to the foci or the major or minor axis will assume that the ellipse
has its major radius on the x-axis. If this is not true then a manual
rotation is necessary.
Examples
========
>>> from sympy import Ellipse, Point, Rational
>>> e1 = Ellipse(Point(0, 0), 5, 1)
>>> e1.hradius, e1.vradius
(5, 1)
>>> e2 = Ellipse(Point(3, 1), hradius=3, eccentricity=Rational(4, 5))
>>> e2
Ellipse(Point(3, 1), 3, 9/5)
Plotting:
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import Circle, Segment
>>> c1 = Circle(Point(0,0), 1)
>>> Plot(c1) # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
>>> p = Plot() # doctest: +SKIP
>>> p[0] = c1 # doctest: +SKIP
>>> radius = Segment(c1.center, c1.random_point())
>>> p[1] = radius # doctest: +SKIP
>>> p # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
[1]: t*cos(1.546086215036205357975518382),
t*sin(1.546086215036205357975518382), 'mode=parametric'
"""
    def __new__(
        cls, center=None, hradius=None, vradius=None, eccentricity=None,
        **kwargs):
        # Exactly two of hradius/vradius/eccentricity define the ellipse;
        # the third is derived below.
        hradius = sympify(hradius)
        vradius = sympify(vradius)
        eccentricity = sympify(eccentricity)

        if center is None:
            center = Point(0, 0)
        else:
            center = Point(center)

        # NOTE(review): filter(None, ...) drops falsy values, so a radius or
        # eccentricity equal to 0 is treated as "not supplied" -- confirm that
        # degenerate zero radii are meant to be rejected here.
        if len(list(filter(None, (hradius, vradius, eccentricity)))) != 2:
            raise ValueError('Exactly two arguments of "hradius", '
                '"vradius", and "eccentricity" must not be None."')

        if eccentricity is not None:
            # b = a*sqrt(1 - e**2), solved for whichever radius is missing
            if hradius is None:
                hradius = vradius / sqrt(1 - eccentricity**2)
            elif vradius is None:
                vradius = hradius * sqrt(1 - eccentricity**2)

        # Equal radii degenerate to a Circle instance.
        if hradius == vradius:
            return Circle(center, hradius, **kwargs)
        return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs)
    @property
    def center(self):
        """The center of the ellipse.

        Returns
        =======

        center : number

        See Also
        ========

        sympy.geometry.point.Point

        Examples
        ========

        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.center
        Point(0, 0)

        """
        # args layout: (center, hradius, vradius)
        return self.args[0]

    @property
    def hradius(self):
        """The horizontal radius of the ellipse.

        Returns
        =======

        hradius : number

        See Also
        ========

        vradius, major, minor

        Examples
        ========

        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.hradius
        3

        """
        return self.args[1]

    @property
    def vradius(self):
        """The vertical radius of the ellipse.

        Returns
        =======

        vradius : number

        See Also
        ========

        hradius, major, minor

        Examples
        ========

        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.vradius
        1

        """
        return self.args[2]
    @property
    def minor(self):
        """Shorter axis of the ellipse (if it can be determined) else vradius.

        Returns
        =======

        minor : number or expression

        See Also
        ========

        hradius, vradius, major

        Examples
        ========

        >>> from sympy import Point, Ellipse, Symbol
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.minor
        1

        >>> a = Symbol('a')
        >>> b = Symbol('b')
        >>> Ellipse(p1, a, b).minor
        b

        >>> Ellipse(p1, b, a).minor
        a

        >>> m = Symbol('m')
        >>> M = m + 1
        >>> Ellipse(p1, m, M).minor
        m

        """
        ab = self.args[1:3]
        # A Circle stores only (center, radius), so for Circle instances
        # inheriting this property the slice has length 1 -- do not "clean up"
        # this branch as dead code.
        if len(ab) == 1:
            return ab[0]
        a, b = ab
        # With symbolic radii the comparison may remain an unevaluated
        # relational; ``== True`` / ``== False`` deliberately act only on a
        # definite answer, falling back to vradius otherwise.
        o = a - b < 0
        if o == True:
            return a
        elif o == False:
            return b
        return self.vradius

    @property
    def major(self):
        """Longer axis of the ellipse (if it can be determined) else hradius.

        Returns
        =======

        major : number or expression

        See Also
        ========

        hradius, vradius, minor

        Examples
        ========

        >>> from sympy import Point, Ellipse, Symbol
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.major
        3

        >>> a = Symbol('a')
        >>> b = Symbol('b')
        >>> Ellipse(p1, a, b).major
        a

        >>> Ellipse(p1, b, a).major
        b

        >>> m = Symbol('m')
        >>> M = m + 1
        >>> Ellipse(p1, m, M).major
        m + 1

        """
        ab = self.args[1:3]
        # length-1 slice occurs for Circle (single stored radius); see minor.
        if len(ab) == 1:
            return ab[0]
        a, b = ab
        # mirror of ``minor``: only act on a definitely-evaluated comparison
        o = b - a < 0
        if o == True:
            return a
        elif o == False:
            return b
        return self.hradius
    @property
    def area(self):
        """The area of the ellipse.

        Returns
        =======

        area : number

        Examples
        ========

        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.area
        3*pi

        """
        # area = pi * a * b
        return simplify(S.Pi * self.hradius * self.vradius)
@property
def circumference(self):
"""The circumference of the ellipse.
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.circumference
12*Integral(sqrt((-8*_x**2/9 + 1)/(-_x**2 + 1)), (_x, 0, 1))
"""
from sympy import Integral
if self.eccentricity == 1:
return 2*pi*self.hradius
else:
x = Dummy('x', real=True)
return 4*self.major*Integral(
sqrt((1 - (self.eccentricity*x)**2)/(1 - x**2)), (x, 0, 1))
    @property
    def eccentricity(self):
        """The eccentricity of the ellipse.

        Returns
        =======

        eccentricity : number

        Examples
        ========

        >>> from sympy import Point, Ellipse, sqrt
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, sqrt(2))
        >>> e1.eccentricity
        sqrt(7)/3

        """
        # e = c / a  (focal distance over semi-major axis)
        return self.focus_distance / self.major

    @property
    def periapsis(self):
        """The periapsis of the ellipse.

        The shortest distance between the focus and the contour.

        Returns
        =======

        periapsis : number

        See Also
        ========

        apoapsis : Returns greatest distance between focus and contour

        Examples
        ========

        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.periapsis
        -2*sqrt(2) + 3

        """
        # r_min = a * (1 - e)
        return self.major * (1 - self.eccentricity)

    @property
    def apoapsis(self):
        """The apoapsis of the ellipse.

        The greatest distance between the focus and the contour.

        Returns
        =======

        apoapsis : number

        See Also
        ========

        periapsis : Returns shortest distance between foci and contour

        Examples
        ========

        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.apoapsis
        2*sqrt(2) + 3

        """
        # r_max = a * (1 + e)
        return self.major * (1 + self.eccentricity)

    @property
    def focus_distance(self):
        """The focale distance of the ellipse.

        The distance between the center and one focus.

        Returns
        =======

        focus_distance : number

        See Also
        ========

        foci

        Examples
        ========

        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.focus_distance
        2*sqrt(2)

        """
        return Point.distance(self.center, self.foci[0])
    @property
    def foci(self):
        """The foci of the ellipse.

        Notes
        -----
        The foci can only be calculated if the major/minor axes are known.

        Raises
        ======

        ValueError
            When the major and minor axis cannot be determined.

        See Also
        ========

        sympy.geometry.point.Point
        focus_distance : Returns the distance between focus and center

        Examples
        ========

        >>> from sympy import Point, Ellipse
        >>> p1 = Point(0, 0)
        >>> e1 = Ellipse(p1, 3, 1)
        >>> e1.foci
        (Point(-2*sqrt(2), 0), Point(2*sqrt(2), 0))

        """
        c = self.center
        hr, vr = self.hradius, self.vradius
        if hr == vr:
            # circle: both foci coincide with the center
            return (c, c)

        # calculate focus distance manually, since focus_distance calls this
        # routine
        fd = sqrt(self.major**2 - self.minor**2)
        if hr == self.minor:
            # foci on the y-axis
            return (c + Point(0, -fd), c + Point(0, fd))
        elif hr == self.major:
            # foci on the x-axis
            return (c + Point(-fd, 0), c + Point(fd, 0))
def rotate(self, angle=0, pt=None):
    """Rotate ``angle`` radians counterclockwise about Point ``pt``.

    Note: since the general ellipse is not supported, only rotations that
    are integer multiples of pi/2 are allowed.

    Examples
    ========

    >>> from sympy import Ellipse, pi
    >>> Ellipse((1, 0), 2, 1).rotate(pi/2)
    Ellipse(Point(0, 1), 1, 2)
    >>> Ellipse((1, 0), 2, 1).rotate(pi)
    Ellipse(Point(-1, 0), 2, 1)
    """
    if self.hradius == self.vradius:
        # A circle is rotation-invariant; return an identical copy.
        return self.func(*self.args)
    if (angle/S.Pi).is_integer:
        # Whole multiples of pi keep both axes aligned; only the center
        # moves, which the generic GeometryEntity rotation handles.
        return super(Ellipse, self).rotate(angle, pt)
    if (2*angle/S.Pi).is_integer:
        # Odd multiples of pi/2: swap the horizontal and vertical radii.
        return self.func(self.center.rotate(angle, pt), self.vradius, self.hradius)
    # XXX see https://github.com/sympy/sympy/issues/2815 for general ellipses
    raise NotImplementedError('Only rotations of pi/2 are currently supported for Ellipse.')
def scale(self, x=1, y=1, pt=None):
    """Override GeometryEntity.scale since it is the major and minor
    axes which must be scaled and they are not GeometryEntities.

    Examples
    ========

    >>> from sympy import Ellipse
    >>> Ellipse((0, 0), 2, 1).scale(2, 4)
    Circle(Point(0, 0), 4)
    >>> Ellipse((0, 0), 2, 1).scale(2)
    Ellipse(Point(0, 0), 4, 1)
    """
    if pt:
        # Scaling about an arbitrary point: shift that point to the
        # origin, scale there, then shift back.
        anchor = Point(pt)
        shifted = self.translate(*(-anchor).args)
        return shifted.scale(x, y).translate(*anchor.args)
    # Scaling about the origin: move the center and stretch each radius.
    return self.func(self.center.scale(x, y),
                     hradius=self.hradius * x,
                     vradius=self.vradius * y)
def reflect(self, line):
    """Override GeometryEntity.reflect since the radius
    is not a GeometryEntity.

    Examples
    ========

    >>> from sympy import Circle, Line
    >>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
    Circle(Point(1, 0), -1)
    >>> from sympy import Ellipse, Line, Point
    >>> Ellipse(Point(3, 4), 1, 3).reflect(Line(Point(0, -4), Point(5, 0)))
    Traceback (most recent call last):
    ...
    NotImplementedError:
    General Ellipse is not supported but the equation of the reflected
    Ellipse is given by the zeros of: f(x, y) = (9*x/41 + 40*y/41 +
    37/41)**2 + (40*x/123 - 3*y/41 - 364/123)**2 - 1

    Notes
    =====

    Until the general ellipse (with no axis parallel to the x-axis) is
    supported a NotImplemented error is raised and the equation whose
    zeros define the rotated ellipse is given.
    """
    from .util import _uniquely_named_symbol

    if line.slope in (0, oo):
        # Reflection across a horizontal or vertical line keeps the axes
        # aligned, so only the center needs to be reflected.
        c = self.center
        c = c.reflect(line)
        return self.func(c, -self.hradius, self.vradius)
    else:
        # General reflection would tilt the ellipse; report the implicit
        # equation of the reflected curve instead of constructing it.
        x, y = [_uniquely_named_symbol(name, self, line) for name in 'xy']
        expr = self.equation(x, y)
        p = Point(x, y).reflect(line)
        result = expr.subs(zip((x, y), p.args
            ), simultaneous=True)
        raise NotImplementedError(filldedent(
            'General Ellipse is not supported but the equation '
            'of the reflected Ellipse is given by the zeros of: ' +
            "f(%s, %s) = %s" % (str(x), str(y), str(result))))
def encloses_point(self, p):
    """
    Return True if p is enclosed by (is inside of) self.

    Notes
    -----
    Being on the border of self is considered False.

    Parameters
    ==========

    p : Point

    Returns
    =======

    encloses_point : True, False or None

    See Also
    ========

    sympy.geometry.point.Point

    Examples
    ========

    >>> from sympy import Ellipse, S
    >>> from sympy.abc import t
    >>> e = Ellipse((0, 0), 3, 2)
    >>> e.encloses_point((0, 0))
    True
    >>> e.encloses_point(e.arbitrary_point(t).subs(t, S.Half))
    False
    >>> e.encloses_point((4, 0))
    False
    """
    p = Point(p)
    if p in self:
        # On the boundary is, by definition, not enclosed.
        return False

    if len(self.foci) == 2:
        # if the combined distance from the foci to p (h1 + h2) is less
        # than the combined distance from the foci to the minor axis
        # (which is the same as the major axis length) then p is inside
        # the ellipse
        h1, h2 = [f.distance(p) for f in self.foci]
        test = 2*self.major - (h1 + h2)
    else:
        # Circle case: compare against the radius directly.
        test = self.radius - self.center.distance(p)

    # fuzzy_bool yields None when the sign cannot be determined symbolically.
    return fuzzy_bool(test.is_positive)
@doctest_depends_on(modules=('pyglet',))
def tangent_lines(self, p):
    """Tangent lines between `p` and the ellipse.

    If `p` is on the ellipse, returns the tangent line through point `p`.
    Otherwise, returns the tangent line(s) from `p` to the ellipse, or
    None if no tangent line is possible (e.g., `p` inside ellipse).

    Parameters
    ==========

    p : Point

    Returns
    =======

    tangent_lines : list with 1 or 2 Lines

    Raises
    ======

    NotImplementedError
        Can only find tangent lines for a point, `p`, on the ellipse.

    See Also
    ========

    sympy.geometry.point.Point, sympy.geometry.line.Line

    Examples
    ========

    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(0, 0), 3, 2)
    >>> e1.tangent_lines(Point(3, 0))
    [Line(Point(3, 0), Point(3, -12))]

    >>> # This will plot an ellipse together with a tangent line.
    >>> from sympy.plotting.pygletplot import PygletPlot as Plot
    >>> from sympy import Point, Ellipse
    >>> e = Ellipse(Point(0,0), 3, 2)
    >>> t = e.tangent_lines(e.random_point())
    >>> p = Plot()
    >>> p[0] = e # doctest: +SKIP
    >>> p[1] = t # doctest: +SKIP
    """
    p = Point(p)
    if self.encloses_point(p):
        # No tangent exists from a point strictly inside the ellipse.
        return []

    if p in self:
        # p is on the boundary: the tangent direction is perpendicular
        # to the gradient of the implicit equation at p.
        delta = self.center - p
        rise = (self.vradius ** 2)*delta.x
        run = -(self.hradius ** 2)*delta.y
        p2 = Point(simplify(p.x + run),
                   simplify(p.y + rise))
        return [Line(p, p2)]
    else:
        # Decide (when possible) whether p lies inside, using the
        # focal-distance characterization.
        if len(self.foci) == 2:
            f1, f2 = self.foci
            maj = self.hradius
            test = (2*maj -
                    Point.distance(f1, p) -
                    Point.distance(f2, p))
        else:
            test = self.radius - Point.distance(self.center, p)
        if test.is_number and test.is_positive:
            return []
        # else p is outside the ellipse or we can't tell. In case of the
        # latter, the solutions returned will only be valid if
        # the point is not inside the ellipse; if it is, nan will result.
        x, y = Dummy('x'), Dummy('y')
        eq = self.equation(x, y)
        dydx = idiff(eq, y, x)
        # Tangency: the line through p and a contour point (x, y) must
        # have the same slope as the curve there.
        slope = Line(p, Point(x, y)).slope
        tangent_points = solve([slope - dydx, eq], [x, y])

        # handle horizontal and vertical tangent lines
        if len(tangent_points) == 1:
            assert tangent_points[0][
                0] == p.x or tangent_points[0][1] == p.y
            return [Line(p, p + Point(1, 0)), Line(p, p + Point(0, 1))]

        # others
        return [Line(p, tangent_points[0]), Line(p, tangent_points[1])]
def is_tangent(self, o):
    """Is `o` tangent to the ellipse?

    Parameters
    ==========

    o : GeometryEntity
        An Ellipse, LinearEntity or Polygon

    Raises
    ======

    NotImplementedError
        When the wrong type of argument is supplied.

    Returns
    =======

    is_tangent: boolean
        True if o is tangent to the ellipse, False otherwise.

    See Also
    ========

    tangent_lines

    Examples
    ========

    >>> from sympy import Point, Ellipse, Line
    >>> p0, p1, p2 = Point(0, 0), Point(3, 0), Point(3, 3)
    >>> e1 = Ellipse(p0, 3, 2)
    >>> l1 = Line(p1, p2)
    >>> e1.is_tangent(l1)
    True
    """
    inter = None
    if isinstance(o, Ellipse):
        inter = self.intersection(o)
        if isinstance(inter, Ellipse):
            # intersection returned the ellipse itself: o coincides with
            # self, which does not count as tangency.
            return False
        # Tangent ellipses touch in exactly one point.
        return (inter is not None and len(inter) == 1
                and isinstance(inter[0], Point))
    elif isinstance(o, LinearEntity):
        inter = self._do_line_intersection(o)
        if inter is not None and len(inter) == 1:
            # Single intersection with the full line; it must also lie on
            # the actual segment/ray o to count.
            return inter[0] in o
        else:
            return False
    elif isinstance(o, Polygon):
        # Count touching points over all sides; exactly one means tangent.
        c = 0
        for seg in o.sides:
            inter = self._do_line_intersection(seg)
            c += len([True for point in inter if point in seg])
        return c == 1
    else:
        raise NotImplementedError("Unknown argument type")
def normal_lines(self, p, prec=None):
    """Normal lines between `p` and the ellipse.

    Parameters
    ==========

    p : Point

    Returns
    =======

    normal_lines : list with 1, 2 or 4 Lines

    Examples
    ========

    >>> from sympy import Line, Point, Ellipse
    >>> e = Ellipse((0, 0), 2, 3)
    >>> c = e.center
    >>> e.normal_lines(c + Point(1, 0))
    [Line(Point(0, 0), Point(1, 0))]
    >>> e.normal_lines(c)
    [Line(Point(0, 0), Point(0, 1)), Line(Point(0, 0), Point(1, 0))]

    Off-axis points require the solution of a quartic equation. This
    often leads to very large expressions that may be of little practical
    use. An approximate solution of `prec` digits can be obtained by
    passing in the desired value:

    >>> e.normal_lines((3, 3), prec=2)
    [Line(Point(-38/47, -85/31), Point(9/47, -21/17)),
    Line(Point(19/13, -43/21), Point(32/13, -8/3))]

    Whereas the above solution has an operation count of 12, the exact
    solution has an operation count of 2020.
    """
    p = Point(p)

    # XXX change True to something like self.angle == 0 if the arbitrarily
    # rotated ellipse is introduced.
    # https://github.com/sympy/sympy/issues/2815)
    if True:
        rv = []
        if p.x == self.center.x:
            # p lies on the vertical axis: that axis is a normal line.
            rv.append(Line(self.center, slope=oo))
        if p.y == self.center.y:
            # p lies on the horizontal axis: that axis is a normal line.
            rv.append(Line(self.center, slope=0))
        if rv:
            # at these special orientations of p either 1 or 2 normals
            # exist and we are done
            return rv

    # find the 4 normal points and construct lines through them with
    # the corresponding slope
    x, y = Dummy('x', real=True), Dummy('y', real=True)
    eq = self.equation(x, y)
    dydx = idiff(eq, y, x)
    # A normal's slope is the negative reciprocal of the tangent slope.
    norm = -1/dydx
    slope = Line(p, (x, y)).slope
    seq = slope - norm
    yis = solve(seq, y)[0]
    xeq = eq.subs(y, yis).as_numer_denom()[0].expand()
    if len(xeq.free_symbols) == 1:
        try:
            # this is so much faster, it's worth a try
            xsol = Poly(xeq, x).real_roots()
        except (DomainError, PolynomialError, NotImplementedError):
            xsol = _nsort(solve(xeq, x), separated=True)[0]
        points = [Point(i, solve(eq.subs(x, i), y)[0]) for i in xsol]
    else:
        raise NotImplementedError(
            'intersections for the general ellipse are not supported')
    slopes = [norm.subs(zip((x, y), pt.args)) for pt in points]
    if prec is not None:
        # Evaluate both foot points and slopes to the requested precision.
        points = [pt.n(prec) for pt in points]
        slopes = [i if _not_a_coeff(i) else i.n(prec) for i in slopes]
    return [Line(pt, slope=s) for pt,s in zip(points, slopes)]
def arbitrary_point(self, parameter='t'):
    """A parameterized point on the ellipse.

    Parameters
    ==========

    parameter : str, optional
        Default value is 't'.

    Returns
    =======

    arbitrary_point : Point

    Raises
    ======

    ValueError
        When `parameter` already appears in the functions.

    See Also
    ========

    sympy.geometry.point.Point

    Examples
    ========

    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(0, 0), 3, 2)
    >>> e1.arbitrary_point()
    Point(3*cos(t), 2*sin(t))
    """
    t = _symbol(parameter)
    # Refuse a parameter name that collides with a symbol already used
    # by this ellipse.
    used_names = (f.name for f in self.free_symbols)
    if t.name in used_names:
        raise ValueError(filldedent('Symbol %s already appears in object '
            'and cannot be used as a parameter.' % t.name))
    # Standard trigonometric parameterization about the center.
    cx, cy = self.center.x, self.center.y
    return Point(cx + self.hradius*cos(t),
                 cy + self.vradius*sin(t))
def plot_interval(self, parameter='t'):
    """The plot interval for the default geometric plot of the Ellipse.

    Parameters
    ==========

    parameter : str, optional
        Default value is 't'.

    Returns
    =======

    plot_interval : list
        [parameter, lower_bound, upper_bound]

    Examples
    ========

    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(0, 0), 3, 2)
    >>> e1.plot_interval()
    [t, -pi, pi]
    """
    # One full revolution of the trigonometric parameterization.
    param = _symbol(parameter)
    bounds = [param, -S.Pi, S.Pi]
    return bounds
def random_point(self, seed=None):
    """A random point on the ellipse.

    Returns
    =======

    point : Point

    See Also
    ========

    sympy.geometry.point.Point
    arbitrary_point : Returns parameterized point on ellipse

    Notes
    -----
    A random point may not appear to be on the ellipse, ie, `p in e` may
    return False. This is because the coordinates of the point will be
    floating point values, and when these values are substituted into the
    equation for the ellipse the result may not be zero because of floating
    point rounding error.

    Examples
    ========

    >>> from sympy import Point, Ellipse, Segment
    >>> e1 = Ellipse(Point(0, 0), 3, 2)
    >>> e1.random_point() # gives some random point
    Point(...)
    >>> p1 = e1.random_point(seed=0); p1.n(2)
    Point(2.1, 1.4)

    The random_point method assures that the point will test as being
    in the ellipse:

    >>> p1 in e1
    True

    Notes
    =====

    An arbitrary_point with a random value of t substituted into it may
    not test as being on the ellipse because the expression tested that
    a point is on the ellipse doesn't simplify to zero and doesn't evaluate
    exactly to zero:

    >>> from sympy.abc import t
    >>> e1.arbitrary_point(t)
    Point(3*cos(t), 2*sin(t))
    >>> p2 = _.subs(t, 0.1)
    >>> p2 in e1
    False

    Note that arbitrary_point routine does not take this approach. A value
    for cos(t) and sin(t) (not t) is substituted into the arbitrary point.
    There is a small chance that this will give a point that will not
    test as being in the ellipse, so the process is repeated (up to 10
    times) until a valid point is obtained.
    """
    from sympy import sin, cos, Rational
    t = _symbol('t')
    x, y = self.arbitrary_point(t).args
    # get a random value in [-1, 1) corresponding to cos(t)
    # and confirm that it will test as being in the ellipse
    if seed is not None:
        # Dedicated generator so results are reproducible for a given seed.
        rng = random.Random(seed)
    else:
        rng = random
    for i in range(10):  # should be enough?
        # simplify this now or else the Float will turn s into a Float
        c = 2*Rational(rng.random()) - 1
        s = sqrt(1 - c**2)
        p1 = Point(x.subs(cos(t), c), y.subs(sin(t), s))
        if p1 in self:
            return p1
    raise GeometryError(
        'Having problems generating a point in the ellipse.')
def equation(self, x='x', y='y'):
    """The equation of the ellipse.

    Parameters
    ==========

    x : str, optional
        Label for the x-axis. Default value is 'x'.
    y : str, optional
        Label for the y-axis. Default value is 'y'.

    Returns
    =======

    equation : sympy expression

    See Also
    ========

    arbitrary_point : Returns parameterized point on ellipse

    Examples
    ========

    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(1, 0), 3, 2)
    >>> e1.equation()
    y**2/4 + (x/3 - 1/3)**2 - 1
    """
    xs = _symbol(x)
    ys = _symbol(y)
    # Standard implicit form: ((x-cx)/a)**2 + ((y-cy)/b)**2 - 1 == 0.
    dx = (xs - self.center.x) / self.hradius
    dy = (ys - self.center.y) / self.vradius
    return dx**2 + dy**2 - 1
def _do_line_intersection(self, o):
    """
    Find the intersection of a LinearEntity and the ellipse.

    All LinearEntities are treated as a line and filtered at
    the end to see that they lie in o.
    """
    # Substitute the line's parametric form into the ellipse equation to
    # get a quadratic a*t**2 + 2*b*t + c = 0 in the line parameter t.
    hr_sq = self.hradius ** 2
    vr_sq = self.vradius ** 2
    lp = o.points

    ldir = lp[1] - lp[0]
    diff = lp[0] - self.center
    mdir = Point(ldir.x/hr_sq, ldir.y/vr_sq)
    mdiff = Point(diff.x/hr_sq, diff.y/vr_sq)

    a = ldir.dot(mdir)
    b = ldir.dot(mdiff)
    c = diff.dot(mdiff) - 1
    det = simplify(b*b - a*c)

    result = []
    if det == 0:
        # Discriminant zero: the line is tangent; a single point.
        t = -b / a
        result.append(lp[0] + (lp[1] - lp[0]) * t)
    # Definite and potential symbolic intersections are allowed.
    elif (det > 0) != False:
        root = sqrt(det)
        t_a = (-b - root) / a
        t_b = (-b + root) / a
        result.append( lp[0] + (lp[1] - lp[0]) * t_a )
        result.append( lp[0] + (lp[1] - lp[0]) * t_b )
    # Keep only points that actually lie on o (segment/ray filtering).
    return [r for r in result if r in o]
def _do_ellipse_intersection(self, o):
    """The intersection of an ellipse with another ellipse or a circle.

    Private helper method for `intersection`.
    """
    # Solve the pair of implicit equations in real dummy coordinates.
    u = Dummy('x', real=True)
    v = Dummy('y', real=True)
    system = [self.equation(u, v), o.equation(u, v)]
    solutions = solve(system, [u, v])
    # Deduplicate the solutions and wrap each one as a Point.
    return [Point(*sol) for sol in list(uniq(solutions))]
def intersection(self, o):
    """The intersection of this ellipse and another geometrical entity
    `o`.

    Parameters
    ==========

    o : GeometryEntity

    Returns
    =======

    intersection : list of GeometryEntity objects

    Notes
    -----
    Currently supports intersections with Point, Line, Segment, Ray,
    Circle and Ellipse types.

    See Also
    ========

    sympy.geometry.entity.GeometryEntity

    Examples
    ========

    >>> from sympy import Ellipse, Point, Line, sqrt
    >>> e = Ellipse(Point(0, 0), 5, 7)
    >>> e.intersection(Point(0, 0))
    []
    >>> e.intersection(Point(5, 0))
    [Point(5, 0)]
    >>> e.intersection(Line(Point(0,0), Point(0, 1)))
    [Point(0, -7), Point(0, 7)]
    >>> e.intersection(Line(Point(5,0), Point(5, 1)))
    [Point(5, 0)]
    >>> e.intersection(Line(Point(6,0), Point(6, 1)))
    []
    >>> e = Ellipse(Point(-1, 0), 4, 3)
    >>> e.intersection(Ellipse(Point(1, 0), 4, 3))
    [Point(0, -3*sqrt(15)/4), Point(0, 3*sqrt(15)/4)]
    >>> e.intersection(Ellipse(Point(5, 0), 4, 3))
    [Point(2, -3*sqrt(7)/4), Point(2, 3*sqrt(7)/4)]
    >>> e.intersection(Ellipse(Point(100500, 0), 4, 3))
    []
    >>> e.intersection(Ellipse(Point(0, 0), 3, 4))
    [Point(-363/175, -48*sqrt(111)/175), Point(-363/175, 48*sqrt(111)/175), Point(3, 0)]
    >>> e.intersection(Ellipse(Point(-1, 0), 3, 4))
    [Point(-17/5, -12/5), Point(-17/5, 12/5), Point(7/5, -12/5), Point(7/5, 12/5)]
    """
    if isinstance(o, Point):
        if o in self:
            return [o]
        else:
            return []
    elif isinstance(o, LinearEntity):
        # LinearEntity may be a ray/segment, so check the points
        # of intersection for coincidence first
        return self._do_line_intersection(o)
    elif isinstance(o, Circle):
        return self._do_ellipse_intersection(o)
    elif isinstance(o, Ellipse):
        if o == self:
            # Identical ellipses: the intersection is the ellipse itself.
            return self
        else:
            return self._do_ellipse_intersection(o)
    # Fall back to the other entity's intersection logic.
    return o.intersection(self)
def evolute(self, x='x', y='y'):
    """The equation of evolute of the ellipse.

    Parameters
    ==========

    x : str, optional
        Label for the x-axis. Default value is 'x'.
    y : str, optional
        Label for the y-axis. Default value is 'y'.

    Returns
    =======

    equation : sympy expression

    Examples
    ========

    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(1, 0), 3, 2)
    >>> e1.evolute()
    2**(2/3)*y**(2/3) + (3*x - 3)**(2/3) - 5**(2/3)
    """
    if len(self.args) != 3:
        raise NotImplementedError('Evolute of arbitrary Ellipse is not supported.')
    x = _symbol(x)
    y = _symbol(y)
    # Implicit equation of the evolute (astroid-like curve):
    # (a*(x-cx))**(2/3) + (b*(y-cy))**(2/3) = (a**2 - b**2)**(2/3).
    t1 = (self.hradius*(x - self.center.x))**Rational(2, 3)
    t2 = (self.vradius*(y - self.center.y))**Rational(2, 3)
    return t1 + t2 - (self.hradius**2 - self.vradius**2)**Rational(2, 3)
def __eq__(self, o):
    """Is the other GeometryEntity the same as this ellipse?"""
    # Anything that is not a GeometryEntity can never be equal.
    if not isinstance(o, GeometryEntity):
        return False
    # Equal ellipses share center and both radii.
    return (self.center == o.center
            and self.hradius == o.hradius
            and self.vradius == o.vradius)
def __hash__(self):
    """Delegate to the GeometryEntity hash; required because __eq__ is overridden."""
    return super(Ellipse, self).__hash__()
def __contains__(self, o):
    """Return True if ``o`` is a Point on this ellipse or an equal Ellipse."""
    if isinstance(o, Point):
        x = Dummy('x', real=True)
        y = Dummy('y', real=True)

        res = self.equation(x, y).subs({x: o.x, y: o.y})
        # Identity check against the S.Zero singleton: only an exact
        # symbolic zero counts as membership.
        return trigsimp(simplify(res)) is S.Zero
    elif isinstance(o, Ellipse):
        return self == o
    return False
class Circle(Ellipse):
    """A circle in space.

    Constructed simply from a center and a radius, or from three
    non-collinear points.

    Parameters
    ==========

    center : Point
    radius : number or sympy expression
    points : sequence of three Points

    Attributes
    ==========

    radius (synonymous with hradius, vradius, major and minor)
    circumference
    equation

    Raises
    ======

    GeometryError
        When trying to construct circle from three collinear points.
        When trying to construct circle from incorrect parameters.

    See Also
    ========

    Ellipse, sympy.geometry.point.Point

    Examples
    ========

    >>> from sympy.geometry import Point, Circle
    >>> # a circle constructed from a center and radius
    >>> c1 = Circle(Point(0, 0), 5)
    >>> c1.hradius, c1.vradius, c1.radius
    (5, 5, 5)

    >>> # a circle constructed from three points
    >>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0))
    >>> c2.hradius, c2.vradius, c2.radius, c2.center
    (sqrt(2)/2, sqrt(2)/2, sqrt(2)/2, Point(1/2, 1/2))
    """

    def __new__(cls, *args, **kwargs):
        c, r = None, None
        if len(args) == 3:
            # Three points: use the circumcircle of the triangle they form.
            args = [Point(a) for a in args]
            if Point.is_collinear(*args):
                raise GeometryError(
                    "Cannot construct a circle from three collinear points")
            from .polygon import Triangle
            t = Triangle(*args)
            c = t.circumcenter
            r = t.circumradius
        elif len(args) == 2:
            # Assume (center, radius) pair
            c = Point(args[0])
            r = sympify(args[1])

        if not (c is None or r is None):
            return GeometryEntity.__new__(cls, c, r, **kwargs)

        raise GeometryError("Circle.__new__ received unknown arguments")

    @property
    def radius(self):
        """The radius of the circle.

        Returns
        =======

        radius : number or sympy expression

        See Also
        ========

        Ellipse.major, Ellipse.minor, Ellipse.hradius, Ellipse.vradius

        Examples
        ========

        >>> from sympy import Point, Circle
        >>> c1 = Circle(Point(3, 4), 6)
        >>> c1.radius
        6
        """
        # args is (center, radius); the radius was stored by __new__.
        return self.args[1]

    @property
    def vradius(self):
        """
        This Ellipse property is an alias for the Circle's radius.

        Whereas hradius, major and minor can use Ellipse's conventions,
        the vradius does not exist for a circle. It is always a positive
        value in order that the Circle, like Polygons, will have an
        area that can be positive or negative as determined by the sign
        of the hradius.

        Examples
        ========

        >>> from sympy import Point, Circle
        >>> c1 = Circle(Point(3, 4), 6)
        >>> c1.vradius
        6
        """
        return abs(self.radius)

    @property
    def circumference(self):
        """The circumference of the circle.

        Returns
        =======

        circumference : number or SymPy expression

        Examples
        ========

        >>> from sympy import Point, Circle
        >>> c1 = Circle(Point(3, 4), 6)
        >>> c1.circumference
        12*pi
        """
        return 2 * S.Pi * self.radius

    def equation(self, x='x', y='y'):
        """The equation of the circle.

        Parameters
        ==========

        x : str or Symbol, optional
            Default value is 'x'.
        y : str or Symbol, optional
            Default value is 'y'.

        Returns
        =======

        equation : SymPy expression

        Examples
        ========

        >>> from sympy import Point, Circle
        >>> c1 = Circle(Point(0, 0), 5)
        >>> c1.equation()
        x**2 + y**2 - 25
        """
        x = _symbol(x)
        y = _symbol(y)
        # Implicit form: (x-cx)**2 + (y-cy)**2 - r**2 == 0.
        t1 = (x - self.center.x)**2
        t2 = (y - self.center.y)**2
        return t1 + t2 - self.major**2

    def intersection(self, o):
        """The intersection of this circle with another geometrical entity.

        Parameters
        ==========

        o : GeometryEntity

        Returns
        =======

        intersection : list of GeometryEntities

        Examples
        ========

        >>> from sympy import Point, Circle, Line, Ray
        >>> p1, p2, p3 = Point(0, 0), Point(5, 5), Point(6, 0)
        >>> p4 = Point(5, 0)
        >>> c1 = Circle(p1, 5)
        >>> c1.intersection(p2)
        []
        >>> c1.intersection(p4)
        [Point(5, 0)]
        >>> c1.intersection(Ray(p1, p2))
        [Point(5*sqrt(2)/2, 5*sqrt(2)/2)]
        >>> c1.intersection(Line(p2, p3))
        []
        """
        if isinstance(o, Circle):
            if o.center == self.center:
                if o.radius == self.radius:
                    # Identical circles: the intersection is the circle.
                    return o
                # Concentric with different radii: no intersection.
                return []
            dx, dy = (o.center - self.center).args
            d = sqrt(simplify(dy**2 + dx**2))
            R = o.radius + self.radius
            # Too far apart, or one circle nested inside the other.
            if d > R or d < abs(self.radius - o.radius):
                return []

            # Classic two-circle construction: a is the distance from
            # self.center to the chord joining the intersection points,
            # h is half the chord length.
            a = simplify((self.radius**2 - o.radius**2 + d**2) / (2*d))

            x2 = self.center.x + (dx * a/d)
            y2 = self.center.y + (dy * a/d)

            h = sqrt(simplify(self.radius**2 - a**2))
            rx = -dy * (h/d)
            ry = dx * (h/d)

            xi_1 = simplify(x2 + rx)
            xi_2 = simplify(x2 - rx)
            yi_1 = simplify(y2 + ry)
            yi_2 = simplify(y2 - ry)

            ret = [Point(xi_1, yi_1)]
            if xi_1 != xi_2 or yi_1 != yi_2:
                # Tangent circles yield a single point; otherwise two.
                ret.append(Point(xi_2, yi_2))
            return ret

        return Ellipse.intersection(self, o)

    def scale(self, x=1, y=1, pt=None):
        """Override GeometryEntity.scale since the radius
        is not a GeometryEntity.

        Examples
        ========

        >>> from sympy import Circle
        >>> Circle((0, 0), 1).scale(2, 2)
        Circle(Point(0, 0), 2)
        >>> Circle((0, 0), 1).scale(2, 4)
        Ellipse(Point(0, 0), 2, 4)
        """
        c = self.center
        if pt:
            # Scale about an arbitrary point by translating it to the
            # origin, scaling, and translating back.
            pt = Point(pt)
            return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
        c = c.scale(x, y)
        x, y = [abs(i) for i in (x, y)]
        if x == y:
            # Uniform scaling keeps the result a Circle.
            return self.func(c, x*self.radius)
        # Non-uniform scaling turns the circle into an Ellipse.
        h = v = self.radius
        return Ellipse(c, hradius=h*x, vradius=v*y)

    def reflect(self, line):
        """Override GeometryEntity.reflect since the radius
        is not a GeometryEntity.

        Examples
        ========

        >>> from sympy import Circle, Line
        >>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
        Circle(Point(1, 0), -1)
        """
        c = self.center
        c = c.reflect(line)
        # The radius is negated so the reflected circle's orientation flips.
        return self.func(c, -self.radius)
from .polygon import Polygon
| bsd-3-clause |
shinyChen/browserscope | categories/richtext/handlers.py | 9 | 2481 | #!/usr/bin/python2.5
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for Rich Text Tests"""
__author__ = 'annie.sullivan@gmail.com (Annie Sullivan)'
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
import django
from django import http
from django import shortcuts
from django.template import add_to_builtins
add_to_builtins('base.custom_filters')
# Shared stuff
from categories import all_test_sets
from base import decorators
from base import util
CATEGORY = 'richtext'
def About(request):
  """Render the About page for the Rich Text test category.

  Args:
    request: the Django HttpRequest being served.

  Returns:
    The HttpResponse produced by util.About for this category.
  """
  # This HTML is rendered verbatim on the About page.
  # Fixed typos in the original copy: "browers'" -> "browsers'" and the
  # doubled "to to paste" -> "to paste".
  overview = """These tests cover browsers' implementations of
  <a href="http://blog.whatwg.org/the-road-to-html-5-contenteditable">contenteditable</a>
  for basic rich text formatting commands. Most browser implementations do very
  well at editing the HTML which is generated by their own execCommands. But a
  big problem happens when developers try to make cross-browser web
  applications using contenteditable - most browsers are not able to correctly
  change formatting generated by other browsers. On top of that, most browsers
  allow users to paste arbitrary HTML from other webpages into a
  contenteditable region, which is even harder for browsers to properly
  format. These tests check how well the execCommand, queryCommandState,
  and queryCommandValue functions work with different types of HTML. Please
  note that these are WYSIWYG editing tests, not semantic editing tests. Any
  HTML which produces a given visual style should be changeable via the
  execCommand for that style."""
  return util.About(request, CATEGORY, category_title='Rich Text',
                    overview=overview, show_hidden=False)
def EditableIframe(request):
  """Render the contenteditable iframe page with an empty context.

  Args:
    request: the Django HttpRequest being served (unused by the template).
  """
  context = {}
  template_path = 'richtext/templates/editable.html'
  return shortcuts.render_to_response(template_path, context)
| apache-2.0 |
niceguydave/cmsplugin-filer | cmsplugin_filer_file/migrations/0002_auto__add_field_filerfile_target_blank.py | 14 | 9411 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``target_blank`` flag to FilerFile."""

    def forwards(self, orm):
        """Apply the migration: create the new boolean column."""
        # Adding field 'FilerFile.target_blank'
        db.add_column('cmsplugin_filerfile', 'target_blank',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the column added by forwards()."""
        # Deleting field 'FilerFile.target_blank'
        db.delete_column('cmsplugin_filerfile', 'target_blank')

    # Frozen ORM snapshot auto-generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 29, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cmsplugin_filer_file.filerfile': {
            'Meta': {'object_name': 'FilerFile', 'db_table': "'cmsplugin_filerfile'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
            'target_blank': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'filer.file': {
            'Meta': {'object_name': 'File'},
            '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.folder': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['cmsplugin_filer_file']
michaelhogg/SublimeTextXdebug | main.py | 3 | 24300 | import sublime
import sublime_plugin
import os
import sys
import threading
# Load modules
try:
from .xdebug import *
except:
from xdebug import *
# Set Python libraries from system installation
python_path = config.get_value(S.KEY_PYTHON_PATH)
if python_path:
    # Normalize a user-supplied path to forward slashes before os.path handling
    python_path = os.path.normpath(python_path.replace("\\", "/"))
    # lib-dynload holds the compiled extension modules of a system Python install
    python_dynload = os.path.join(python_path, 'lib-dynload')
    if python_dynload not in sys.path:
        sys.path.append(python_dynload)
# Define path variables
try:
    S.PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
    S.PACKAGE_FOLDER = os.path.basename(S.PACKAGE_PATH)
except:
    # NOTE(review): presumably guards environments where __file__ is undefined -- confirm
    pass
# Initialize package
# Delay start-up so the Sublime plugin host has finished loading its modules
sublime.set_timeout(lambda: load.xdebug(), 1000)
# Define event listener for view(s)
class EventListener(sublime_plugin.EventListener):
    """Sublime event hooks that keep breakpoint markers and debug panels in sync."""
    def on_load(self, view):
        """On file load, scroll to a pending row (if any) and draw breakpoint markers."""
        filename = view.file_name()
        # Scroll the view to current breakpoint line
        if filename and filename in S.SHOW_ROW_ONLOAD:
            V.show_at_row(view, S.SHOW_ROW_ONLOAD[filename])
            # The entry is one-shot; drop it once the view has been positioned
            del S.SHOW_ROW_ONLOAD[filename]
        # Render breakpoint markers
        sublime.set_timeout(lambda: V.render_regions(view), 0)
    def on_activated(self, view):
        # Render breakpoint markers
        V.render_regions(view)
    def on_post_save(self, view):
        filename = view.file_name()
        # Render breakpoint markers
        V.render_regions(view)
        # Update config when settings file or sublime-project has been saved
        if filename and (filename.endswith(S.FILE_PACKAGE_SETTINGS) or filename.endswith('.sublime-project')):
            config.load_package_values()
            config.load_project_values()
        #TODO: Save new location of breakpoints on save
    def on_selection_modified(self, view):
        """Dispatch clicks inside the debugger panels to their toggle/show handlers."""
        # Show details in output panel of selected variable in context window
        if view.name() == V.TITLE_WINDOW_CONTEXT:
            V.show_context_output(view)
        elif view.name() == V.TITLE_WINDOW_BREAKPOINT:
            V.toggle_breakpoint(view)
        elif view.name() == V.TITLE_WINDOW_STACK:
            V.toggle_stack(view)
        elif view.name() == V.TITLE_WINDOW_WATCH:
            V.toggle_watch(view)
        else:
            # Selection change in an ordinary view needs no action
            pass
class XdebugBreakpointCommand(sublime_plugin.TextCommand):
    """
    Add/Remove breakpoint(s) for rows (line numbers) in selection.
    """
    def run(self, edit, rows=None, condition=None, enabled=None, filename=None):
        """Toggle, enable or disable breakpoints.

        Keyword arguments:
        rows -- Line numbers to operate on; defaults to the current selection.
        condition -- Optional expression stored with the breakpoint.
        enabled -- True forces enable, False forces disable, None toggles
                   (an existing breakpoint is removed, a missing one created).
        filename -- File the rows belong to; defaults to this view's file.
        """
        # Get filename in current view and check if is a valid filename
        if filename is None:
            filename = self.view.file_name()
        if not filename or not os.path.isfile(filename):
            return
        # Add entry for file in breakpoint data
        if filename not in S.BREAKPOINT:
            S.BREAKPOINT[filename] = {}
        # When no rows are defined, use selected rows (line numbers), filtering empty rows
        if rows is None:
            rows = V.region_to_rows(self.view.sel(), filter_empty=True)
        # Loop through rows
        for row in rows:
            expression = None
            if condition is not None and len(condition.strip()) > 0:
                expression = condition
            # Check if breakpoint exists
            breakpoint_exists = row in S.BREAKPOINT[filename]
            # Disable/Remove breakpoint
            if breakpoint_exists:
                # Tell the connected engine to drop the breakpoint it knows about
                if S.BREAKPOINT[filename][row]['id'] is not None and session.is_connected(show_status=True):
                    async_session = session.SocketHandler(session.ACTION_REMOVE_BREAKPOINT, breakpoint_id=S.BREAKPOINT[filename][row]['id'])
                    async_session.start()
                if enabled is False:
                    S.BREAKPOINT[filename][row]['enabled'] = False
                elif enabled is None:
                    del S.BREAKPOINT[filename][row]
            # Add/Enable breakpoint
            if not breakpoint_exists or enabled is True:
                if row not in S.BREAKPOINT[filename]:
                    S.BREAKPOINT[filename][row] = { 'id': None, 'enabled': True, 'expression': expression }
                else:
                    S.BREAKPOINT[filename][row]['enabled'] = True
                    if condition is not None:
                        S.BREAKPOINT[filename][row]['expression'] = expression
                    else:
                        # Keep the previously stored condition when none was given
                        expression = S.BREAKPOINT[filename][row]['expression']
                if session.is_connected(show_status=True):
                    async_session = session.SocketHandler(session.ACTION_SET_BREAKPOINT, filename=filename, lineno=row, expression=expression)
                    async_session.start()
        # Render breakpoint markers
        V.render_regions()
        # Update breakpoint list
        try:
            if V.has_debug_view(V.TITLE_WINDOW_BREAKPOINT):
                V.show_content(V.DATA_BREAKPOINT)
        except:
            pass
        # Save breakpoint data to file
        util.save_breakpoint_data()
class XdebugConditionalBreakpointCommand(sublime_plugin.TextCommand):
    """
    Add conditional breakpoint(s) for rows (line numbers) in selection.
    """
    def run(self, edit):
        # Ask for the condition first; the breakpoint is created once confirmed
        window = self.view.window()
        window.show_input_panel('Breakpoint condition', '', self.on_done, self.on_change, self.on_cancel)
    def on_done(self, condition):
        # Create (or re-enable) breakpoints on the selected rows with this condition
        self.view.run_command('xdebug_breakpoint', {'condition': condition, 'enabled': True})
    def on_change(self, line):
        # Typing in the panel needs no live handling
        pass
    def on_cancel(self):
        # Nothing to clean up when the input panel is dismissed
        pass
class XdebugClearBreakpointsCommand(sublime_plugin.TextCommand):
    """
    Clear breakpoints in selected view.
    """
    def run(self, edit):
        filename = self.view.file_name()
        if filename and filename in S.BREAKPOINT:
            rows = H.dictionary_keys(S.BREAKPOINT[filename])
            # Passing rows without 'enabled' toggles (i.e. removes) each breakpoint
            self.view.run_command('xdebug_breakpoint', {'rows': rows, 'filename': filename})
        # Continue debug session when breakpoints are cleared on current script being debugged
        if S.BREAKPOINT_ROW and self.view.file_name() == S.BREAKPOINT_ROW['filename']:
            self.view.window().run_command('xdebug_execute', {'command': 'run'})
    def _has_breakpoints(self):
        # Shared predicate: the command applies only when this file has breakpoints
        filename = self.view.file_name()
        return bool(filename and S.BREAKPOINT and filename in S.BREAKPOINT and S.BREAKPOINT[filename])
    def is_enabled(self):
        return self._has_breakpoints()
    def is_visible(self):
        return self._has_breakpoints()
class XdebugClearAllBreakpointsCommand(sublime_plugin.WindowCommand):
    """
    Clear breakpoints from all views.
    """
    def run(self):
        view = sublime.active_window().active_view()
        # Unable to run the breakpoint command when no view is available
        if view is None:
            return
        # Iterate over a snapshot: xdebug_breakpoint mutates the per-file row dicts
        for filename, breakpoint_data in list(S.BREAKPOINT.items()):
            if breakpoint_data:
                rows = H.dictionary_keys(breakpoint_data)
                view.run_command('xdebug_breakpoint', {'rows': rows, 'filename': filename})
        # Continue debug session when breakpoints are cleared on current script being debugged
        self.window.run_command('xdebug_execute', {'command': 'run'})
    def _has_any_breakpoints(self):
        # Shared predicate: at least one file with at least one breakpoint
        if not S.BREAKPOINT:
            return False
        return any(bool(breakpoint_data) for breakpoint_data in S.BREAKPOINT.values())
    def is_enabled(self):
        return self._has_any_breakpoints()
    def is_visible(self):
        return self._has_any_breakpoints()
class XdebugRunToLineCommand(sublime_plugin.WindowCommand):
    """
    Run script to current selected line in view, ignoring all other breakpoints.
    """
    def run(self):
        view = sublime.active_window().active_view()
        # Unable to run to line when no view available
        if view is None:
            return
        # Determine filename for current view and check if is a valid filename
        filename = view.file_name()
        if not filename or not os.path.isfile(filename):
            return
        # Get first line from selected rows and make sure it is not empty
        # NOTE(review): other callers pass view.sel() as the first argument to
        # region_to_rows -- verify it defaults to the active selection here.
        rows = V.region_to_rows(filter_empty=True)
        if rows is None or len(rows) == 0:
            return
        lineno = rows[0]
        # Check if breakpoint does not already exists
        breakpoint_exists = False
        if filename in S.BREAKPOINT and lineno in S.BREAKPOINT[filename]:
            breakpoint_exists = True
        # Store line number and filename for temporary breakpoint in session
        # (the session start/stop commands remove this one-shot breakpoint later)
        if not breakpoint_exists:
            S.BREAKPOINT_RUN = { 'filename': filename, 'lineno': lineno }
        # Set breakpoint and run script
        view.run_command('xdebug_breakpoint', {'rows': [lineno], 'enabled': True, 'filename': filename})
        self.window.run_command('xdebug_execute', {'command': 'run'})
    def is_enabled(self):
        # Only available while paused on a breakpoint in a connected session
        return S.BREAKPOINT_ROW is not None and session.is_connected()
    def is_visible(self):
        return S.BREAKPOINT_ROW is not None and session.is_connected()
class XdebugSessionStartCommand(sublime_plugin.WindowCommand):
    """
    Start Xdebug session, listen for request response from debugger engine.

    Keyword arguments:
    launch_browser -- Also open the configured URL in the browser.
    restart -- Set when called from XdebugSessionRestartCommand; suppresses
               the automatic browser launch.
    """
    def run(self, launch_browser=False, restart=False):
        # Define new session with DBGp protocol
        S.SESSION = protocol.Protocol()
        S.SESSION_BUSY = False
        S.BREAKPOINT_EXCEPTION = None
        S.BREAKPOINT_ROW = None
        S.CONTEXT_DATA.clear()
        async_session = session.SocketHandler(session.ACTION_WATCH, check_watch_view=True)
        async_session.start()
        # Remove temporary breakpoint
        if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] in S.BREAKPOINT and S.BREAKPOINT_RUN['lineno'] in S.BREAKPOINT[S.BREAKPOINT_RUN['filename']]:
            self.window.active_view().run_command('xdebug_breakpoint', {'rows': [S.BREAKPOINT_RUN['lineno']], 'filename': S.BREAKPOINT_RUN['filename']})
        S.BREAKPOINT_RUN = None
        # Set debug layout
        self.window.run_command('xdebug_layout')
        # Launch browser
        if launch_browser or (config.get_value(S.KEY_LAUNCH_BROWSER) and not restart):
            util.launch_browser()
        # Start thread which will run method that listens for response on configured port
        threading.Thread(target=self.listen).start()
    def listen(self):
        """Worker-thread body: block until the debugger engine connects."""
        # Start listening for response from debugger engine
        S.SESSION.listen()
        # On connect run method which handles connection
        if S.SESSION and S.SESSION.connected:
            # Hop back to the main thread before touching the UI
            sublime.set_timeout(self.connected, 0)
    def connected(self):
        """Handle a fresh engine connection on the main thread."""
        sublime.set_timeout(lambda: sublime.status_message('Xdebug: Connected'), 100)
        async_session = session.SocketHandler(session.ACTION_INIT)
        async_session.start()
    def is_enabled(self):
        # Cannot start a second session while one already exists
        if S.SESSION:
            return False
        return True
    def is_visible(self, launch_browser=False):
        if S.SESSION:
            return False
        # Hide the browser variant when it would be redundant or has no URL to open
        if launch_browser and (config.get_value(S.KEY_LAUNCH_BROWSER) or not config.get_value(S.KEY_URL)):
            return False
        return True
class XdebugSessionRestartCommand(sublime_plugin.WindowCommand):
    """
    Restart the active debugging session: stop it, then start listening again.
    """
    def run(self):
        # restart=True keeps both sub-commands from launching the browser
        self.window.run_command('xdebug_session_stop', {'restart': True})
        self.window.run_command('xdebug_session_start', {'restart': True})
        notice = 'Xdebug: Restarted debugging session. Reload page to continue debugging.'
        sublime.set_timeout(lambda: sublime.status_message(notice), 100)
    def is_enabled(self):
        # A restart only makes sense while a session exists
        return bool(S.SESSION)
    def is_visible(self):
        return bool(S.SESSION)
class XdebugSessionStopCommand(sublime_plugin.WindowCommand):
    """
    Stop Xdebug session, close connection and stop listening to debugger engine.

    Keyword arguments:
    close_windows -- Force closing of the debug-related windows.
    launch_browser -- Also open the configured URL in the browser.
    restart -- Set when called from XdebugSessionRestartCommand; suppresses
               the automatic browser launch.
    """
    def run(self, close_windows=False, launch_browser=False, restart=False):
        try:
            S.SESSION.clear()
        except:
            # Session may already be gone or half torn down; reset state regardless
            pass
        finally:
            S.SESSION = None
            S.SESSION_BUSY = False
            S.BREAKPOINT_EXCEPTION = None
            S.BREAKPOINT_ROW = None
            S.CONTEXT_DATA.clear()
            async_session = session.SocketHandler(session.ACTION_WATCH, check_watch_view=True)
            async_session.start()
        # Remove temporary breakpoint
        if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] in S.BREAKPOINT and S.BREAKPOINT_RUN['lineno'] in S.BREAKPOINT[S.BREAKPOINT_RUN['filename']]:
            self.window.active_view().run_command('xdebug_breakpoint', {'rows': [S.BREAKPOINT_RUN['lineno']], 'filename': S.BREAKPOINT_RUN['filename']})
        S.BREAKPOINT_RUN = None
        # Launch browser
        if launch_browser or (config.get_value(S.KEY_LAUNCH_BROWSER) and not restart):
            util.launch_browser()
        # Close or reset debug layout
        if close_windows or config.get_value(S.KEY_CLOSE_ON_STOP):
            if config.get_value(S.KEY_DISABLE_LAYOUT):
                self.window.run_command('xdebug_layout', {'close_windows': True})
            else:
                self.window.run_command('xdebug_layout', {'restore': True})
        else:
            self.window.run_command('xdebug_layout')
        # Render breakpoint markers
        V.render_regions()
    def is_enabled(self):
        if S.SESSION:
            return True
        return False
    def is_visible(self, close_windows=False, launch_browser=False):
        if S.SESSION:
            # Hide menu variants that duplicate behaviour already configured
            if close_windows and config.get_value(S.KEY_CLOSE_ON_STOP):
                return False
            if launch_browser and (config.get_value(S.KEY_LAUNCH_BROWSER) or not config.get_value(S.KEY_URL)):
                return False
            return True
        return False
class XdebugExecuteCommand(sublime_plugin.WindowCommand):
    """
    Send a command to the debugger engine; breakpoints and session reload are
    handled by the socket handler once execution completes.

    Keyword arguments:
    command -- Command to send to debugger engine.
    """
    def run(self, command=None):
        # Hand the command to a worker thread so the UI stays responsive
        handler = session.SocketHandler(session.ACTION_EXECUTE, command=command)
        handler.start()
    def is_enabled(self):
        # Only meaningful while a debugger session is connected
        return session.is_connected()
class XdebugContinueCommand(sublime_plugin.WindowCommand):
    """
    Continuation commands when on breakpoint, show menu by default if no command has been passed as argument.

    Keyword arguments:
    command -- Continuation command to execute.
    """
    commands = H.new_dictionary()
    commands[dbgp.RUN] = 'Run'
    commands[dbgp.STEP_OVER] = 'Step Over'
    commands[dbgp.STEP_INTO] = 'Step Into'
    commands[dbgp.STEP_OUT] = 'Step Out'
    commands[dbgp.STOP] = 'Stop'
    commands[dbgp.DETACH] = 'Detach'
    # Parallel key/label lists so a quick panel index maps back to a DBGp command
    command_index = H.dictionary_keys(commands)
    command_options = H.dictionary_values(commands)
    def run(self, command=None):
        # Show the quick panel when no (known) command was given
        if not command or command not in self.commands:
            self.window.show_quick_panel(self.command_options, self.callback)
        else:
            self.callback(command)
    def callback(self, command):
        """Translate a quick panel selection (int) into a DBGp command and execute it."""
        # -1 means the quick panel was cancelled; ignore input while session is busy
        if command == -1 or S.SESSION_BUSY:
            return
        if isinstance(command, int):
            command = self.command_index[command]
        self.window.run_command('xdebug_execute', {'command': command})
    def is_enabled(self):
        # Continuation is only possible while paused on a breakpoint
        return S.BREAKPOINT_ROW is not None and session.is_connected()
    def is_visible(self):
        return S.BREAKPOINT_ROW is not None and session.is_connected()
class XdebugStatusCommand(sublime_plugin.WindowCommand):
    """
    Ask the debugger engine for its current status.
    """
    def run(self):
        # The status query runs on a worker thread via the session socket handler
        status_handler = session.SocketHandler(session.ACTION_STATUS)
        status_handler.start()
    def is_enabled(self):
        return session.is_connected()
    def is_visible(self):
        return session.is_connected()
class XdebugEvaluateCommand(sublime_plugin.WindowCommand):
    """
    Prompt for an expression and have the debugger engine evaluate it.
    """
    def run(self):
        self.window.show_input_panel('Evaluate', '', self.on_done, self.on_change, self.on_cancel)
    def on_done(self, expression):
        # Forward the entered expression to the engine on a worker thread
        evaluate_handler = session.SocketHandler(session.ACTION_EVALUATE, expression=expression)
        evaluate_handler.start()
    def on_change(self, expression):
        # Live-typing updates are ignored
        pass
    def on_cancel(self):
        # Nothing to clean up when the input panel is dismissed
        pass
    def is_enabled(self):
        return session.is_connected()
    def is_visible(self):
        return session.is_connected()
class XdebugUserExecuteCommand(sublime_plugin.WindowCommand):
    """
    Open input panel, allowing user to execute arbitrary command according to DBGp protocol.
    Note: Transaction ID is automatically generated by session module.
    """
    def run(self):
        self.window.show_input_panel('DBGp command', '', self.on_done, self.on_change, self.on_cancel)
    def on_done(self, line):
        # First word is the DBGp command; the remainder (possibly empty) its arguments
        command, _, args = line.partition(' ')
        user_handler = session.SocketHandler(session.ACTION_USER_EXECUTE, command=command, args=args)
        user_handler.start()
    def on_change(self, line):
        # Live-typing updates are ignored
        pass
    def on_cancel(self):
        # Nothing to clean up when the input panel is dismissed
        pass
    def is_enabled(self):
        return session.is_connected()
    def is_visible(self):
        return session.is_connected()
class XdebugWatchCommand(sublime_plugin.WindowCommand):
    """
    Add/Edit/Remove watch expression.

    Keyword arguments:
    clear -- Remove all watch expressions.
    edit -- Replace a chosen expression with a newly entered one.
    remove -- Delete a chosen expression.
    update -- Only refresh the watch view.
    """
    def run(self, clear=False, edit=False, remove=False, update=False):
        self.edit = edit
        self.remove = remove
        self.watch_index = None
        # Clear watch expressions in list
        if clear:
            try:
                # Python 3.3+
                S.WATCH.clear()
            except AttributeError:
                # list.clear() does not exist on older Python versions
                del S.WATCH[:]
            # Update watch view
            self.update_view()
        # Edit or remove watch expression
        elif edit or remove:
            # Generate list with available watch expressions
            watch_options = []
            # Fix: previous code passed an unused 'index' argument to format()
            for item in S.WATCH:
                watch_item = '[{status}] - {expression}'.format(expression=item['expression'], status='enabled' if item['enabled'] else 'disabled')
                watch_options.append(watch_item)
            self.window.show_quick_panel(watch_options, self.callback)
        elif update:
            self.update_view()
        # Set watch expression
        else:
            self.set_expression()
    def callback(self, index):
        """Handle the quick panel choice made for edit/remove."""
        # User has cancelled action
        if index == -1:
            return
        # Make sure index is valid integer
        if isinstance(index, int) or H.is_digit(index):
            self.watch_index = int(index)
        # Edit watch expression
        if self.edit:
            self.set_expression()
        # Remove watch expression
        else:
            S.WATCH.pop(self.watch_index)
            # Update watch view
            self.update_view()
    def on_done(self, expression):
        """Store the entered expression, either as new entry or as edit."""
        # User did not set expression
        if not expression:
            return
        # Check if expression is not already defined
        matches = [x for x in S.WATCH if x['expression'] == expression]
        if matches:
            sublime.status_message('Xdebug: Watch expression already defined.')
            return
        # Add/Edit watch expression in session
        watch = {'expression': expression, 'enabled': True, 'value': None, 'type': None}
        if self.watch_index is not None and isinstance(self.watch_index, int):
            try:
                S.WATCH[self.watch_index]['expression'] = expression
            except:
                # Remembered index no longer valid; insert at that position instead
                S.WATCH.insert(self.watch_index, watch)
        else:
            S.WATCH.append(watch)
        # Update watch view
        self.update_view()
    def on_change(self, line):
        pass
    def on_cancel(self):
        pass
    def set_expression(self):
        # Show user input for setting watch expression
        self.window.show_input_panel('Watch expression', '', self.on_done, self.on_change, self.on_cancel)
    def update_view(self):
        """Refresh the watch view asynchronously and persist watch data."""
        async_session = session.SocketHandler(session.ACTION_WATCH, check_watch_view=True)
        async_session.start()
        # Save watch data to file
        util.save_watch_data()
    def is_visible(self, clear=False, edit=False, remove=False):
        # Hide clear/edit/remove entries when there is nothing to operate on
        if (clear or edit or remove) and not S.WATCH:
            return False
        return True
class XdebugViewUpdateCommand(sublime_plugin.TextCommand):
    """
    Update content of sublime.Edit object in view, instead of using begin_edit/end_edit.

    Keyword arguments:
    data -- Content data to populate sublime.Edit object with.
    readonly -- Make sublime.Edit object read only.
    """
    def run(self, edit, data=None, readonly=False):
        target = self.view
        # Temporarily lift read-only so the buffer can be rewritten
        target.set_read_only(False)
        whole_buffer = sublime.Region(0, target.size())
        target.erase(edit, whole_buffer)
        if data is not None:
            target.insert(edit, 0, data)
        if readonly:
            target.set_read_only(True)
class XdebugLayoutCommand(sublime_plugin.WindowCommand):
    """
    Toggle between debug and default window layouts.

    Keyword arguments:
    restore -- Switch back to the normal (pre-debug) layout.
    close_windows -- Close all debug-related windows.
    keymap -- Set when triggered from a key binding; restores layout and closes windows.
    """
    def run(self, restore=False, close_windows=False, keymap=False):
        # Get active window
        window = sublime.active_window()
        # Do not restore layout or close windows while debugging
        if S.SESSION and (restore or close_windows or keymap):
            return
        # Set layout, unless user disabled debug layout
        if not config.get_value(S.KEY_DISABLE_LAYOUT):
            if restore or keymap:
                V.set_layout('normal')
            else:
                V.set_layout('debug')
        # Close all debugging related windows
        if close_windows or restore or keymap:
            V.close_debug_windows()
            return
        # Reset data in debugging related windows
        V.show_content(V.DATA_BREAKPOINT)
        V.show_content(V.DATA_CONTEXT)
        V.show_content(V.DATA_STACK)
        V.show_content(V.DATA_WATCH)
        panel = window.get_output_panel('xdebug')
        panel.run_command("xdebug_view_update")
        # Close output panel
        window.run_command('hide_panel', {"panel": 'output.xdebug'})
    def is_enabled(self, restore=False, close_windows=False):
        disable_layout = config.get_value(S.KEY_DISABLE_LAYOUT)
        # 'Close windows' only applies when the debug layout is disabled
        if close_windows and (not disable_layout or not V.has_debug_view()):
            return False
        if restore and disable_layout:
            return False
        return True
    def is_visible(self, restore=False, close_windows=False):
        if S.SESSION:
            return False
        disable_layout = config.get_value(S.KEY_DISABLE_LAYOUT)
        if close_windows and (not disable_layout or not V.has_debug_view()):
            return False
        if restore and disable_layout:
            return False
        if restore:
            try:
                # Only offer 'restore' while the debug layout is actually active
                return sublime.active_window().get_layout() == config.get_value(S.KEY_DEBUG_LAYOUT, S.LAYOUT_DEBUG)
            except:
                pass
        return True
class XdebugSettingsCommand(sublime_plugin.WindowCommand):
    """
    Show settings file.

    Keyword arguments:
    default -- Open the default settings shipped with the package instead of
               the user overrides.
    """
    def run(self, default=True):
        # Show default settings in package when available
        if default and S.PACKAGE_FOLDER is not None:
            package = S.PACKAGE_FOLDER
        # Otherwise show User defined settings
        else:
            package = "User"
        # Strip .sublime-package of package name for syntax file
        package_extension = ".sublime-package"
        if package.endswith(package_extension):
            package = package[:-len(package_extension)]
        # Open settings file (stray trailing semicolon removed)
        self.window.run_command('open_file', {'file': '${packages}/' + package + '/' + S.FILE_PACKAGE_SETTINGS})
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/event_handling/pick_event_demo.py | 4 | 6436 | #!/usr/bin/env python
"""
You can enable picking by setting the "picker" property of an artist
(for example, a matplotlib Line2D, Text, Patch, Polygon, AxesImage,
etc...)
There are a variety of meanings of the picker property
None - picking is disabled for this artist (default)
boolean - if True then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
float - if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
off an event if it's data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, for example, the indices of the data within
epsilon of the pick event
function - if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event.
hit, props = picker(artist, mouseevent)
to determine the hit test. If the mouse event is over the
artist, return hit=True and props is a dictionary of properties
you want added to the PickEvent attributes
After you have enabled an artist for picking by setting the "picker"
property, you need to connect to the figure canvas pick_event to get
pick callbacks on mouse press events. For example,
def pick_handler(event):
mouseevent = event.mouseevent
artist = event.artist
# now do something with this...
The pick event (matplotlib.backend_bases.PickEvent) which is passed to
your callback is always fired with two attributes:
mouseevent - the mouse event that generate the pick event. The
mouse event in turn has attributes like x and y (the coordinates in
display space, such as pixels from left, bottom) and xdata, ydata (the
coords in data space). Additionally, you can get information about
which buttons were pressed, which keys were pressed, which Axes
the mouse is over, etc. See matplotlib.backend_bases.MouseEvent
for details.
artist - the matplotlib.artist that generated the pick event.
Additionally, certain artists like Line2D and PatchCollection may
attach additional meta data like the indices into the data that meet
the picker criteria (for example, all the points in the line that are within
the specified epsilon tolerance)
The examples below illustrate each of these methods.
"""
from __future__ import print_function
from matplotlib.pyplot import figure, show
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from matplotlib.image import AxesImage
import numpy as np
from numpy.random import rand
if 1: # simple picking, lines, rectangles and text
    fig = figure()
    ax1 = fig.add_subplot(211)
    # picker=True makes the title/label fire pick events on click
    ax1.set_title('click on points, rectangles or text', picker=True)
    ax1.set_ylabel('ylabel', picker=True, bbox=dict(facecolor='red'))
    line, = ax1.plot(rand(100), 'o', picker=5)  # 5 points tolerance

    # pick the rectangle
    ax2 = fig.add_subplot(212)
    bars = ax2.bar(range(10), rand(10), picker=True)
    for label in ax2.get_xticklabels():  # make the xtick labels pickable
        label.set_picker(True)

    def onpick1(event):
        """Print a description of whichever artist fired this pick event."""
        if isinstance(event.artist, Line2D):
            thisline = event.artist
            xdata = thisline.get_xdata()
            ydata = thisline.get_ydata()
            ind = event.ind
            # Wrap zip in list() so the picked points actually print under
            # Python 3, where zip() returns a lazy iterator object.
            print('onpick1 line:', list(zip(np.take(xdata, ind), np.take(ydata, ind))))
        elif isinstance(event.artist, Rectangle):
            patch = event.artist
            print('onpick1 patch:', patch.get_path())
        elif isinstance(event.artist, Text):
            text = event.artist
            print('onpick1 text:', text.get_text())

    fig.canvas.mpl_connect('pick_event', onpick1)
if 1: # picking with a custom hit test function
    # you can define custom pickers by setting picker to a callable
    # function. The function has the signature
    #
    #  hit, props = func(artist, mouseevent)
    #
    # to determine the hit test. if the mouse event is over the artist,
    # return hit=True and props is a dictionary of
    # properties you want added to the PickEvent attributes

    def line_picker(line, mouseevent):
        """
        find the points within a certain distance from the mouseclick in
        data coords and attach some extra attributes, pickx and picky
        which are the data points that were picked
        """
        if mouseevent.xdata is None:
            return False, dict()
        xdata = line.get_xdata()
        ydata = line.get_ydata()
        maxd = 0.05
        d = np.sqrt((xdata-mouseevent.xdata)**2. + (ydata-mouseevent.ydata)**2.)
        # np.nonzero returns a *tuple* of index arrays; take the only axis.
        # The original tested len() of the tuple itself, which is always 1,
        # so every click anywhere reported a hit.
        ind = np.nonzero(np.less_equal(d, maxd))[0]
        if len(ind):
            pickx = np.take(xdata, ind)
            picky = np.take(ydata, ind)
            props = dict(ind=ind, pickx=pickx, picky=picky)
            return True, props
        else:
            return False, dict()
    def onpick2(event):
        # pickx/picky were attached by line_picker via its props dictionary
        print('onpick2 line:', event.pickx, event.picky)

    fig = figure()
    ax1 = fig.add_subplot(111)
    ax1.set_title('custom picker for line data')
    # Passing a callable as picker delegates the hit test to line_picker
    line, = ax1.plot(rand(100), rand(100), 'o', picker=line_picker)
    fig.canvas.mpl_connect('pick_event', onpick2)
if 1: # picking on a scatter plot (matplotlib.collections.RegularPolyCollection)

    x, y, c, s = rand(4, 100)

    def onpick3(event):
        # event.ind lists the indices of all scatter points within the pick radius
        ind = event.ind
        print('onpick3 scatter:', ind, np.take(x, ind), np.take(y, ind))

    fig = figure()
    ax1 = fig.add_subplot(111)
    # picker=True: clicking directly on a marker fires a pick event
    col = ax1.scatter(x, y, 100*s, c, picker=True)
    #fig.savefig('pscoll.eps')
    fig.canvas.mpl_connect('pick_event', onpick3)
if 1: # picking images (matplotlib.image.AxesImage)
    fig = figure()
    ax1 = fig.add_subplot(111)
    # Four images of different shapes placed in separate extents; each is pickable
    im1 = ax1.imshow(rand(10,5), extent=(1,2,1,2), picker=True)
    im2 = ax1.imshow(rand(5,10), extent=(3,4,1,2), picker=True)
    im3 = ax1.imshow(rand(20,25), extent=(1,2,3,4), picker=True)
    im4 = ax1.imshow(rand(30,12), extent=(3,4,3,4), picker=True)
    ax1.axis([0,5,0,5])

    def onpick4(event):
        artist = event.artist
        if isinstance(artist, AxesImage):
            im = artist
            A = im.get_array()
            # The data shape identifies which of the four images was clicked
            print('onpick4 image', A.shape)

    fig.canvas.mpl_connect('pick_event', onpick4)

show()
| mit |
JakeLowey/HackRPI2 | django/conf/locale/lv/formats.py | 316 | 1490 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax; backslash escapes literal letters).
DATE_FORMAT = r'Y. \g\a\d\a j. F'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'Y. \g\a\d\a j. F, H:i:s'
YEAR_MONTH_FORMAT = r'Y. \g. F'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = r'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 #Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Formats are tried in order until one parses successfully.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
    '%H.%M.%S', # '14.30.59'
    '%H.%M', # '14.30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59'
    '%d.%m.%y %H.%M', # '25.10.06 14.30'
    '%d.%m.%y', # '25.10.06'
)
# Number formatting: e.g. 1 234 567,89 (space-grouped thousands, comma decimals)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u' ' # Non-breaking space
NUMBER_GROUPING = 3
| mit |
DarthThanatos/citySimNG | citySimNGView/extra/scrollable_wx_matplotlib.py | 1 | 2647 | from numpy import arange, sin, pi
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
import wx
class MyFrame(wx.Frame):
    """Frame showing a fixed-width window onto a longer data series.

    The horizontal scrollbar attached to the canvas moves the plotted
    interval through the full data sequence.
    """
    def __init__(self, parent, id):
        wx.Frame.__init__(self, parent, id, 'scrollable plot',
                style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER,
                size=(800, 400))
        self.panel = wx.Panel(self, -1)

        self.fig = Figure((5, 4), 75)
        self.canvas = FigureCanvasWxAgg(self.panel, -1, self.fig)
        # Scrollbar thumb of width 5 over positions 0..scroll_range
        self.scroll_range = 400
        self.canvas.SetScrollbar(wx.HORIZONTAL, 0, 5,
                                 self.scroll_range)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.canvas, -1, wx.EXPAND)

        self.panel.SetSizer(sizer)
        self.panel.Fit()

        self.init_data()
        self.init_plot()

        self.canvas.Bind(wx.EVT_SCROLLWIN, self.OnScrollEvt)

    def init_data(self):
        """Create the sample signal and the plotting-window bookkeeping indices."""
        # Generate some data to plot:
        self.dt = 0.01
        self.t = arange(0, 5, self.dt)
        self.x = sin(2 * pi * self.t)

        # Extents of data sequence:
        self.i_min = 0
        self.i_max = len(self.t)

        # Size of plot window:
        self.i_window = 100

        # Indices of data interval to be plotted:
        self.i_start = 0
        self.i_end = self.i_start + self.i_window

    def init_plot(self):
        """Draw the initial interval [i_start, i_end) of the data."""
        self.axes = self.fig.add_subplot(111)
        self.plot_data = \
            self.axes.plot(self.t[self.i_start:self.i_end],
                           self.x[self.i_start:self.i_end])[0]

    def draw_plot(self):
        """Refresh the line and axis limits for the currently selected interval."""
        # Update data in plot:
        self.plot_data.set_xdata(self.t[self.i_start:self.i_end])
        self.plot_data.set_ydata(self.x[self.i_start:self.i_end])

        # Adjust plot limits:
        self.axes.set_xlim((min(self.t[self.i_start:self.i_end]),
                            max(self.t[self.i_start:self.i_end])))
        self.axes.set_ylim((min(self.x[self.i_start:self.i_end]),
                            max(self.x[self.i_start:self.i_end])))

        # Redraw:
        self.canvas.draw()

    def OnScrollEvt(self, event):
        """Shift the plotted interval by the scrollbar position and redraw."""
        # Update the indices of the plot:
        self.i_start = self.i_min + event.GetPosition()
        self.i_end = self.i_min + self.i_window + event.GetPosition()
        self.draw_plot()
class MyApp(wx.App):
    """Minimal wx application hosting a single MyFrame."""
    def OnInit(self):
        self.frame = MyFrame(parent=None, id=-1)
        self.frame.Show()
        self.SetTopWindow(self.frame)
        # Returning True lets wx proceed with the main loop
        return True
if __name__ == '__main__':
    # Run the demo application (dataset-dump junk removed from the last line)
    app = MyApp()
    app.MainLoop()
uclouvain/osis_louvain | cms/enums/entity_name.py | 1 | 1447 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
# Identifiers for the model types a CMS text entry can be attached to.
LEARNING_UNIT_YEAR = "learning_unit_year"
OFFER_YEAR = "offer_year"
# Django-style choices tuple: (stored value, human-readable label).
ENTITY_NAME = ((LEARNING_UNIT_YEAR, LEARNING_UNIT_YEAR),
               (OFFER_YEAR, OFFER_YEAR))
| agpl-3.0 |
calebfoss/tensorflow | tensorflow/contrib/learn/python/learn/estimators/run_config_test.py | 16 | 7847 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""run_config.py tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
patch = test.mock.patch
class RunConfigTest(test.TestCase):
  """Tests RunConfig construction from the TF_CONFIG environment variable.

  Uses the modern ``assertEqual`` spelling throughout; ``assertEquals`` is a
  deprecated alias of ``assertEqual`` in unittest.
  """

  def test_defaults_with_no_tf_config(self):
    config = run_config.RunConfig()
    self.assertEqual(config.master, "")
    self.assertEqual(config.task_id, 0)
    self.assertEqual(config.num_ps_replicas, 0)
    self.assertEqual(config.cluster_spec, {})
    self.assertIsNone(config.task_type)
    self.assertTrue(config.is_chief)
    self.assertEqual(config.evaluation_master, "")

  def test_values_from_tf_config(self):
    tf_config = {
        "cluster": {
            run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
            run_config_lib.TaskType.WORKER: ["host3:3", "host4:4", "host5:5"]
        },
        "task": {
            "type": run_config_lib.TaskType.WORKER,
            "index": 1
        }
    }
    with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
      config = run_config.RunConfig()
    self.assertEqual(config.master, "grpc://host4:4")
    self.assertEqual(config.task_id, 1)
    self.assertEqual(config.num_ps_replicas, 2)
    self.assertEqual(config.cluster_spec.as_dict(), tf_config["cluster"])
    self.assertEqual(config.task_type, run_config_lib.TaskType.WORKER)
    self.assertFalse(config.is_chief)
    self.assertEqual(config.evaluation_master, "")

  def test_explicitly_specified_values(self):
    cluster_spec = {
        run_config_lib.TaskType.PS: ["localhost:9990"],
        "my_job_name": ["localhost:9991", "localhost:9992", "localhost:0"]
    }
    tf_config = {
        "cluster": cluster_spec,
        "task": {
            "type": run_config_lib.TaskType.WORKER,
            "index": 2
        }
    }
    with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
      # Explicit constructor arguments win over TF_CONFIG-derived values.
      config = run_config.RunConfig(
          master="localhost:0", evaluation_master="localhost:9991")
    self.assertEqual(config.master, "localhost:0")
    self.assertEqual(config.task_id, 2)
    self.assertEqual(config.num_ps_replicas, 1)
    self.assertEqual(config.cluster_spec, server_lib.ClusterSpec(cluster_spec))
    self.assertEqual(config.task_type, run_config_lib.TaskType.WORKER)
    self.assertFalse(config.is_chief)
    self.assertEqual(config.evaluation_master, "localhost:9991")

  def test_single_node_in_cluster_spec_produces_empty_master(self):
    tf_config = {"cluster": {run_config_lib.TaskType.WORKER: ["host1:1"]}}
    with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
      config = run_config.RunConfig()
      self.assertEqual(config.master, "")

  def test_no_task_type_produces_empty_master(self):
    tf_config = {
        "cluster": {
            run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
            run_config_lib.TaskType.WORKER: ["host3:3", "host4:4", "host5:5"]
        },
        # Omits "task": {"type": "worker}
    }
    with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
      config = run_config.RunConfig()
      self.assertEqual(config.master, "")

  def test_invalid_job_name_raises(self):
    tf_config = {
        "cluster": {
            run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
            run_config_lib.TaskType.WORKER: ["host3:3", "host4:4", "host5:5"]
        },
        "task": {
            "type": "not_in_cluster_spec"
        }
    }
    expected_msg_regexp = "not_in_cluster_spec is not a valid task"
    # assertRaisesRegexp is kept for Python 2 compatibility of this file.
    with patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), self.assertRaisesRegexp(
            ValueError, expected_msg_regexp):
      run_config.RunConfig()

  def test_illegal_task_index_raises(self):
    tf_config = {
        "cluster": {
            run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
            run_config_lib.TaskType.WORKER: ["host3:3", "host4:4", "host5:5"]
        },
        "task": {
            "type": run_config_lib.TaskType.WORKER,
            "index": 3
        }
    }
    expected_msg_regexp = "3 is not a valid task_id"
    with patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), self.assertRaisesRegexp(
            ValueError, expected_msg_regexp):
      run_config.RunConfig()

  def test_is_chief_from_cloud_tf_config(self):
    # is_chief should be true when ["task"]["type"] == "master" and
    # index == 0 and ["task"]["environment"] == "cloud". Note that
    # test_values_from_tf_config covers the non-master case.
    tf_config = {
        "cluster": {
            run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
            run_config_lib.TaskType.MASTER: ["host3:3"],
            run_config_lib.TaskType.WORKER: ["host4:4", "host5:5", "host6:6"]
        },
        "task": {
            "type": run_config_lib.TaskType.MASTER,
            "index": 0
        },
        "environment": "cloud"
    }
    with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
      config = run_config.RunConfig()
      self.assertTrue(config.is_chief)

  def test_is_chief_from_noncloud_tf_config(self):
    # is_chief should be true when ["task"]["type"] == "worker" and
    # index == 0 if ["task"]["environment"] != "cloud".
    tf_config = {
        "cluster": {
            run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
            run_config_lib.TaskType.MASTER: ["host3:3"],
            run_config_lib.TaskType.WORKER: ["host4:4", "host5:5", "host6:6"]
        },
        "task": {
            "type": run_config_lib.TaskType.WORKER,
            "index": 0
        },
        "environment": "random"
    }
    with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
      config = run_config.RunConfig()
      self.assertTrue(config.is_chief)
    # But task 0 for a job named "master" should not be.
    tf_config = {
        "cluster": {
            run_config_lib.TaskType.PS: ["host1:1", "host2:2"],
            run_config_lib.TaskType.MASTER: ["host3:3"],
            run_config_lib.TaskType.WORKER: ["host4:4", "host5:5", "host6:6"]
        },
        "task": {
            "type": run_config_lib.TaskType.MASTER,
            "index": 0
        },
        "environment": "random"
    }
    with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
      config = run_config.RunConfig()
      self.assertFalse(config.is_chief)

  def test_default_is_chief_from_tf_config_without_job_name(self):
    tf_config = {"cluster": {}, "task": {}}
    with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
      config = run_config.RunConfig()
      self.assertTrue(config.is_chief)
# Standard TensorFlow test runner entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
pombreda/iris-panel | iris/packagedb/tests/test_apiviews.py | 7 | 12682 | # -*- coding: utf-8 -*-
#This file is part of IRIS: Infrastructure and Release Information System
#
# Copyright (C) 2013 Intel Corporation
#
# IRIS is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2.0 as published by the Free Software Foundation.
"""
This is the REST framework test class for the iris-packagedb project REST API.
"""
#pylint: disable=no-member,missing-docstring,invalid-name
#E:397,18: Instance of 'HttpResponse' has no 'data' member (no-member)
#C: 36, 0: Missing function docstring (missing-docstring)
#C: 96, 8: Invalid variable name "d" (invalid-name)
import base64
import urllib
from django.test import TestCase
from django.contrib.auth.models import User
from iris.core.models import (
Domain, SubDomain, GitTree, Package, Product, License, DomainRole,
SubDomainRole, GitTreeRole)
def sort_data(data):
    """Recursively sort, in place, every list inside a nested list/dict tree.

    Lists are sorted and each element recursed into; dict values are recursed
    into (dict keys are left untouched).  Other types are ignored.
    """
    if isinstance(data, list):
        data.sort()
        for element in data:
            sort_data(element)
    elif isinstance(data, dict):
        for value in data.itervalues():
            sort_data(value)
class ProductsTests(TestCase):
    """
    The REST framework test case class of Product APIView
    """
    def setUp(self):
        """
        Create 2 Product instances. One includes 2 gittrees, the other
        includes 1 gittree.
        Create 1 test user.
        """
        # The created user is never referenced again, so no binding is kept.
        User.objects.create_user(username='nemo', password='password')
        # Fixture names keep the original (typo'd) values; nothing asserts them.
        domain = Domain.objects.create(name='doamin')
        subdomain = SubDomain.objects.create(name='subdoamin', domain=domain)
        gt1 = GitTree.objects.create(gitpath='a/b', subdomain=subdomain)
        gt2 = GitTree.objects.create(gitpath='c/d', subdomain=subdomain)
        p1 = Product.objects.create(name='product', description='product1')
        p2 = Product.objects.create(name='a:b', description='product2')
        p1.gittrees.add(gt1, gt2)
        p2.gittrees.add(gt2)

    def test_get_info(self):
        """
        GET requests to APIView should return list of objects.
        """
        url = '/api/packagedb/products/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = [{
            'name': 'a:b',
            'description': 'product2',
            'gittrees': ['c/d']
        }, {
            'name': 'product',
            'description': 'product1',
            'gittrees': ['a/b', 'c/d']
        }]
        # Order of serialized collections is unspecified; compare sorted trees.
        sort_data(data)
        sort_data(response.data)
        self.assertEqual(response.data, data)

    def test_get_detail(self):
        """
        GET requests to APIView should return a single object.
        """
        url = "/api/packagedb/products/a:b/"
        url = urllib.quote(url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = {
            'name': 'a:b',
            'description': 'product2',
            'gittrees': ['c/d'],
        }
        sort_data(data)
        sort_data(response.data)
        self.assertEqual(response.data, data)

    def test_get_not_deleted_detail(self):
        """
        GET requests to APIView should raise 404
        If it does not currently exist.
        """
        url = "/api/packagedb/products/999/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
class DomainsTests(TestCase):
    """
    The REST framework test case class of Domain APIView
    """
    def setUp(self):
        """
        Create 2 SubDomain instance, one of them is 'Uncategorized',
        one domainrole, one subdomainrole.
        Create 2 test user.
        """
        user_nemo = User.objects.create_user(
            username='nemo', password='password', email='nemo@a.com')
        user_lucy = User.objects.create_user(
            username='lucy', password='lucy',
            first_name='jaeho81', last_name='lucy',
            email='jaeho81.lucy@a.com')
        domain1 = Domain.objects.create(name='domain1')
        domain2 = Domain.objects.create(name='domain2')
        subdomain = SubDomain.objects.create(name='subdomain', domain=domain1)
        SubDomain.objects.create(name='Uncategorized', domain=domain2)
        architect_role = DomainRole.objects.create(
            role='Architect', domain=domain2,
            name='Architect: %s' % domain2.name)
        maintainer_role = SubDomainRole.objects.create(
            role='Maintainer', subdomain=subdomain,
            name='Maintainer: %s' % subdomain.name)
        for user in (user_nemo, user_lucy):
            user.groups.add(architect_role)
            user.groups.add(maintainer_role)

    @staticmethod
    def _role_members():
        """Expected serialized membership for the two fixture users."""
        return [{
            'first_name': '',
            'last_name': '',
            'email': 'nemo@a.com',
        }, {
            'first_name': 'jaeho81',
            'last_name': 'lucy',
            'email': 'jaeho81.lucy@a.com',
        }]

    def test_get_info(self):
        """
        GET requests to APIView should return list of objects.
        """
        response = self.client.get('/api/packagedb/domains/')
        self.assertEqual(response.status_code, 200)
        expected = [{
            'name': 'domain1 / subdomain',
            'roles': {'Maintainer': self._role_members()},
        }, {
            'name': 'domain2 / Uncategorized',
            'roles': {'Architect': self._role_members()},
        }]
        sort_data(expected)
        sort_data(response.data)
        self.assertEqual(response.data, expected)

    def test_get_detail(self):
        """
        GET requests to APIView should return single objects.
        """
        url = urllib.quote('/api/packagedb/domains/domain2 / Uncategorized/')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        expected = {
            'name': 'domain2 / Uncategorized',
            'roles': {'Architect': self._role_members()},
        }
        sort_data(expected)
        sort_data(response.data)
        self.assertEqual(response.data, expected)
class GitTreesTests(TestCase):
    """
    The REST framework test case class of GitTree APIView
    """
    def setUp(self):
        """
        Create 2 GitTree instances: one related with a named subdomain and
        two packages, the other related with the 'Uncategorized' subdomain,
        one package and two licenses.
        Create 2 test users.
        """
        nemo = User.objects.create_user(
            username='nemo', password='password', email='nemo@a.com')
        lucy = User.objects.create_user(
            username='lucy', password='password',
            first_name='jaeho81', last_name='lucy',
            email='jaeho81.lucy@a.com')
        domain = Domain.objects.create(name='domain')
        subdomain = SubDomain.objects.create(name='subdomain', domain=domain)
        uncategorized = SubDomain.objects.create(
            name='Uncategorized', domain=domain)
        tree_df = GitTree.objects.create(gitpath='d/f', subdomain=subdomain)
        tree_abc = GitTree.objects.create(gitpath='a/b/c',
                                          subdomain=uncategorized)
        package1 = Package.objects.create(name='xap1')
        package2 = Package.objects.create(name='p2')
        tree_df.packages.add(package1, package2)
        tree_abc.packages.add(package2)
        license1 = License.objects.create(shortname='license1',
                                          fullname='labc def',
                                          text='helo')
        license2 = License.objects.create(shortname='abc',
                                          fullname='weldome sdfs',
                                          text='helo world')
        tree_abc.licenses.add(license1, license2)
        integrator_role = GitTreeRole.objects.create(
            role='Integrator', gittree=tree_df,
            name='Integrator: %s' % tree_df.gitpath)
        # NOTE: this fixture labels the role name "Integrator: ..." even
        # though its role is Maintainer; preserved from the original fixture.
        maintainer_role = GitTreeRole.objects.create(
            role='Maintainer', gittree=tree_abc,
            name='Integrator: %s' % tree_abc.gitpath)
        for user in (nemo, lucy):
            user.groups.add(integrator_role)
            user.groups.add(maintainer_role)

    @staticmethod
    def _user_dicts():
        """Expected serialized representation of the two fixture users."""
        return [{
            'first_name': '',
            'last_name': '',
            'email': 'nemo@a.com'
        }, {
            'first_name': 'jaeho81',
            'last_name': 'lucy',
            'email': 'jaeho81.lucy@a.com'
        }]

    def test_get_info(self):
        """
        GET requests to APIView should return list of objects.
        """
        response = self.client.get('/api/packagedb/gittrees/')
        self.assertEqual(response.status_code, 200)
        expected = [{
            'gitpath': 'a/b/c',
            'domain': 'domain / Uncategorized',
            'roles': {'Maintainer': self._user_dicts()},
            'packages': ['p2'],
            'licenses': ['license1', 'abc'],
        }, {
            'gitpath': 'd/f',
            'domain': 'domain / subdomain',
            'roles': {'Integrator': self._user_dicts()},
            'packages': ['xap1', 'p2'],
            'licenses': [],
        }]
        sort_data(expected)
        sort_data(response.data)
        self.assertEqual(response.data, expected)

    def test_get_detail(self):
        """
        GET requests to APIView should return single objects.
        """
        url = urllib.quote('/api/packagedb/gittrees/d/f/')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        expected = {
            'gitpath': 'd/f',
            'domain': 'domain / subdomain',
            'roles': {'Integrator': self._user_dicts()},
            'packages': ['xap1', 'p2'],
            'licenses': [],
        }
        sort_data(expected)
        sort_data(response.data)
        self.assertEqual(response.data, expected)
class PackagesTests(TestCase):
    """
    The REST framework test case class of Package APIView
    """
    def setUp(self):
        """
        Create 2 Package instances: one related with two gittrees, the other
        related with 1 gittree.
        Create 1 test user.
        """
        User.objects.create_user(username='nemo', password='password')
        domain = Domain.objects.create(name='domain')
        subdomain = SubDomain.objects.create(name='subdomain', domain=domain)
        tree1 = GitTree.objects.create(gitpath='agitpath1',
                                       subdomain=subdomain)
        tree2 = GitTree.objects.create(gitpath='gitpath2',
                                       subdomain=subdomain)
        package1 = Package.objects.create(name='package1')
        package2 = Package.objects.create(name='package2')
        tree1.packages.add(package1, package2)
        tree2.packages.add(package2)

    def test_get_info(self):
        """
        GET requests to APIView should return list of objects.
        """
        response = self.client.get('/api/packagedb/packages/')
        self.assertEqual(response.status_code, 200)
        expected = [{
            'name': 'package1',
            'gittrees': ['agitpath1']
        }, {
            'name': 'package2',
            'gittrees': ['gitpath2', 'agitpath1']
        }]
        sort_data(expected)
        sort_data(response.data)
        self.assertEqual(response.data, expected)

    def test_get_detail(self):
        """
        GET requests to APIView should return a single object.
        """
        response = self.client.get('/api/packagedb/packages/package2/')
        self.assertEqual(response.status_code, 200)
        expected = {'name': 'package2', 'gittrees': ['agitpath1', 'gitpath2']}
        sort_data(expected)
        sort_data(response.data)
        self.assertEqual(response.data, expected)
| gpl-2.0 |
garrettcap/Bulletproof-Backup | wx/lib/dragscroller.py | 7 | 2580 | #-----------------------------------------------------------------------------
# Name: dragscroller.py
# Purpose: Scrolls a wx.ScrollWindow by dragging
#
# Author: Riaan Booysen
#
# Created: 2006/09/05
# Copyright: (c) 2006
# Licence: wxPython
#-----------------------------------------------------------------------------
import wx
class DragScroller:
    """ Scrolls a wx.ScrollWindow in the direction and speed of a mouse drag.

        Call Start with the position of the drag start.
        Call Stop on the drag release. """
    def __init__(self, scrollwin, rate=30, sensitivity=0.75):
        self.scrollwin = scrollwin          # the wx.ScrollWindow being driven
        self.rate = rate                    # timer period in milliseconds
        self.sensitivity = sensitivity      # scroll units per dragged pixel
        self.pos = None                     # drag anchor (client coordinates)
        self.timer = None                   # active wx.Timer while dragging

    def GetScrollWindow(self):
        return self.scrollwin

    def SetScrollWindow(self, scrollwin):
        self.scrollwin = scrollwin

    def GetUpdateRate(self):
        return self.rate

    def SetUpdateRate(self, rate):
        self.rate = rate

    def GetSensitivity(self):
        return self.sensitivity

    def SetSensitivity(self, sensitivity):
        self.sensitivity = sensitivity

    def Start(self, pos):
        """ Start a drag scroll operation """
        if not self.scrollwin:
            # Fixed: was the Python-2-only form `raise Exception, '...'`;
            # the call form below works on both Python 2 and 3.
            raise Exception('No ScrollWindow defined')
        self.pos = pos
        self.scrollwin.SetCursor(wx.StockCursor(wx.CURSOR_SIZING))
        if not self.scrollwin.HasCapture():
            self.scrollwin.CaptureMouse()
        self.timer = wx.Timer(self.scrollwin)
        self.scrollwin.Bind(wx.EVT_TIMER, self.OnTimerDoScroll, id=self.timer.GetId())
        self.timer.Start(self.rate)

    def Stop(self):
        """ Stops a drag scroll operation """
        if self.timer and self.scrollwin:
            self.timer.Stop()
            self.scrollwin.Disconnect(self.timer.GetId())
            self.timer.Destroy()
            self.timer = None
            self.scrollwin.SetCursor(wx.STANDARD_CURSOR)
            if self.scrollwin.HasCapture():
                self.scrollwin.ReleaseMouse()

    def OnTimerDoScroll(self, event):
        # Ignore stray timer events after Stop() or before Start().
        if self.pos is None or not self.timer or not self.scrollwin:
            return
        new = self.scrollwin.ScreenToClient(wx.GetMousePosition())
        # Scroll proportionally to the distance dragged from the anchor.
        dx = int((new.x-self.pos.x)*self.sensitivity)
        dy = int((new.y-self.pos.y)*self.sensitivity)
        spx = self.scrollwin.GetScrollPos(wx.HORIZONTAL)
        spy = self.scrollwin.GetScrollPos(wx.VERTICAL)
        self.scrollwin.Scroll(spx+dx, spy+dy)
| gpl-2.0 |
Pexego/odoo | addons/l10n_us/__init__.py | 893 | 1045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
glmcdona/meddle | examples/base/Lib/sndhdr.py | 257 | 5973 | """Routines to help recognizing sound files.
Function whathdr() recognizes various types of sound file headers.
It understands almost all headers that SOX can decode.
The return tuple contains the following items, in this order:
- file type (as SOX understands it)
- sampling rate (0 if unknown or hard to decode)
- number of channels (0 if unknown or hard to decode)
- number of frames in the file (-1 if unknown or hard to decode)
- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
If the file doesn't have a recognizable type, it returns None.
If the file can't be opened, IOError is raised.
To compute the total time, divide the number of frames by the
sampling rate (a frame contains a sample for each channel).
Function what() calls whathdr(). (It used to also use some
heuristics for raw data, but this doesn't work very well.)
Finally, the function test() is a simple main program that calls
what() for all files mentioned on the argument list. For directory
arguments it calls what() for all files in that directory. Default
argument is "." (testing all files in the current directory). The
option -r tells it to recurse down directories found inside
explicitly given directories.
"""
# The file structure is top-down except that the test program and its
# subroutine come last.
# Public API of this module.
__all__ = ["what","whathdr"]
def what(filename):
    """Guess the type of a sound file"""
    return whathdr(filename)
def whathdr(filename):
    """Recognize sound headers.

    Reads the first 512 bytes of *filename* and runs each registered
    recognizer over them; returns the first non-false result, or None.
    Fixed: the file object is now always closed (the original leaked it).
    """
    f = open(filename, 'rb')
    try:
        h = f.read(512)
        for tf in tests:
            res = tf(h, f)
            if res:
                return res
        return None
    finally:
        f.close()
#-----------------------------------#
# Subroutines per sound header type #
#-----------------------------------#
# Registry of recognizer functions.  Each is called as fn(header, fileobj)
# and returns a result tuple or None; populated via tests.append() below.
tests = []
def test_aifc(h, f):
    """Recognize AIFF and AIFF-C (compressed) headers via the aifc module."""
    import aifc
    if h[:4] != 'FORM':
        return None
    if h[8:12] == 'AIFC':
        fmt = 'aifc'
    elif h[8:12] == 'AIFF':
        fmt = 'aiff'
    else:
        return None
    # Let the aifc module parse the full header from the start of the file.
    f.seek(0)
    try:
        a = aifc.openfp(f, 'r')
    except (EOFError, aifc.Error):
        return None
    return (fmt, a.getframerate(), a.getnchannels(), \
            a.getnframes(), 8*a.getsampwidth())
tests.append(test_aifc)
def test_au(h, f):
    """Recognize Sun/NeXT audio (.au/.snd) headers.

    Handles both the big-endian '.snd' magic and the little-endian variants,
    decoding all header words in the matching byte order.
    """
    # NOTE: the original rebound the word reader to the name 'f', shadowing
    # the file parameter; renamed to 'read_long' (behavior unchanged).
    if h[:4] == '.snd':
        read_long = get_long_be
    elif h[:4] in ('\0ds.', 'dns.'):
        read_long = get_long_le
    else:
        return None
    # Renamed 'type' -> 'filetype' to avoid shadowing the builtin.
    filetype = 'au'
    hdr_size = read_long(h[4:8])
    data_size = read_long(h[8:12])
    encoding = read_long(h[12:16])
    rate = read_long(h[16:20])
    nchannels = read_long(h[20:24])
    sample_size = 1 # default
    if encoding == 1:
        sample_bits = 'U'   # U-LAW
    elif encoding == 2:
        sample_bits = 8
    elif encoding == 3:
        sample_bits = 16
        sample_size = 2
    else:
        sample_bits = '?'
    frame_size = sample_size * nchannels
    return filetype, rate, nchannels, data_size//frame_size, sample_bits
tests.append(test_au)
def test_hcom(h, f):
    """Recognize Macintosh HCOM headers ('FSSD' resource plus 'HCOM' tag)."""
    if h[65:69] == 'FSSD' and h[128:132] == 'HCOM':
        divisor = get_long_be(h[144:148])
        return 'hcom', 22050//divisor, 1, -1, 8
    return None
tests.append(test_hcom)
def test_voc(h, f):
    """Recognize Creative Voice (.voc) headers."""
    if h[:20] != 'Creative Voice File\032':
        return None
    rate = 0
    first_block = get_short_le(h[20:22])
    if 0 <= first_block < 500 and h[first_block] == '\1':
        # The rate byte encodes 256 - 1000000/sample_rate.
        code = ord(h[first_block + 4])
        rate = int(1000000.0 / (256 - code))
    return 'voc', rate, 1, -1, 8
tests.append(test_voc)
def test_wav(h, f):
    """Recognize RIFF/WAVE headers: 'RIFF' <len> 'WAVE' 'fmt ' <len>."""
    if h[:4] == 'RIFF' and h[8:12] == 'WAVE' and h[12:16] == 'fmt ':
        get_short_le(h[20:22])  # wFormatTag: read for parity, value unused
        nchannels = get_short_le(h[22:24])
        rate = get_long_le(h[24:28])
        sample_bits = get_short_le(h[34:36])
        return 'wav', rate, nchannels, -1, sample_bits
    return None
tests.append(test_wav)
def test_8svx(h, f):
if h[:4] != 'FORM' or h[8:12] != '8SVX':
return None
# Should decode it to get #channels -- assume always 1
return '8svx', 0, 1, 0, 8
tests.append(test_8svx)  # register with the recognizer list
def test_sndt(h, f):
    """Recognize SndTool (.sndt) headers."""
    if h[:5] != 'SOUND':
        return None
    frame_count = get_long_le(h[8:12])
    rate = get_short_le(h[20:22])
    return 'sndt', rate, 1, frame_count, 8
tests.append(test_sndt)
def test_sndr(h, f):
    """Recognize Sounder (.sndr) files by their leading rate field."""
    if h[:2] != '\0\0':
        return None
    rate = get_short_le(h[2:4])
    if 4000 <= rate <= 25000:
        return 'sndr', rate, 1, -1, 8
    return None
tests.append(test_sndr)
#---------------------------------------------#
# Subroutines to extract numbers from strings #
#---------------------------------------------#
def get_long_be(s):
    """Big-endian unsigned 32-bit integer from the first 4 chars of s."""
    value = 0
    for i in range(4):
        value = (value << 8) | ord(s[i])
    return value
def get_long_le(s):
    """Little-endian unsigned 32-bit integer from the first 4 chars of s."""
    value = 0
    for i in (3, 2, 1, 0):
        value = (value << 8) | ord(s[i])
    return value
def get_short_be(s):
    """Big-endian unsigned 16-bit integer from the first 2 chars of s."""
    hi, lo = ord(s[0]), ord(s[1])
    return hi * 256 + lo
def get_short_le(s):
    """Little-endian unsigned 16-bit integer from the first 2 chars of s."""
    lo, hi = ord(s[0]), ord(s[1])
    return hi * 256 + lo
#--------------------#
# Small test program #
#--------------------#
def test():
    """Command-line driver: recognize each named file, or '.' by default.

    A leading '-r' argument requests recursion into subdirectories.
    """
    import sys
    recursive = 0
    if sys.argv[1:] and sys.argv[1] == '-r':
        del sys.argv[1:2]
        recursive = 1
    targets = sys.argv[1:] or ['.']
    try:
        testall(targets, recursive, 1)
    except KeyboardInterrupt:
        sys.stderr.write('\n[Interrupted]\n')
        sys.exit(1)
def testall(list, recursive, toplevel):
    """Run what() on every name in *list*, descending into directories.

    NOTE(review): the parameter name 'list' shadows the builtin; left
    unchanged here since this is a documentation-only update.
    """
    import sys
    import os
    for filename in list:
        if os.path.isdir(filename):
            # Directories are only descended into at the top level or with -r.
            print filename + '/:',
            if recursive or toplevel:
                print 'recursing down:'
                import glob
                names = glob.glob(os.path.join(filename, '*'))
                testall(names, recursive, 0)
            else:
                print '*** directory (use -r) ***'
        else:
            print filename + ':',
            sys.stdout.flush()
            try:
                print what(filename)
            except IOError:
                print '*** not found ***'
# Run the command-line driver when executed as a script.
if __name__ == '__main__':
    test()
| mit |
toshywoshy/ansible | lib/ansible/modules/network/netscaler/netscaler_cs_policy.py | 124 | 9505 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_cs_policy
short_description: Manage content switching policy
description:
- Manage content switching policy.
- "This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance."
version_added: "2.4"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
policyname:
description:
- >-
Name for the content switching policy. Must begin with an ASCII alphanumeric or underscore C(_)
character, and must contain only ASCII alphanumeric, underscore, hash C(#), period C(.), space C( ), colon
C(:), at sign C(@), equal sign C(=), and hyphen C(-) characters. Cannot be changed after a policy is
created.
- "The following requirement applies only to the NetScaler CLI:"
- >-
If the name includes one or more spaces, enclose the name in double or single quotation marks (for
example, my policy or my policy).
- "Minimum length = 1"
url:
description:
- >-
URL string that is matched with the URL of a request. Can contain a wildcard character. Specify the
string value in the following format: C([[prefix] [*]] [.suffix]).
- "Minimum length = 1"
- "Maximum length = 208"
rule:
description:
- >-
Expression, or name of a named expression, against which traffic is evaluated. Written in the classic
or default syntax.
- "Note:"
- >-
Maximum length of a string literal in the expression is 255 characters. A longer string can be split
into smaller strings of up to 255 characters each, and the smaller strings concatenated with the +
operator. For example, you can create a 500-character string as follows: '"<string of 255
characters>" + "<string of 245 characters>"'
domain:
description:
- "The domain name. The string value can range to 63 characters."
- "Minimum length = 1"
action:
description:
- >-
Content switching action that names the target load balancing virtual server to which the traffic is
switched.
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Create url cs policy
delegate_to: localhost
netscaler_cs_policy:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
validate_certs: no
state: present
policyname: policy_1
url: /example/
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Could not load nitro python sdk"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'url': 'difference. ours: (str) example1 other: (str) /example1' }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, ensure_feature_is_enabled
# The nitro SDK is an optional dependency; record availability so main() can
# emit a clean failure message instead of an ImportError traceback.
try:
    from nssrc.com.citrix.netscaler.nitro.resource.config.cs.cspolicy import cspolicy
    from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
    PYTHON_SDK_IMPORTED = True
except ImportError as e:
    PYTHON_SDK_IMPORTED = False
def policy_exists(client, module):
    """Return True if a cs policy named by module.params['policyname'] exists."""
    log('Checking if policy exists')
    # Simplified from an if/else returning True/False.
    return cspolicy.count_filtered(client, 'policyname:%s' % module.params['policyname']) > 0
def policy_identical(client, module, cspolicy_proxy):
    """Return True if the existing policy's attributes match the module params.

    The 'ip' attribute is excluded from the comparison (not managed here).
    Returns False when no policy with the configured name exists.
    """
    log('Checking if defined policy is identical to configured')
    if cspolicy.count_filtered(client, 'policyname:%s' % module.params['policyname']) == 0:
        return False
    policy_list = cspolicy.get_filtered(client, 'policyname:%s' % module.params['policyname'])
    diff_dict = cspolicy_proxy.diff_object(policy_list[0])
    # pop() with a default replaces the original if-in/del pair.
    diff_dict.pop('ip', None)
    return len(diff_dict) == 0
def diff_list(client, module, cspolicy_proxy):
    """Return the attribute differences between desired and actual policy."""
    filter_expr = 'policyname:%s' % module.params['policyname']
    matching = cspolicy.get_filtered(client, filter_expr)
    return cspolicy_proxy.diff_object(matching[0])
def main():
    """Module entry point: converge the cs policy to the requested state.

    Builds the argument spec, logs in to the netscaler, then either creates/
    updates (state=present) or deletes (state=absent) the policy, honoring
    check mode and the save_config flag, and sanity-checks the result.
    """
    module_specific_arguments = dict(
        policyname=dict(type='str'),
        url=dict(type='str'),
        rule=dict(type='str'),
        domain=dict(type='str'),
        action=dict(type='str'),
    )
    hand_inserted_arguments = dict(
    )
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    # Fallthrough to rest of execution
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # requests is matched by class-name string so it stays an optional import.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Attributes the user may set vs. those only reported by the appliance.
    readwrite_attrs = [
        'policyname',
        'url',
        'rule',
        'domain',
        'action',
    ]
    readonly_attrs = [
        'vstype',
        'hits',
        'bindhits',
        'labelname',
        'labeltype',
        'priority',
        'activepolicy',
        'cspolicytype',
    ]
    transforms = {
    }
    # Instantiate config proxy
    cspolicy_proxy = ConfigProxy(
        actual=cspolicy(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        transforms=transforms,
    )
    try:
        ensure_feature_is_enabled(client, 'CS')
        # Apply appropriate state
        if module.params['state'] == 'present':
            log('Sanity checks for state present')
            if not policy_exists(client, module):
                # Missing entirely: create it (changed even in check mode).
                if not module.check_mode:
                    cspolicy_proxy.add()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not policy_identical(client, module, cspolicy_proxy):
                # Exists but differs: update in place.
                if not module.check_mode:
                    cspolicy_proxy.update()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state present')
                if not policy_exists(client, module):
                    module.fail_json(msg='Policy does not exist', **module_result)
                if not policy_identical(client, module, cspolicy_proxy):
                    module.fail_json(msg='Policy differs from configured', diff=diff_list(client, module, cspolicy_proxy), **module_result)
        elif module.params['state'] == 'absent':
            log('Applying actions for state absent')
            if policy_exists(client, module):
                if not module.check_mode:
                    cspolicy_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state absent')
                if policy_exists(client, module):
                    module.fail_json(msg='Policy still exists', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)


if __name__ == "__main__":
    main()
| gpl-3.0 |
3nids/QGIS | python/plugins/processing/script/AddScriptFromTemplateAction.py | 45 | 1877 | # -*- coding: utf-8 -*-
"""
***************************************************************************
AddScriptFromTemplateAction.py
---------------------
Date : August 2012
Copyright : (C) 2018 by Matteo Ghetta
Email : matteo dot ghetta at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matteo Ghetta'
__date__ = 'March 2018'
__copyright__ = '(C) 2018, Matteo Ghetta'
import os
import codecs
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import QgsApplication
from processing.gui.ToolboxAction import ToolboxAction
from processing.script.ScriptEditorDialog import ScriptEditorDialog
class AddScriptFromTemplateAction(ToolboxAction):
    """Toolbox action that opens the script editor pre-filled with the
    bundled ``ScriptTemplate.py`` skeleton."""

    def __init__(self):
        # Name shown in the Processing toolbox; the action lives in "Tools".
        self.name = QCoreApplication.translate("AddScriptFromTemplate", "Create New Script from Template…")
        self.group = self.tr("Tools")

    def execute(self):
        """Read the template file and display it in a new editor dialog."""
        dialog = ScriptEditorDialog(None)
        plugin_dir = os.path.split(os.path.dirname(__file__))[0]
        template_file = os.path.join(
            plugin_dir, 'script', 'ScriptTemplate.py')
        with codecs.open(template_file, 'r', encoding='utf-8') as handle:
            dialog.editor.setText(handle.read())
        dialog.show()
| gpl-2.0 |
ambitioninc/django-localized-recurrence | localized_recurrence/tests/models_tests.py | 1 | 23235 | from datetime import datetime, timedelta
from django.test import TestCase
from django_dynamic_fixture import G
import pytz
from ..models import LocalizedRecurrence, LocalizedRecurrenceQuerySet
from ..models import _replace_with_offset, _update_schedule
class LocalizedRecurrenceUpdateTest(TestCase):
    """Exercise the ``update`` method of ``LocalizedRecurrence``."""

    def _reload(self, recurrence):
        # Fetch a fresh copy from the database so assertions see persisted state.
        return LocalizedRecurrence.objects.get(id=recurrence.id)

    def test_update_creation(self):
        # Calling update on an unsaved instance persists it.
        recurrence = LocalizedRecurrence()
        recurrence.update()
        self.assertIsNotNone(recurrence.id)

    def test_update_timezone(self):
        recurrence = G(LocalizedRecurrence)
        recurrence.update(timezone='US/Eastern')
        self.assertEqual(self._reload(recurrence).timezone, pytz.timezone('US/Eastern'))

    def test_update_offset(self):
        recurrence = G(LocalizedRecurrence)
        recurrence.update(offset=timedelta(seconds=1))
        self.assertEqual(self._reload(recurrence).offset, timedelta(seconds=1))
class LocalizedRecurrenceQuerySetTest(TestCase):
    """Sanity check that the default manager hands back the custom queryset."""

    def test_isinstance(self):
        G(LocalizedRecurrence)
        queryset = LocalizedRecurrence.objects.all()
        self.assertIsInstance(queryset, LocalizedRecurrenceQuerySet)
class LocalizedRecurrenceQuerySetUpdateScheduleTest(TestCase):
    """Updates made through the queryset must be reflected in the DB."""

    def setUp(self):
        eastern = pytz.timezone('US/Eastern')
        G(LocalizedRecurrence, interval='DAY', offset=timedelta(hours=12), timezone=eastern)
        G(LocalizedRecurrence, interval='MONTH', offset=timedelta(hours=15), timezone=eastern)

    def test_update_from_1970(self):
        """A recurrence still at the epoch default gets a fresh schedule."""
        update_time = datetime(year=2013, month=5, day=20, hour=12, minute=3)
        LocalizedRecurrence.objects.filter(interval='DAY').update_schedule(time=update_time)
        daily = LocalizedRecurrence.objects.filter(interval='DAY').first()
        self.assertGreater(daily.next_scheduled, update_time)
class LocalizedRecurrenceManagerUpdateScheduleTest(TestCase):
    """``update_schedule`` on the manager must touch every recurrence."""

    def setUp(self):
        eastern = pytz.timezone('US/Eastern')
        G(LocalizedRecurrence, interval='DAY', offset=timedelta(hours=12), timezone=eastern)
        G(LocalizedRecurrence, interval='MONTH', offset=timedelta(hours=15), timezone=eastern)

    def test_update_all(self):
        """The manager call passes straight through to the queryset."""
        update_time = datetime(year=2013, month=5, day=20, hour=15, minute=3)
        LocalizedRecurrence.objects.update_schedule(time=update_time)
        for recurrence in LocalizedRecurrence.objects.all():
            self.assertGreater(recurrence.next_scheduled, update_time)
class LocalizedRecurrenceTest(TestCase):
    """Creation and simple attribute behaviour of ``LocalizedRecurrence``."""

    def setUp(self):
        G(LocalizedRecurrence, interval='DAY', offset=timedelta(hours=12), timezone=pytz.timezone('US/Eastern'))

    def test_timedelta_returned(self):
        """The duration field comes back as a real ``timedelta``."""
        recurrence = LocalizedRecurrence.objects.first()
        self.assertIsInstance(recurrence.offset, timedelta)

    def test_string_representation(self):
        recurrence = LocalizedRecurrence.objects.first()
        expected = 'ID: {0}, Interval: {1}, Next Scheduled: {2}'.format(
            recurrence.id, recurrence.interval, recurrence.next_scheduled)
        self.assertEqual(str(recurrence), expected)
class LocalizedRecurrenceUpdateScheduleTest(TestCase):
    """``update_schedule`` on a single instance advances its schedule."""

    def setUp(self):
        self.lr_day = G(
            LocalizedRecurrence,
            interval='DAY', offset=timedelta(hours=12),
            timezone=pytz.timezone('US/Eastern'))

    def test_update_passes_through(self):
        update_time = datetime(year=2013, month=5, day=20, hour=15, minute=3)
        self.lr_day.update_schedule(update_time)
        self.assertGreater(self.lr_day.next_scheduled, update_time)
class LocalizedRecurrenceUtcOfNextScheduleTest(TestCase):
    """
    Tests for ``utc_of_next_schedule`` across every supported interval.

    Covers simple advances, same-period vs. next-period scheduling,
    month/quarter/year rollovers and both daylight-savings transitions.
    Fix: the quarterly tests previously re-created ``self.lr_quarter``
    with ``G(...)`` *after* computing the result — dead copy-paste code
    that inserted useless DB rows; it has been removed.
    """
    def setUp(self):
        # One recurrence per interval type, spanning several timezones.
        self.lr_day = G(
            LocalizedRecurrence,
            interval='DAY', offset=timedelta(hours=12),
            timezone=pytz.timezone('US/Eastern'))
        self.lr_week = G(
            LocalizedRecurrence,
            interval='WEEK', offset=timedelta(days=3, hours=17, minutes=30),
            timezone=pytz.timezone('US/Central'))
        self.lr_month = G(
            LocalizedRecurrence,
            interval='MONTH', offset=timedelta(days=21, hours=19, minutes=15, seconds=10),
            timezone=pytz.timezone('US/Central'))
        self.lr_quarter = G(
            LocalizedRecurrence,
            interval='QUARTER', offset=timedelta(days=68, hours=16, minutes=30),
            timezone=pytz.timezone('Asia/Hong_Kong'))
        self.lr_year = G(
            LocalizedRecurrence,
            interval='YEAR', offset=timedelta(days=31, hours=16, minutes=30),
            timezone=pytz.timezone('Asia/Hong_Kong'))

    def _assert_next(self, recurrence, current_time, expected):
        # Shared compute-and-compare step for every test below.
        self.assertEqual(recurrence.utc_of_next_schedule(current_time), expected)

    def test_basic_works(self):
        """Daily at noon Eastern: 2013-01-15 17:05:22 UTC -> 2013-01-16 17:00 UTC."""
        self._assert_next(self.lr_day, datetime(2013, 1, 15, 17, 5, 22), datetime(2013, 1, 16, 17))

    def test_dst_cross_monthly(self):
        """A monthly recurrence crossing the spring DST boundary."""
        self.lr_month.offset = timedelta(hours=0)
        self.lr_month.previous_scheduled = datetime(2015, 2, 1, 6)
        self._assert_next(self.lr_month, datetime(2015, 3, 31), datetime(2015, 4, 1, 5))

    def test_before_midnight(self):
        """Daily at 11:59 PM Eastern; current time is just past local midnight."""
        self.lr_day.offset = timedelta(hours=23, minutes=59)
        self.lr_day.save()
        self._assert_next(self.lr_day, datetime(2013, 1, 15, 5, 5, 22), datetime(2013, 1, 16, 4, 59))

    def test_after_midnight(self):
        """Daily at 12:01 AM Eastern; current time is just past local midnight."""
        self.lr_day.offset = timedelta(minutes=1)
        self.lr_day.save()
        self._assert_next(self.lr_day, datetime(2013, 1, 15, 5, 5, 22), datetime(2013, 1, 16, 5, 1))

    def test_week_update_full_week(self):
        """Weekly, already past this week's slot: jump a full week ahead."""
        self._assert_next(self.lr_week, datetime(2013, 8, 8, 22, 34), datetime(2013, 8, 15, 22, 30))

    def test_weekly_update_current_week(self):
        """Weekly, before this week's slot: schedule within the current week."""
        self._assert_next(self.lr_week, datetime(2013, 8, 6, 22, 34), datetime(2013, 8, 8, 22, 30))

    def test_month_update_full_month(self):
        """Monthly, already past this month's slot: jump a full month ahead."""
        self._assert_next(self.lr_month, datetime(2013, 8, 23, 0, 34, 55), datetime(2013, 9, 23, 0, 15, 10))

    def test_month_update_current_month(self):
        """Monthly, before this month's slot: schedule within the current month."""
        self._assert_next(self.lr_month, datetime(2013, 8, 16, 0, 34, 55), datetime(2013, 8, 23, 0, 15, 10))

    def test_month_year_end_update(self):
        """Monthly recurrences roll over the year boundary correctly."""
        self._assert_next(self.lr_month, datetime(2013, 12, 23, 2, 34, 55), datetime(2014, 1, 23, 1, 15, 10))

    def test_quarterly_full_quarter(self):
        """Quarterly, already past this quarter's slot: jump a quarter ahead."""
        self._assert_next(self.lr_quarter, datetime(2013, 6, 23, 0, 34, 55), datetime(2013, 9, 7, 8, 30))

    def test_quarterly_current_quarter(self):
        """Quarterly, before this quarter's slot: schedule within the quarter."""
        self._assert_next(self.lr_quarter, datetime(2013, 4, 23, 0, 34, 55), datetime(2013, 6, 8, 8, 30))

    def test_quarterly_end_year(self):
        """Quarterly recurrences roll over the year boundary correctly."""
        self._assert_next(self.lr_quarter, datetime(2013, 12, 23, 0, 34, 55), datetime(2014, 3, 10, 8, 30))

    def test_yearly(self):
        """Yearly, already past this year's slot: schedule for next year."""
        self._assert_next(self.lr_year, datetime(2013, 6, 23, 0, 34, 55), datetime(2014, 2, 1, 8, 30))

    def test_into_dst_boundary(self):
        """Entering DST: the next UTC occurrence is one hour earlier."""
        self.lr_week.offset = timedelta(days=3, hours=12)
        self.lr_week.save()
        self._assert_next(self.lr_week, datetime(2013, 3, 7, 18), datetime(2013, 3, 14, 17))

    def test_out_of_dst_boundary(self):
        """Leaving DST: the next UTC occurrence is one hour later."""
        self.lr_week.offset = timedelta(days=3, hours=12)
        self.lr_week.save()
        self._assert_next(self.lr_week, datetime(2013, 10, 31, 17), datetime(2013, 11, 7, 18))

    def test_utc_plus(self):
        """A timezone ahead of UTC (Europe/Berlin, UTC+2 during DST)."""
        self.lr_day.timezone = pytz.timezone('Europe/Berlin')
        self.lr_day.save()
        self._assert_next(self.lr_day, datetime(2013, 5, 5, 10, 10), datetime(2013, 5, 6, 10))
class UpdateScheduleTest(TestCase):
    """Tests for the module-private ``_update_schedule`` helper."""

    def setUp(self):
        eastern = pytz.timezone('US/Eastern')
        self.lr_week = G(
            LocalizedRecurrence,
            interval='WEEK', offset=timedelta(hours=12),
            timezone=eastern)
        self.lr_day = G(
            LocalizedRecurrence,
            interval='DAY',
            offset=timedelta(hours=12),
            timezone=eastern)

    def test_updates_localized_recurrences(self):
        update_time = datetime(year=2013, month=5, day=20, hour=12, minute=3)
        _update_schedule([self.lr_week], update_time)
        # The helper must both advance the schedule and record the trigger time.
        self.assertGreater(self.lr_week.next_scheduled, update_time)
        self.assertEqual(self.lr_week.previous_scheduled, update_time)
class ReplaceWithOffsetTest(TestCase):
    """
    Tests for the module-private ``_replace_with_offset`` helper.

    Fixes over the previous version: the duplicated build-call-assert
    boilerplate is factored into ``_check``, the assertEqual argument
    order is now consistently (actual, expected), the year-long
    ``test_last_day_of_month`` sequence is a data-driven loop, and a dead
    trailing ``update_schedule`` call after the final assertion was removed.
    """
    def _check(self, dt_in, td_in, interval, expected):
        # Shared compute-and-compare step for the tests below.
        self.assertEqual(_replace_with_offset(dt_in, td_in, interval), expected)

    def test_day(self):
        """'DAY' replaces the time-of-day with the offset."""
        self._check(
            datetime(2013, 1, 20, 12, 45, 48),
            timedelta(hours=3, minutes=3, seconds=3),
            'DAY',
            datetime(2013, 1, 20, 3, 3, 3))

    def test_week(self):
        """'WEEK' replaces the weekday and time-of-day with the offset."""
        self._check(
            datetime(2013, 1, 20, 12, 45, 48),
            timedelta(days=4, hours=3, minutes=3, seconds=3),
            'WEEK',
            datetime(2013, 1, 18, 3, 3, 3))

    def test_week_on_month_boundary(self):
        """'WEEK' rolls over month boundaries correctly."""
        self._check(
            datetime(2013, 7, 30, 12, 45, 48),
            timedelta(days=4, hours=3, minutes=3, seconds=3),
            'WEEK',
            datetime(2013, 8, 2, 3, 3, 3))

    def test_month(self):
        """'MONTH' replaces the day-of-month and time-of-day with the offset."""
        self._check(
            datetime(2013, 1, 20, 12, 45, 48),
            timedelta(days=15, hours=3, minutes=3, seconds=3),
            'MONTH',
            datetime(2013, 1, 16, 3, 3, 3))

    def test_last_day_of_month(self):
        """
        A monthly recurrence whose local fire time falls on the previous day
        in UTC stays on the correct day for a full year (incl. DST shifts).
        """
        recurrence = LocalizedRecurrence.objects.create(
            interval='MONTH',
            offset=timedelta(days=30, hours=23, minutes=3, seconds=3),
            timezone='US/Central',
            next_scheduled=datetime(2013, 2, 20, 23, 45, 48),
        )
        # The stored next_scheduled starts at the raw creation value; the
        # first update below advances it to the real first fire date.
        self.assertEqual(recurrence.next_scheduled, datetime(2013, 2, 20, 23, 45, 48))
        # UTC hour is 4 while US/Central observes CDT and 5 during CST.
        expected_dates = [
            datetime(2013, 4, 1, 4, 3, 3),
            datetime(2013, 5, 1, 4, 3, 3),
            datetime(2013, 6, 1, 4, 3, 3),
            datetime(2013, 7, 1, 4, 3, 3),
            datetime(2013, 8, 1, 4, 3, 3),
            datetime(2013, 9, 1, 4, 3, 3),
            datetime(2013, 10, 1, 4, 3, 3),
            datetime(2013, 11, 1, 4, 3, 3),
            datetime(2013, 12, 1, 5, 3, 3),
            datetime(2014, 1, 1, 5, 3, 3),
            datetime(2014, 2, 1, 5, 3, 3),
            datetime(2014, 3, 1, 5, 3, 3),
            datetime(2014, 4, 1, 4, 3, 3),
        ]
        for expected in expected_dates:
            recurrence.update_schedule(recurrence.next_scheduled)
            self.assertEqual(recurrence.next_scheduled, expected)

    def test_quarter(self):
        """'QUARTER' replaces the day-in-quarter and time with the offset."""
        self._check(
            datetime(2013, 4, 20, 12, 45, 48),
            timedelta(days=65, hours=3, minutes=3, seconds=3),
            'QUARTER',
            datetime(2013, 6, 5, 3, 3, 3))

    def test_quarterly_past(self):
        """'QUARTER' may produce a datetime earlier in the same quarter."""
        self._check(
            datetime(2013, 6, 23, 0, 34, 55),
            timedelta(days=68, hours=16, minutes=30),
            'QUARTER',
            datetime(2013, 6, 8, 16, 30))

    def test_quarterly_overshoot(self):
        """An offset past the end of a short quarter stays inside it."""
        self._check(
            datetime(2013, 1, 1, 0),
            timedelta(days=90, hours=12),
            'QUARTER',
            datetime(2013, 3, 31, 12))

    def test_quarterly_undershoot(self):
        """The same offset lands earlier inside a longer quarter."""
        self._check(
            datetime(2013, 7, 1, 0),
            timedelta(days=90, hours=12),
            'QUARTER',
            datetime(2013, 9, 29, 12))

    def test_year(self):
        """'YEAR' replaces the day-of-year and time with the offset."""
        self._check(
            datetime(2013, 6, 23, 0, 34, 55),
            timedelta(days=5, hours=16, minutes=30),
            'YEAR',
            datetime(2013, 1, 6, 16, 30))

    def test_year_end_leap_year(self):
        """Day 365 lands on Dec 31 in a leap year."""
        self._check(
            datetime(2016, 6, 23, 0, 34, 55),
            timedelta(days=365, hours=16, minutes=30),
            'YEAR',
            datetime(2016, 12, 31, 16, 30))

    def test_year_end_non_leap_year(self):
        """Day 365 also lands on Dec 31 in a non-leap year."""
        self._check(
            datetime(2015, 6, 23, 0, 34, 55),
            timedelta(days=365, hours=16, minutes=30),
            'YEAR',
            datetime(2015, 12, 31, 16, 30))

    def test_bad_interval(self):
        """An unknown interval name raises ValueError."""
        with self.assertRaises(ValueError):
            _replace_with_offset(
                datetime(2013, 1, 20, 12, 45, 48),
                timedelta(days=15, hours=3, minutes=3, seconds=3),
                'blah')
| mit |
robjohnson189/home-assistant | homeassistant/util/__init__.py | 20 | 9523 | """Helper methods for various modules."""
from collections.abc import MutableSet
from itertools import chain
import threading
from datetime import datetime
import re
import enum
import socket
import random
import string
from functools import wraps
from types import MappingProxyType
from unicodedata import normalize
from typing import Any, Optional, TypeVar, Callable, Sequence, KeysView, Union
from .dt import as_local, utcnow
T = TypeVar('T')
U = TypeVar('U')
RE_SANITIZE_FILENAME = re.compile(r'(~|\.\.|/|\\)')
RE_SANITIZE_PATH = re.compile(r'(~|\.(\.)+)')
RE_SLUGIFY = re.compile(r'[^a-z0-9_]+')
def sanitize_filename(filename: str) -> str:
    r"""Return *filename* with ``~``, ``..``, ``/`` and ``\`` stripped out."""
    cleaned = RE_SANITIZE_FILENAME.sub("", filename)
    return cleaned
def sanitize_path(path: str) -> str:
    """Return *path* with ``~`` and ``..`` sequences stripped out."""
    cleaned = RE_SANITIZE_PATH.sub("", path)
    return cleaned
def slugify(text: str) -> str:
    """Turn *text* into a slug: NFKD-normalized, lowercased, spaces become
    underscores and any remaining character outside [a-z0-9_] is dropped."""
    lowered = normalize('NFKD', text).lower()
    return RE_SLUGIFY.sub("", lowered.replace(" ", "_"))
def repr_helper(inp: Any) -> str:
    """Return a human friendly string representation of *inp*.

    Mappings render as comma separated ``key=value`` pairs (keys and values
    are themselves passed through this helper), datetimes are shown in the
    local timezone as ISO 8601, and anything else falls back to ``str``.
    """
    if isinstance(inp, (dict, MappingProxyType)):
        pairs = ("{}={}".format(repr_helper(key), repr_helper(value))
                 for key, value in inp.items())
        return ", ".join(pairs)
    if isinstance(inp, datetime):
        return as_local(inp).isoformat()
    return str(inp)
def convert(value: T, to_type: Callable[[T], U],
            default: Optional[U]=None) -> Optional[U]:
    """Coerce *value* with *to_type*, falling back to *default*.

    A ``None`` input and conversion failures (ValueError / TypeError)
    both yield *default*.
    """
    if value is None:
        return default
    try:
        return to_type(value)
    except (ValueError, TypeError):
        # Value could not be converted.
        return default
def ensure_unique_string(preferred_string: str, current_strings:
                         Union[Sequence[str], KeysView[str]]) -> str:
    """Return a string not present in *current_strings*.

    If *preferred_string* is taken, try ``_2``, ``_3``, ... suffixes until
    a free one is found.
    """
    taken = set(current_strings)
    if preferred_string not in taken:
        return preferred_string
    attempt = 2
    while "{}_{}".format(preferred_string, attempt) in taken:
        attempt += 1
    return "{}_{}".format(preferred_string, attempt)
# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip():
    """Try to determine the local IP address of the machine.

    Connecting a UDP socket toward a public DNS server selects a route
    (no packet is sent) and ``getsockname`` then reveals the chosen source
    address; falls back to resolving the local hostname on failure.
    """
    # Create the socket before the try block: previously a failure inside
    # socket.socket() would leave `sock` unbound and the finally clause
    # raised UnboundLocalError, masking the real error.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # Use Google Public DNS server to determine own IP
        sock.connect(('8.8.8.8', 80))
        return sock.getsockname()[0]
    except socket.error:
        return socket.gethostbyname(socket.gethostname())
    finally:
        sock.close()
# Taken from http://stackoverflow.com/a/23728630
def get_random_string(length=10):
    """Return a random string of ASCII letters and digits.

    Uses ``random.SystemRandom`` (OS-provided randomness) as the source.
    """
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))
class OrderedEnum(enum.Enum):
    """An Enum whose members support ordering comparisons by value.

    Comparisons are only defined between members of the same enum class;
    anything else yields NotImplemented. Taken from the Python 3.4.0 docs.
    """
    # pylint: disable=no-init
    def __ge__(self, other):
        """Return whether this member is >= *other*."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value >= other.value

    def __gt__(self, other):
        """Return whether this member is > *other*."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value > other.value

    def __le__(self, other):
        """Return whether this member is <= *other*."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value <= other.value

    def __lt__(self, other):
        """Return whether this member is < *other*."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value < other.value
class OrderedSet(MutableSet):
    """Ordered set taken from http://code.activestate.com/recipes/576694/."""
    # Implementation: a dict (self.map) provides O(1) membership while a
    # circular doubly linked list threaded through the dict values keeps
    # insertion order. self.end is the sentinel node; every node is the
    # mutable list [key, prev, next].
    def __init__(self, iterable=None):
        """Initialize the set, optionally filling it from *iterable*."""
        self.end = end = []
        end += [None, end, end] # sentinel node for doubly linked list
        self.map = {} # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable
    def __len__(self):
        """Return the number of elements in the set."""
        return len(self.map)
    def __contains__(self, key):
        """Check if key is in set."""
        return key in self.map
    def add(self, key):
        """Add an element to the end of the set."""
        if key not in self.map:
            end = self.end
            curr = end[1]
            # Splice the new node in just before the sentinel (the end).
            curr[2] = end[1] = self.map[key] = [key, curr, end]
    def promote(self, key):
        """Promote element to beginning of the set, add if not there."""
        if key in self.map:
            self.discard(key)
        begin = self.end[2]
        curr = begin[1]
        # Splice the node in right after the sentinel (the beginning).
        curr[2] = begin[1] = self.map[key] = [key, curr, begin]
    def discard(self, key):
        """Discard an element from the set."""
        if key in self.map:
            key, prev_item, next_item = self.map.pop(key)
            # Unlink the node by pointing its neighbours at each other.
            prev_item[2] = next_item
            next_item[1] = prev_item
    def __iter__(self):
        """Yield the elements in insertion order."""
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        """Yield the elements in reverse insertion order."""
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    # pylint: disable=arguments-differ
    def pop(self, last=True):
        """Pop element of the end of the set.
        Set last=False to pop from the beginning.
        """
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key
    def update(self, *args):
        """Add elements from args to the set."""
        for item in chain(*args):
            self.add(item)
    def __repr__(self):
        """Return the representation."""
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))
    def __eq__(self, other):
        """Return whether *other* equals this set.

        Order matters only when comparing against another OrderedSet;
        any other collection is compared as a plain set.
        """
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
class Throttle(object):
    """A class for throttling the execution of tasks.
    This method decorator adds a cooldown to a method to prevent it from being
    called more than 1 time within the timedelta interval `min_time` after it
    returned its result.
    Calling a method a second time during the interval will return None.
    Pass keyword argument `no_throttle=True` to the wrapped method to make
    the call not throttled.
    Decorator takes in an optional second timedelta interval to throttle the
    'no_throttle' calls.
    Adds a datetime attribute `last_call` to the method.
    """
    def __init__(self, min_time, limit_no_throttle=None):
        """Initialize the throttle with its cooldown interval(s)."""
        self.min_time = min_time
        self.limit_no_throttle = limit_no_throttle
    def __call__(self, method):
        """Decorate *method* with the throttling wrapper."""
        if self.limit_no_throttle is not None:
            # Wrap twice: the inner throttle bounds even no_throttle calls.
            method = Throttle(self.limit_no_throttle)(method)
        # Different methods that can be passed in:
        #  - a function
        #  - an unbound function on a class
        #  - a method (bound function on a class)
        # We want to be able to differentiate between function and unbound
        # methods (which are considered functions).
        # All methods have the classname in their qualname separated by a '.'
        # Functions have a '.' in their qualname if defined inline, but will
        # be prefixed by '.<locals>.' so we strip that out.
        is_func = (not hasattr(method, '__self__') and
                   '.' not in method.__qualname__.split('.<locals>.')[-1])
        @wraps(method)
        def wrapper(*args, **kwargs):
            """Wrapper that allows wrapped to be called only once per min_time.
            If we cannot acquire the lock, it is running so return None.
            """
            # pylint: disable=protected-access
            # Pick the object that stores the throttle state: the bound
            # instance, the wrapper itself for plain functions, or the
            # first positional argument (self) for unbound methods.
            if hasattr(method, '__self__'):
                host = method.__self__
            elif is_func:
                host = wrapper
            else:
                host = args[0] if args else wrapper
            if not hasattr(host, '_throttle'):
                host._throttle = {}
            if id(self) not in host._throttle:
                # State per decorator instance: [lock, last-success-time].
                host._throttle[id(self)] = [threading.Lock(), None]
            throttle = host._throttle[id(self)]
            # Non-blocking acquire: a concurrent call simply returns None.
            if not throttle[0].acquire(False):
                return None
            # Check if method is never called or no_throttle is given
            force = not throttle[1] or kwargs.pop('no_throttle', False)
            try:
                if force or utcnow() - throttle[1] > self.min_time:
                    result = method(*args, **kwargs)
                    # Record the success time only after the call returns.
                    throttle[1] = utcnow()
                    return result
                else:
                    return None
            finally:
                throttle[0].release()
        return wrapper
| mit |
GinnyN/towerofdimensions-django | ifz/build/pip/build/lib/pip/download.py | 17 | 17565 | import cgi
import getpass
import hashlib
import mimetypes
import os
import re
import shutil
import sys
import tempfile
from pip.backwardcompat import (xmlrpclib, urllib, urllib2,
urlparse, string_types)
from pip.exceptions import InstallationError
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
__all__ = ['xmlrpclib_transport', 'get_file_content', 'urlopen',
'is_url', 'url_to_path', 'path_to_url', 'path_to_url2',
'geturl', 'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
xmlrpclib_transport = xmlrpclib.Transport()
def get_file_content(url, comes_from=None):
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content).

    NOTE: Python 2 code (urllib.unquote, module-level urllib2 helpers).
    Relies on the module-level ``_scheme_re`` / ``_url_slash_drive_re``
    patterns defined right after this function.
    """
    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        # A local file referenced from a remote requirements file would be
        # meaningless on the fetching machine -- refuse it.
        if (scheme == 'file' and comes_from
            and comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                # Windows drive-letter form: file:///C|/... -> C:/...
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib.unquote(path)
            if path.startswith('/'):
                # Collapse repeated leading slashes to a single one.
                path = '/' + path.lstrip('/')
            url = path
        else:
            ## FIXME: catch some errors
            resp = urlopen(url)
            return geturl(resp), resp.read()
    # Either a plain filename or a file: URL reduced to a path above.
    try:
        f = open(url)
        content = f.read()
    except IOError:
        e = sys.exc_info()[1]
        raise InstallationError('Could not open requirements file: %s' % str(e))
    else:
        f.close()
    return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
class URLOpener(object):
"""
pip's own URL helper that adds HTTP auth and proxy support
"""
    def __init__(self):
        """Create the shared password manager used for HTTP basic auth."""
        self.passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
    def __call__(self, url):
        """
        If the given url contains auth info or if a normal request gets a 401
        response, an attempt is made to fetch the resource using basic HTTP
        auth.
        """
        url, username, password = self.extract_credentials(url)
        if username is None:
            try:
                # No embedded credentials: try an anonymous request first.
                response = urllib2.urlopen(self.get_request(url))
            except urllib2.HTTPError:
                e = sys.exc_info()[1]
                if e.code != 401:
                    raise
                # 401 Unauthorized: retry through the authenticating path.
                response = self.get_response(url)
        else:
            response = self.get_response(url, username, password)
        return response
    def get_request(self, url):
        """
        Wraps the URL to retrieve to protect against "creative"
        interpretation of the RFC: http://bugs.python.org/issue8732
        """
        if isinstance(url, string_types):
            # Request identity encoding so the body is not gzip-compressed.
            url = urllib2.Request(url, headers={'Accept-encoding': 'identity'})
        return url
    def get_response(self, url, username=None, password=None):
        """
        Does the dirty work of actually getting the response object using
        urllib2 and its HTTP auth builtins.
        """
        scheme, netloc, path, query, frag = urlparse.urlsplit(url)
        req = self.get_request(url)
        stored_username, stored_password = self.passman.find_user_password(None, netloc)
        # see if we have a password stored
        if stored_username is None:
            if username is None and self.prompting:
                # Interactively ask for credentials (py2 raw_input/getpass).
                username = urllib.quote(raw_input('User for %s: ' % netloc))
                password = urllib.quote(getpass.getpass('Password: '))
            if username and password:
                # Remember the credentials for future requests to this host.
                self.passman.add_password(None, netloc, username, password)
            stored_username, stored_password = self.passman.find_user_password(None, netloc)
        authhandler = urllib2.HTTPBasicAuthHandler(self.passman)
        opener = urllib2.build_opener(authhandler)
        # FIXME: should catch a 401 and offer to let the user reenter credentials
        return opener.open(req)
def setup(self, proxystr='', prompting=True):
"""
Sets the proxy handler given the option passed on the command
line. If an empty string is passed it looks at the HTTP_PROXY
environment variable.
"""
self.prompting = prompting
proxy = self.get_proxy(proxystr)
if proxy:
proxy_support = urllib2.ProxyHandler({"http": proxy, "ftp": proxy, "https": proxy})
opener = urllib2.build_opener(proxy_support, urllib2.CacheFTPHandler)
urllib2.install_opener(opener)
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
def extract_credentials(self, url):
"""
Extracts user/password from a url.
Returns a tuple:
(url-without-auth, username, password)
"""
if isinstance(url, urllib2.Request):
result = urlparse.urlsplit(url.get_full_url())
else:
result = urlparse.urlsplit(url)
scheme, netloc, path, query, frag = result
username, password = self.parse_credentials(netloc)
if username is None:
return url, None, None
elif password is None and self.prompting:
# remove the auth credentials from the url part
netloc = netloc.replace('%s@' % username, '', 1)
# prompt for the password
prompt = 'Password for %s@%s: ' % (username, netloc)
password = urllib.quote(getpass.getpass(prompt))
else:
# remove the auth credentials from the url part
netloc = netloc.replace('%s:%s@' % (username, password), '', 1)
target_url = urlparse.urlunsplit((scheme, netloc, path, query, frag))
return target_url, username, password
def get_proxy(self, proxystr=''):
"""
Get the proxy given the option passed on the command line.
If an empty string is passed it looks at the HTTP_PROXY
environment variable.
"""
if not proxystr:
proxystr = os.environ.get('HTTP_PROXY', '')
if proxystr:
if '@' in proxystr:
user_password, server_port = proxystr.split('@', 1)
if ':' in user_password:
user, password = user_password.split(':', 1)
else:
user = user_password
prompt = 'Password for %s@%s: ' % (user, server_port)
password = urllib.quote(getpass.getpass(prompt))
return '%s:%s@%s' % (user, password, server_port)
else:
return proxystr
else:
return None
# Module-level singleton: all downloads in this module share one opener
# (and therefore one credential cache).
urlopen = URLOpener()
def is_url(name):
    """Returns true if the name looks like a URL (judged by its scheme)."""
    head, sep, _tail = name.partition(':')
    if not sep:
        return False
    return head.lower() in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
    """
    Convert a file: URL to a local filesystem path.
    """
    assert url.startswith('file:'), (
        "You can only turn file: urls into filenames (not %r)" % url)
    remainder = urllib.unquote(url[len('file:'):].lstrip('/'))
    if _url_drive_re.match(remainder):
        # Windows drive letter ("c:" or "c|") -> "c:<rest>"
        return remainder[0] + ':' + remainder[2:]
    return '/' + remainder
# "c:" drive prefix at the start of a local path.
_drive_re = re.compile('^([a-z]):', re.I)
# Drive prefix at the start of a file: URL path, with ":" or "|" separator.
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
    """
    Convert a path to a file: URL.  The path will be made absolute
    (and normcased); a drive letter is rewritten to the "|" form.
    """
    abs_path = os.path.normcase(os.path.abspath(path))
    if _drive_re.match(abs_path):
        abs_path = abs_path[0] + '|' + abs_path[2:]
    quoted = urllib.quote(abs_path)
    quoted = quoted.replace(os.path.sep, '/').lstrip('/')
    return 'file:///' + quoted
def path_to_url2(path):
    """
    Convert a path to a file: URL.  The path will be made absolute and
    every path segment is individually quoted.
    """
    drive, tail = os.path.splitdrive(os.path.normpath(os.path.abspath(path)))
    url = '/'.join(urllib.quote(segment) for segment in tail.split(os.path.sep))
    if not drive:
        url = url.lstrip('/')
    return 'file:///' + drive + url
def geturl(urllib2_resp):
    """
    Replacement for urllib.addinfourl.geturl(), which can drop the double
    slash for certain schemes (e.g. file://).  Restores '://' whenever the
    part after the colon does not already start with '//', i.e. it treats

        scheme:/foo/bar

    the same as

        scheme:///foo/bar

    (See RFC 1630: http://tools.ietf.org/html/rfc1630)
    """
    url = urllib2_resp.geturl()
    scheme, rest = url.split(':', 1)
    # FIXME: write a good test to cover it
    return url if rest.startswith('//') else '%s://%s' % (scheme, rest)
def is_archive_file(name):
    """Return True if `name` is considered an archive file (by extension)."""
    known_extensions = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle')
    return splitext(name)[1].lower() in known_extensions
def unpack_vcs_link(link, location, only_download=False):
    """Check out (or, with only_download, just export) a VCS link."""
    backend = _get_used_vcs_backend(link)
    if only_download:
        backend.export(location)
    else:
        backend.unpack(location)
def unpack_file_url(link, location):
    """Unpack a file: URL: copy a directory tree, or unpack an archive."""
    source_path = url_to_path(link.url)
    guessed_type = mimetypes.guess_type(source_path)[0]
    if not os.path.isdir(source_path):
        unpack_file(source_path, location, guessed_type, link)
        return
    # shutil.copytree insists on creating the target itself, so clear it first
    if os.path.isdir(location):
        rmtree(location)
    shutil.copytree(source_path, location)
def _get_used_vcs_backend(link):
    """Return an instantiated VCS backend for the link's scheme, or None."""
    for candidate in vcs.backends:
        if link.scheme in candidate.schemes:
            return candidate(link.url)
    return None
def is_vcs_url(link):
    """True when some registered VCS backend claims this link's scheme."""
    backend = _get_used_vcs_backend(link)
    return bool(backend)
def is_file_url(link):
    """True when the link's URL uses the file: scheme (case-insensitive)."""
    lowered = link.url.lower()
    return lowered.startswith('file:')
def _check_hash(download_hash, link):
    """Raise InstallationError when the computed hash disagrees with the link's."""
    expected_digest_size = hashlib.new(link.hash_name).digest_size
    if download_hash.digest_size != expected_digest_size:
        logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
                     % (download_hash.digest_size, link, link.hash_name))
        raise InstallationError('Hash name mismatch for package %s' % link)
    if download_hash.hexdigest() != link.hash:
        logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
                     % (link, download_hash, link.hash))
        raise InstallationError('Bad %s hash for package %s' % (link.hash_name, link))
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
    """
    Stream the response ``resp`` into the file ``temp_location``, showing
    download progress for larger/unknown-size downloads and feeding each
    chunk into a hash object when ``link`` carries an expected hash.

    Returns the hash object, or None when no (or an unsupported) hash
    name is given.
    """
    fp = open(temp_location, 'wb')
    download_hash = None
    if link.hash and link.hash_name:
        try:
            download_hash = hashlib.new(link.hash_name)
        except ValueError:
            logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
    try:
        total_length = int(resp.info()['content-length'])
    except (ValueError, KeyError, TypeError):
        # Missing or malformed Content-Length: treat the size as unknown.
        total_length = 0
    downloaded = 0
    # Only show a live progress display for downloads over ~40kB or when
    # the size is unknown.
    show_progress = total_length > 40*1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
            else:
                logger.start_progress('Downloading %s (unknown size): ' % show_url)
        else:
            logger.notify('Downloading %s' % show_url)
        logger.debug('Downloading from URL %s' % link)
        while True:
            chunk = resp.read(4096)
            if not chunk:
                break
            downloaded += len(chunk)
            if show_progress:
                if not total_length:
                    logger.show_progress('%s' % format_size(downloaded))
                else:
                    logger.show_progress('%3i%% %s' % (100*downloaded/total_length, format_size(downloaded)))
            if download_hash is not None:
                download_hash.update(chunk)
            fp.write(chunk)
        # NOTE(review): fp is only closed on the success path; an exception
        # mid-download leaks the handle -- presumably tolerated here, confirm.
        fp.close()
    finally:
        if show_progress:
            logger.end_progress('%s downloaded' % format_size(downloaded))
    return download_hash
def _copy_file(filename, location, content_type, link):
    """
    Copy the downloaded ``filename`` into ``location`` under the link's
    filename, interactively asking what to do when the destination exists
    (ignore / wipe / backup).
    """
    copy = True
    download_location = os.path.join(location, link.filename)
    if os.path.exists(download_location):
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
            display_path(download_location), ('i', 'w', 'b'))
        if response == 'i':
            copy = False
        elif response == 'w':
            logger.warn('Deleting %s' % display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            # Move the existing file aside instead of destroying it.
            dest_file = backup_dir(download_location)
            logger.warn('Backing up %s to %s'
                        % (display_path(download_location), display_path(dest_file)))
            shutil.move(download_location, dest_file)
    if copy:
        shutil.copy(filename, download_location)
        # NOTE(review): this dedents the logger without a visible matching
        # indent; presumably the caller indented earlier -- confirm.
        logger.indent -= 2
        logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None):
    """
    Fetch ``link`` over HTTP (or reuse a cached copy), verify its hash when
    one is given, optionally keep a copy in ``download_dir``, and unpack
    the file into ``location``.
    """
    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
    # The fragment only carries pip metadata (e.g. "#md5=..."), so strip it.
    target_url = link.url.split('#', 1)[0]
    target_file = None
    download_hash = None
    if download_cache:
        # Cache key: the whole URL percent-encoded into a single filename.
        target_file = os.path.join(download_cache,
                                   urllib.quote(target_url, ''))
        if not os.path.isdir(download_cache):
            create_download_cache_folder(download_cache)
    if (target_file
            and os.path.exists(target_file)
            and os.path.exists(target_file + '.content-type')):
        # Cache hit: the content type was stored alongside the file.
        fp = open(target_file+'.content-type')
        content_type = fp.read().strip()
        fp.close()
        if link.hash and link.hash_name:
            download_hash = _get_hash_from_file(target_file, link)
        temp_location = target_file
        logger.notify('Using download cache from %s' % target_file)
    else:
        resp = _get_response_from_url(target_url, link)
        content_type = resp.info()['content-type']
        filename = link.filename  # fallback
        # Have a look at the Content-Disposition header for a better guess
        content_disposition = resp.info().get('content-disposition')
        if content_disposition:
            type, params = cgi.parse_header(content_disposition)
            # We use ``or`` here because we don't want to use an "empty" value
            # from the filename param.
            filename = params.get('filename') or filename
        ext = splitext(filename)[1]
        if not ext:
            # No extension: try to derive one from the content type.
            ext = mimetypes.guess_extension(content_type)
            if ext:
                filename += ext
        if not ext and link.url != geturl(resp):
            # The request was redirected; maybe the final URL has an extension.
            ext = os.path.splitext(geturl(resp))[1]
            if ext:
                filename += ext
        temp_location = os.path.join(temp_dir, filename)
        download_hash = _download_url(resp, link, temp_location)
    if link.hash and link.hash_name:
        _check_hash(download_hash, link)
    if download_dir:
        _copy_file(temp_location, download_dir, content_type, link)
    unpack_file(temp_location, location, content_type, link)
    if target_file and target_file != temp_location:
        # Populate the cache from the freshly downloaded file.
        cache_download(target_file, temp_location, content_type)
    if target_file is None:
        os.unlink(temp_location)
    os.rmdir(temp_dir)
def _get_response_from_url(target_url, link):
    """Open ``target_url``, logging (and re-raising) HTTP and I/O failures."""
    try:
        return urlopen(target_url)
    except urllib2.HTTPError:
        err = sys.exc_info()[1]
        logger.fatal("HTTP error %s while getting %s" % (err.code, link))
        raise
    except IOError:
        # Typically an FTP error
        err = sys.exc_info()[1]
        logger.fatal("Error %s while getting %s" % (err, link))
        raise
class Urllib2HeadRequest(urllib2.Request):
    # urllib2.Request chooses GET/POST from get_method(); overriding it
    # makes this request class perform a HEAD request instead.
    def get_method(self):
        return "HEAD"
| bsd-3-clause |
Zord13appdesa/python-for-android | python3-alpha/python3-src/Lib/encodings/cp1251.py | 272 | 13361 | """ Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1251 codec built on this module's charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode text to cp1251 bytes; returns (bytes, consumed_length)."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode cp1251 bytes to text; returns (text, consumed_length)."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp1251 encoder (charmap encoding keeps no state)."""

    def encode(self, input, final=False):
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp1251 decoder (charmap decoding keeps no state)."""

    def decode(self, input, final=False):
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for cp1251; all work is done by Codec/charmap."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for cp1251; all work is done by Codec/charmap."""
### encodings module API
def getregentry():
    """Return the CodecInfo entry the codecs registry uses for cp1251."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1251',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE
'\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u20ac' # 0x88 -> EURO SIGN
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE
'\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE
'\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE
'\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE
'\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE
'\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE
'\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE
'\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U
'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U
'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
'\xa4' # 0xA4 -> CURRENCY SIGN
'\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
'\u2116' # 0xB9 -> NUMERO SIGN
'\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
'\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE
'\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE
'\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI
'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
auduny/home-assistant | homeassistant/components/coinbase/__init__.py | 7 | 3122 | """Support for Coinbase."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'coinbase'
CONF_API_SECRET = 'api_secret'
CONF_ACCOUNT_CURRENCIES = 'account_balance_currencies'
CONF_EXCHANGE_CURRENCIES = 'exchange_rate_currencies'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
DATA_COINBASE = 'coinbase_cache'
# Component configuration: both API credentials are required; the optional
# currency lists restrict which balance / exchange-rate sensors get created.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_API_SECRET): cv.string,
        vol.Optional(CONF_ACCOUNT_CURRENCIES):
            vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_EXCHANGE_CURRENCIES, default=[]):
            vol.All(cv.ensure_list, [cv.string])
    })
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the Coinbase component.

    Creates balance sensors for the wallet accounts discovered via the API
    and exchange-rate sensors for the configured currencies.
    """
    conf = config[DOMAIN]
    api_key = conf.get(CONF_API_KEY)
    api_secret = conf.get(CONF_API_SECRET)
    account_currencies = conf.get(CONF_ACCOUNT_CURRENCIES)
    exchange_currencies = conf.get(CONF_EXCHANGE_CURRENCIES)

    coinbase_data = CoinbaseData(api_key, api_secret)
    hass.data[DATA_COINBASE] = coinbase_data
    if not hasattr(coinbase_data, 'accounts'):
        # Authentication failed: the initial update never fetched accounts.
        return False

    for account in coinbase_data.accounts.data:
        if (account_currencies is not None
                and account.currency not in account_currencies):
            continue
        load_platform(hass,
                      'sensor',
                      DOMAIN,
                      {'account': account},
                      config)

    for currency in exchange_currencies:
        if currency not in coinbase_data.exchange_rates.rates:
            _LOGGER.warning("Currency %s not found", currency)
            continue
        native = coinbase_data.exchange_rates.currency
        load_platform(hass,
                      'sensor',
                      DOMAIN,
                      {'native_currency': native,
                       'exchange_currency': currency},
                      config)

    return True
class CoinbaseData:
    """Fetch and cache account and exchange-rate data from Coinbase."""

    def __init__(self, api_key, api_secret):
        """Create the API client and run an initial (throttled) refresh."""
        from coinbase.wallet.client import Client
        self.client = Client(api_key, api_secret)
        self.update()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Refresh accounts and exchange rates (rate-limited by Throttle)."""
        from coinbase.wallet.error import AuthenticationError
        try:
            self.accounts = self.client.get_accounts()
            self.exchange_rates = self.client.get_exchange_rates()
        except AuthenticationError as auth_err:
            _LOGGER.error("Authentication error connecting"
                          " to coinbase: %s", auth_err)
| apache-2.0 |
apechimp/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/test_treewalkers.py | 429 | 13692 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import unittest
import warnings
from difflib import unified_diff
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, convertExpected
from html5lib import html5parser, treewalkers, treebuilders, constants
def PullDOMAdapter(node):
    """Walk a DOM (sub)tree, yielding PullDOM-style (event, node) pairs."""
    from xml.dom import Node
    from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, COMMENT, CHARACTERS

    kind = node.nodeType
    if kind in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
        # Documents/fragments themselves emit nothing; recurse into children.
        for child in node.childNodes:
            for event in PullDOMAdapter(child):
                yield event
    elif kind == Node.DOCUMENT_TYPE_NODE:
        raise NotImplementedError("DOCTYPE nodes are not supported by PullDOM")
    elif kind == Node.COMMENT_NODE:
        yield COMMENT, node
    elif kind in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
        yield CHARACTERS, node
    elif kind == Node.ELEMENT_NODE:
        yield START_ELEMENT, node
        for child in node.childNodes:
            for event in PullDOMAdapter(child):
                yield event
        yield END_ELEMENT, node
    else:
        raise NotImplementedError("Node type not supported: " + str(node.nodeType))
# Tree implementations that are always available from the stdlib: plain DOM
# and DOM driven through the PullDOM adapter.  Optional implementations
# (ElementTree, cElementTree, lxml, genshi) are appended below when their
# imports succeed.
treeTypes = {
    "DOM": {"builder": treebuilders.getTreeBuilder("dom"),
            "walker": treewalkers.getTreeWalker("dom")},
    "PullDOM": {"builder": treebuilders.getTreeBuilder("dom"),
                "adapter": PullDOMAdapter,
                "walker": treewalkers.getTreeWalker("pulldom")},
}
# Try whatever etree implementations are available from a list that are
#"supposed" to work
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['ElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['cElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import lxml.etree as ElementTree # flake8: noqa
except ImportError:
pass
else:
treeTypes['lxml_native'] = \
{"builder": treebuilders.getTreeBuilder("lxml"),
"walker": treewalkers.getTreeWalker("lxml")}
try:
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
except ImportError:
pass
else:
    def GenshiAdapter(tree):
        """Walk a DOM tree and re-emit it as a Genshi event stream."""
        text = None  # buffer for coalescing adjacent character tokens
        for token in treewalkers.getTreeWalker("dom")(tree):
            type = token["type"]
            if type in ("Characters", "SpaceCharacters"):
                if text is None:
                    text = token["data"]
                else:
                    text += token["data"]
            elif text is not None:
                # Any non-character token flushes the buffered text first.
                yield TEXT, text, (None, -1, -1)
                text = None
            if type in ("StartTag", "EmptyTag"):
                if token["namespace"]:
                    name = "{%s}%s" % (token["namespace"], token["name"])
                else:
                    name = token["name"]
                attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
                               for attr, value in token["data"].items()])
                yield (START, (QName(name), attrs), (None, -1, -1))
                if type == "EmptyTag":
                    # An empty tag is START immediately followed by END.
                    type = "EndTag"
            if type == "EndTag":
                if token["namespace"]:
                    name = "{%s}%s" % (token["namespace"], token["name"])
                else:
                    name = token["name"]
                yield END, QName(name), (None, -1, -1)
            elif type == "Comment":
                yield COMMENT, token["data"], (None, -1, -1)
            elif type == "Doctype":
                yield DOCTYPE, (token["name"], token["publicId"],
                                token["systemId"]), (None, -1, -1)
            else:
                pass  # FIXME: What to do?
        if text is not None:
            # Flush any trailing buffered text.
            yield TEXT, text, (None, -1, -1)
treeTypes["genshi"] = \
{"builder": treebuilders.getTreeBuilder("dom"),
"adapter": GenshiAdapter,
"walker": treewalkers.getTreeWalker("genshi")}
def concatenateCharacterTokens(tokens):
    """Yield tokens with runs of (Space)Characters merged into one token."""
    pending = None
    for token in tokens:
        if token["type"] in ("Characters", "SpaceCharacters"):
            if pending is None:
                pending = {"type": "Characters", "data": token["data"]}
            else:
                pending["data"] += token["data"]
            continue
        if pending is not None:
            yield pending
            pending = None
        yield token
    if pending is not None:
        yield pending
def convertTokens(tokens):
    """
    Serialize a treewalker token stream into the indented text format used
    by the html5lib tree-construction expectation files.
    """
    output = []
    indent = 0
    for token in concatenateCharacterTokens(tokens):
        type = token["type"]
        if type in ("StartTag", "EmptyTag"):
            # Non-HTML namespaces are rendered with a prefix (or the raw URI).
            if (token["namespace"] and
                    token["namespace"] != constants.namespaces["html"]):
                if token["namespace"] in constants.prefixes:
                    name = constants.prefixes[token["namespace"]]
                else:
                    name = token["namespace"]
                name += " " + token["name"]
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            attrs = token["data"]
            if attrs:
                # TODO: Remove this if statement, attrs should always exist
                for (namespace, name), value in sorted(attrs.items()):
                    if namespace:
                        if namespace in constants.prefixes:
                            outputname = constants.prefixes[namespace]
                        else:
                            outputname = namespace
                        outputname += " " + name
                    else:
                        outputname = name
                    output.append("%s%s=\"%s\"" % (" " * indent, outputname, value))
            if type == "EmptyTag":
                # Empty tags have no children, so dedent immediately.
                indent -= 2
        elif type == "EndTag":
            indent -= 2
        elif type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
        elif type == "Doctype":
            if token["name"]:
                if token["publicId"]:
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent, token["name"],
                                   token["publicId"],
                                   token["systemId"] and token["systemId"] or ""))
                elif token["systemId"]:
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent, token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))
        elif type in ("Characters", "SpaceCharacters"):
            output.append("%s\"%s\"" % (" " * indent, token["data"]))
        else:
            pass  # TODO: what to do with errors?
    return "\n".join(output)
import re
# Matches a run of equally-indented attribute lines in the serialized tree
# dump; used with re.sub + sortattrs to canonicalize attribute order.
attrlist = re.compile(r"^(\s+)\w+=.*(\n\1\w+=.*)+", re.M)
def sortattrs(x):
    """re.sub callback: return the matched attribute block with lines sorted."""
    return "\n".join(sorted(x.group(0).split("\n")))
class TokenTestCase(unittest.TestCase):
    # Sanity check: every available tree type must walk a tiny document
    # into the same canonical token stream.
    def test_all_tokens(self):
        expected = [
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'},
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
            {'data': 'a', 'type': 'Characters'},
            {'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
            {'data': 'b', 'type': 'Characters'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
            {'data': 'c', 'type': 'Characters'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
            {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'}
        ]
        for treeName, treeCls in treeTypes.items():
            p = html5parser.HTMLParser(tree=treeCls["builder"])
            document = p.parse("<html><head></head><body>a<div>b</div>c</body></html>")
            # Some walkers need the tree adapted first (e.g. PullDOM, Genshi).
            document = treeCls.get("adapter", lambda x: x)(document)
            output = treeCls["walker"](document)
            for expectedToken, outputToken in zip(expected, output):
                self.assertEqual(expectedToken, outputToken)
def runTreewalkerTest(innerHTML, input, expected, errors, treeClass):
    """
    Parse ``input`` (as a fragment when ``innerHTML`` is given), walk the
    resulting tree with ``treeClass``'s walker, and compare the serialized
    output against ``expected`` (with attribute order canonicalized).
    """
    warnings.resetwarnings()
    # Promote warnings to errors so lossy tree builds raise DataLossWarning.
    warnings.simplefilter("error")
    try:
        p = html5parser.HTMLParser(tree=treeClass["builder"])
        if innerHTML:
            document = p.parseFragment(input, innerHTML)
        else:
            document = p.parse(input)
    except constants.DataLossWarning:
        # Ignore testcases we know we don't pass
        return
    document = treeClass.get("adapter", lambda x: x)(document)
    try:
        output = convertTokens(treeClass["walker"](document))
        # Attribute order is implementation-defined; sort attribute lines on
        # both sides before comparing.
        output = attrlist.sub(sortattrs, output)
        expected = attrlist.sub(sortattrs, convertExpected(expected))
        diff = "".join(unified_diff([line + "\n" for line in expected.splitlines()],
                                    [line + "\n" for line in output.splitlines()],
                                    "Expected", "Received"))
        assert expected == output, "\n".join([
            "", "Input:", input,
            "", "Expected:", expected,
            "", "Received:", output,
            "", "Diff:", diff,
        ])
    except NotImplementedError:
        pass  # Amnesty for those that confess...
def test_treewalker():
    """Nose-style generator: yield one treewalker check per fixture/tree pair."""
    sys.stdout.write('Testing tree walkers ' + " ".join(list(treeTypes.keys())) + "\n")
    for treeName, treeCls in treeTypes.items():
        for filename in get_data_files('tree-construction'):
            testName = os.path.basename(filename).replace(".dat", "")
            # The template fixtures exercise features the walkers don't support.
            if testName == "template":
                continue
            for index, test in enumerate(TestData(filename, "data")):
                input = test["data"]
                errors = test["errors"].split("\n")
                innerHTML = test["document-fragment"]
                expected = test["document"]
                yield runTreewalkerTest, innerHTML, input, expected, errors, treeCls
def set_attribute_on_first_child(docfrag, name, value, treeName):
    """Naively set attribute *name* = *value* on the first child of the
    document fragment passed in, using the tree API implied by *treeName*."""
    def et_setter(fragment):
        # ElementTree-style fragments index their children directly.
        return fragment[0].set

    def dom_setter(fragment):
        # DOM-style fragments expose firstChild / setAttribute.
        return fragment.firstChild.setAttribute

    dispatch = {'ElementTree': et_setter,
                'cElementTree': et_setter,
                'DOM': dom_setter}
    try:
        dispatch.get(treeName, dom_setter)(docfrag)(name, value)
    except AttributeError:
        # Unknown tree types fall back to the DOM API above; if the fragment
        # doesn't support it, retry with the ElementTree API.
        et_setter(docfrag)(name, value)
def runTreewalkerEditTest(intext, expected, attrs_to_add, tree):
    """Parse *intext*, add *attrs_to_add* to its first child, then walk it.

    The (attribute-sorted) walker output must appear as a substring of
    *expected*; otherwise an AssertionError is raised.
    """
    treeName, treeClass = tree
    parser = html5parser.HTMLParser(tree=treeClass["builder"])
    document = parser.parseFragment(intext)
    for attr_name, attr_value in attrs_to_add:
        set_attribute_on_first_child(document, attr_name, attr_value, treeName)
    document = treeClass.get("adapter", lambda x: x)(document)
    output = attrlist.sub(sortattrs, convertTokens(treeClass["walker"](document)))
    if output not in expected:
        raise AssertionError("TreewalkerEditTest: %s\nExpected:\n%s\nReceived:\n%s" % (treeName, expected, output))
def test_treewalker_six_mix():
    """Str/Unicode mix. If str attrs added to tree"""
    # On Python 2.x string literals are of type str. Unless, like this
    # file, the programmer imports unicode_literals from __future__.
    # In that case, string literals become objects of type unicode.
    # This test simulates a Py2 user, modifying attributes on a document
    # fragment but not using the u'' syntax nor importing unicode_literals
    cases = (
        ('<a href="http://example.com">Example</a>',
         [(str('class'), str('test123'))],
         '<a>\n class="test123"\n href="http://example.com"\n "Example"'),
        ('<link href="http://example.com/cow">',
         [(str('rel'), str('alternate'))],
         '<link>\n href="http://example.com/cow"\n rel="alternate"\n "Example"')
    )
    for tree in treeTypes.items():
        for intext, attrs, expected in cases:
            yield runTreewalkerEditTest, intext, expected, attrs, tree
| mpl-2.0 |
liam821/e46fanatics | crawler.py | 1 | 4142 | #!/usr/bin/env python
import cgi
import cgitb
import re
import string
# https://pypi.python.org/pypi/urllib3
import urllib3
'''
e46 page crawler
Please see http://forum.e46fanatics.com/showthread.php?t=1049600 for help
By Liam Slusser / lslusser at gmail.com / 8/22/2014
'''
cgitb.enable()
class urlFetcher:
    """Thin wrapper around a urllib3 PoolManager that remembers the last
    response so callers can query its status and body separately."""

    def __init__(self):
        # One shared connection pool for every request made through this object.
        self.h = urllib3.PoolManager()
        self.maxtime = 5    # intended timeout in seconds; NOTE(review): never passed to urlopen -- confirm
        self.headers = {}   # extra HTTP headers sent with every fetch
        self.type = "GET"   # HTTP verb used by fetch()
        self.r = None       # most recent urllib3 response, if any

    def fetch(self, url):
        """Issue the request and return the HTTP status code of the response."""
        response = self.h.urlopen(self.type, url, headers=self.headers)
        self.r = response
        return response.status

    def returnStatus(self):
        """Status code of the last response, or None if nothing was fetched."""
        return self.r.status if self.r else None

    def returnData(self):
        """Body of the last response, or None if nothing was fetched."""
        return self.r.data if self.r else None
class doSearch:
    """Crawl an e46fanatics forum thread and print every post by one user.

    Expects CGI parameters ``postid`` and ``username`` (required) plus
    optional ``startpage``/``endpage``.  Output is raw HTML written
    straight to stdout (the caller has already emitted the CGI headers).
    """
    def __init__(self,form):
        # form is a cgi.FieldStorage holding the request parameters
        self.form = form
        self.pagecache = ""   # raw HTML of the most recently fetched page
        self.start = ""       # thread header markup (everything before the posts)
        self.end = ""         # thread footer markup (everything after the posts)
        self.startpage = 1
        self.endpage = 0      # 0 means "crawl through the last page"
        self.pagenumbers = 0  # total page count, parsed from page 1
        self.urlfetcher = urlFetcher()
        self.urlfetcher.site = "forum.e46fanatics.com"
        self.urlfetcher.host = "forum.e46fanatics.com"
        self.urlfetcher.headers['host'] = "forum.e46fanatics.com"
        self.urlfetcher.headers['User-Agent'] = "liam821.com e46fanatics page crawler"
        self.checkParams()
        self.build()
    def checkParams(self):
        # Override the default page range when the caller supplied one.
        if "startpage" in self.form:
            self.startpage = int(self.form.getlist("startpage")[0])
        if "endpage" in self.form:
            self.endpage = int(self.form.getlist("endpage")[0])
    def build(self):
        """Fetch page 1, work out the page range, then print matching posts."""
        # fetch page one and verify
        self.postid = int(self.form.getlist("postid")[0])
        url = """http://%s/showthread.php?t=%s&page=1""" % (self.urlfetcher.site,self.postid)
        # NOTE(review): fetch() returns the HTTP status code, so this branch
        # is also taken for 404/500 responses -- confirm that is intended.
        if self.urlfetcher.fetch(url):
            self.pagecache = self.urlfetcher.returnData()
            # Split the page into header / footer around the posts <div>.
            self.start = re.findall('^.*<div id="posts">',self.pagecache,re.M|re.DOTALL)[0]
            self.end = re.findall('(<div id="lastpost".*)',self.pagecache,re.M|re.DOTALL)[0]
            self.pagenumbers = int(re.findall('Page 1 of (\d+)',self.pagecache)[0])
        # Clamp the requested range to the real number of pages.
        if self.endpage:
            if self.endpage > self.pagenumbers:
                self.endpage = self.pagenumbers
        else:
            self.endpage = self.pagenumbers
        if self.startpage:
            if self.startpage > self.pagenumbers:
                raise Exception("Your startpage is greater than the total page numbers!")
        print self.start
        for page in range(self.startpage,self.endpage+1):
            print "Page %s" % (page)
            if page == 1:
                # Page 1 is already cached from the verification fetch above.
                for post in re.findall('<!-- post #.*?-- / post #\d+ -->',self.pagecache,re.M|re.DOTALL):
                    # Keep only posts whose author matches the requested username.
                    if re.findall('bold" href="member.php.*[^\n]">(.*?)<',post)[0] == self.form.getlist("username")[0]:
                        print post
            else:
                url = """http://%s/showthread.php?t=%s&page=%s""" % (self.urlfetcher.site,self.postid,page)
                if self.urlfetcher.fetch(url):
                    self.pagecache = self.urlfetcher.returnData()
                    for post in re.findall('<!-- post #.*?-- / post #\d+ -->',self.pagecache,re.M|re.DOTALL):
                        if re.findall('bold" href="member.php.*[^\n]">(.*?)<',post)[0] == self.form.getlist("username")[0]:
                            print post
        print self.end
if __name__ == "__main__":
    # Minimal CGI entry point: emit headers, validate params, then crawl.
    print "Content-Type: text/html"     # HTML is following
    print                               # blank line, end of headers
    form = cgi.FieldStorage()
    if "postid" in form and "username" in form:
        doSearch(form)
    else:
        # Headers were already sent, so this help text reaches the browser
        # before the traceback produced by the raise below.
        print "<h1>You need to give me a postid and username</h1>"
        print """<br><br>See http://forum.e46fanatics.com/showthread.php?t=1049600 for help."""
        raise Exception("You need to give me a postid and username!")
| gpl-2.0 |
wouwei/PiLapse | picam/picamEnv/Lib/site-packages/click/_textwrap.py | 282 | 1198 | import textwrap
from contextlib import contextmanager
class TextWrapper(textwrap.TextWrapper):
    """A ``textwrap.TextWrapper`` with two extras: a context manager that
    temporarily deepens the indentation, and a helper that indents text
    without re-wrapping it."""

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        # Room left on the current line; never less than one character so
        # that a too-long word still makes progress.
        space_left = max(width - cur_len, 1)
        if self.break_long_words:
            chunk = reversed_chunks[-1]
            head, tail = chunk[:space_left], chunk[space_left:]
            cur_line.append(head)
            reversed_chunks[-1] = tail
        elif not cur_line:
            # Cannot break the word: put it on a line of its own.
            cur_line.append(reversed_chunks.pop())

    @contextmanager
    def extra_indent(self, indent):
        """Temporarily grow both indents by *indent*; restored on exit."""
        saved = (self.initial_indent, self.subsequent_indent)
        self.initial_indent += indent
        self.subsequent_indent += indent
        try:
            yield
        finally:
            self.initial_indent, self.subsequent_indent = saved

    def indent_only(self, text):
        """Prefix each line of *text* with the configured indents, without
        re-wrapping: the first line gets ``initial_indent``, the rest get
        ``subsequent_indent``."""
        prefixed = []
        for idx, line in enumerate(text.splitlines()):
            prefix = self.initial_indent if idx == 0 else self.subsequent_indent
            prefixed.append(prefix + line)
        return '\n'.join(prefixed)
| apache-2.0 |
brianjgeiger/osf.io | website/preprints/tasks.py | 3 | 8862 | from django.apps import apps
import logging
from future.moves.urllib.parse import urljoin
import random
import requests
from framework.exceptions import HTTPError
from framework.celery_tasks import app as celery_app
from framework.postcommit_tasks.handlers import enqueue_postcommit_task, get_task_from_postcommit_queue
from framework import sentry
from website import settings, mails
from website.util.share import GraphNode, format_contributor, format_subject
logger = logging.getLogger(__name__)
@celery_app.task(ignore_result=True, max_retries=5, default_retry_delay=60)
def on_preprint_updated(preprint_id, update_share=True, share_type=None, old_subjects=None, saved_fields=None):
    """React to a preprint save: reindex search, refresh DOI metadata, push to SHARE.

    Note: the celery option is spelled ``ignore_result``; the previous
    ``ignore_results`` was an unknown option and therefore had no effect.
    """
    # WARNING: Only perform Read-Only operations in an asynchronous task, until Repeatable Read/Serializable
    # transactions are implemented in View and Task application layers.
    from osf.models import Preprint
    preprint = Preprint.load(preprint_id)
    if old_subjects is None:
        old_subjects = []
    # Reindex only when a search-relevant field was among those saved.
    need_update = bool(preprint.SEARCH_UPDATE_FIELDS.intersection(saved_fields or {}))
    if need_update:
        preprint.update_search()
    if should_update_preprint_identifiers(preprint, old_subjects, saved_fields):
        update_or_create_preprint_identifiers(preprint)
    if update_share:
        update_preprint_share(preprint, old_subjects, share_type)
def should_update_preprint_identifiers(preprint, old_subjects, saved_fields):
    """Return True iff the preprint's identifier (DOI) metadata should be refreshed.

    True only when the preprint exists and is published, the DOI was not just
    created in this save, no subject change is in flight, and the preprint is
    eligible to request identifiers (e.g. not a QA test preprint).  Always
    returns a real bool (the previous version leaked whichever operand
    short-circuited the ``and`` chain).
    """
    # Only update identifier metadata iff...
    return bool(
        # DOI didn't just get created
        preprint and preprint.date_published and
        not (saved_fields and 'preprint_doi_created' in saved_fields) and
        # subjects aren't being set
        not old_subjects and
        # preprint isn't QA test
        preprint.should_request_identifiers
    )
def update_or_create_preprint_identifiers(preprint):
    """Ask the identifier service to (re)mint or refresh the preprint's DOI.

    Failures are swallowed after being reported to Sentry, so a DOI-service
    outage never breaks the surrounding save/update flow.
    """
    try:
        preprint.request_identifier_update(category='doi')
    except HTTPError as err:
        sentry.log_exception()
        sentry.log_message(err.args[0])
def update_or_enqueue_on_preprint_updated(preprint_id, update_share=True, share_type=None, old_subjects=None, saved_fields=None):
    """Schedule ``on_preprint_updated`` for this preprint, coalescing duplicates.

    If a task for the same preprint is already in the postcommit queue its
    kwargs are merged in place (so one task carries the union of all changes
    made during the request); otherwise a new task is enqueued.
    """
    task = get_task_from_postcommit_queue(
        'website.preprints.tasks.on_preprint_updated',
        predicate=lambda task: task.kwargs['preprint_id'] == preprint_id
    )
    if task:
        # Merge this call's arguments into the already-queued task.
        old_subjects = old_subjects or []
        task_subjects = task.kwargs['old_subjects'] or []
        task.kwargs['update_share'] = update_share or task.kwargs['update_share']
        task.kwargs['share_type'] = share_type or task.kwargs['share_type']
        task.kwargs['old_subjects'] = old_subjects + task_subjects
        # NOTE(review): assumes both saved_fields values are iterable here --
        # set().union(None) would raise TypeError; confirm callers always pass
        # a collection when a task is already queued.
        task.kwargs['saved_fields'] = list(set(task.kwargs['saved_fields']).union(saved_fields))
    else:
        enqueue_postcommit_task(
            on_preprint_updated,
            (),
            {'preprint_id': preprint_id, 'old_subjects': old_subjects, 'update_share': update_share, 'share_type': share_type, 'saved_fields': saved_fields},
            celery=True
        )
def update_preprint_share(preprint, old_subjects=None, share_type=None):
    """Push preprint metadata to SHARE when a SHARE endpoint is configured.

    Raises ValueError if the provider has no SHARE access token.
    """
    if not settings.SHARE_URL:
        return
    if not preprint.provider.access_token:
        raise ValueError('No access_token for {}. Unable to send {} to SHARE.'.format(preprint.provider, preprint))
    # Fall back to the provider's default publish type when none is given.
    _update_preprint_share(preprint, old_subjects, share_type or preprint.provider.share_publish_type)
def _update_preprint_share(preprint, old_subjects, share_type):
    """Synchronous SHARE push; hands 5xx failures to the retrying async task."""
    # Any modifications to this function may need to change _async_update_preprint_share
    payload = serialize_share_preprint_data(preprint, share_type, old_subjects)
    response = send_share_preprint_data(preprint, payload)
    try:
        response.raise_for_status()
    except Exception:
        if response.status_code >= 500:
            # Server-side failure: retry asynchronously with backoff.
            _async_update_preprint_share.delay(preprint._id, old_subjects, share_type)
        else:
            # Client-side failure will not succeed on retry; notify support.
            send_desk_share_preprint_error(preprint, response, 0)
@celery_app.task(bind=True, max_retries=4, acks_late=True)
def _async_update_preprint_share(self, preprint_id, old_subjects, share_type):
    """Celery task: push preprint metadata to SHARE, retrying on 5xx errors."""
    # Any modifications to this function may need to change _update_preprint_share
    # Takes preprint_id to ensure async retries push fresh data
    Preprint = apps.get_model('osf.Preprint')
    preprint = Preprint.load(preprint_id)
    data = serialize_share_preprint_data(preprint, share_type, old_subjects)
    resp = send_share_preprint_data(preprint, data)
    try:
        resp.raise_for_status()
    except Exception as e:
        if resp.status_code >= 500:
            if self.request.retries == self.max_retries:
                # Out of retries: escalate to support before the final raise.
                send_desk_share_preprint_error(preprint, resp, self.request.retries)
            # Exponential backoff with jitter, capped at ten minutes.
            raise self.retry(
                exc=e,
                countdown=(random.random() + 1) * min(60 + settings.CELERY_RETRY_BACKOFF_BASE ** self.request.retries, 60 * 10)
            )
        else:
            # 4xx responses will not succeed on retry; report once and stop.
            send_desk_share_preprint_error(preprint, resp, self.request.retries)
def serialize_share_preprint_data(preprint, share_type, old_subjects):
    """Wrap the preprint's SHARE graph in a NormalizedData API envelope."""
    attributes = {
        'tasks': [],
        'raw': None,
        'data': {'@graph': format_preprint(preprint, share_type, old_subjects)},
    }
    return {'data': {'type': 'NormalizedData', 'attributes': attributes}}
def send_share_preprint_data(preprint, data):
    """POST the serialized graph to SHARE's normalizeddata endpoint and
    return the raw response (status handling is left to the caller)."""
    endpoint = '{}api/v2/normalizeddata/'.format(settings.SHARE_URL)
    request_headers = {
        'Authorization': 'Bearer {}'.format(preprint.provider.access_token),
        'Content-Type': 'application/vnd.api+json',
    }
    resp = requests.post(endpoint, json=data, headers=request_headers)
    logger.debug(resp.content)
    return resp
def format_preprint(preprint, share_type, old_subjects=None):
    """Build the SHARE ``@graph`` node list describing *preprint*.

    Returns a list of serialized GraphNode dicts covering the work itself,
    its identifiers, tags, subjects (including explicit deletions for
    *old_subjects* no longer attached) and contributors.
    """
    if old_subjects is None:
        old_subjects = []
    from osf.models import Subject
    old_subjects = [Subject.objects.get(id=s) for s in old_subjects]
    # The central work node; every other node hangs off of it.
    preprint_graph = GraphNode(share_type, **{
        'title': preprint.title,
        'description': preprint.description or '',
        'is_deleted': (
            (not preprint.verified_publishable and not preprint.is_retracted) or
            preprint.tags.filter(name='qatest').exists()
        ),
        'date_updated': preprint.modified.isoformat(),
        'date_published': preprint.date_published.isoformat() if preprint.date_published else None
    })
    to_visit = [
        preprint_graph,
        GraphNode('workidentifier', creative_work=preprint_graph, uri=urljoin(settings.DOMAIN, preprint._id + '/'))
    ]
    if preprint.get_identifier('doi'):
        to_visit.append(GraphNode('workidentifier', creative_work=preprint_graph, uri='https://doi.org/{}'.format(preprint.get_identifier('doi').value)))
    if preprint.provider.domain_redirect_enabled:
        to_visit.append(GraphNode('workidentifier', creative_work=preprint_graph, uri=preprint.absolute_url))
    if preprint.article_doi:
        # Article DOI refers to a clone of this preprint on another system and therefore does not qualify as an identifier for this preprint
        related_work = GraphNode('creativework')
        to_visit.append(GraphNode('workrelation', subject=preprint_graph, related=related_work))
        to_visit.append(GraphNode('workidentifier', creative_work=related_work, uri='https://doi.org/{}'.format(preprint.article_doi)))
    # Empty tag names are filtered out before being sent to SHARE.
    preprint_graph.attrs['tags'] = [
        GraphNode('throughtags', creative_work=preprint_graph, tag=GraphNode('tag', name=tag))
        for tag in preprint.tags.values_list('name', flat=True) if tag
    ]
    current_subjects = [
        GraphNode('throughsubjects', creative_work=preprint_graph, is_deleted=False, subject=format_subject(s))
        for s in preprint.subjects.all()
    ]
    # Removed subjects are still emitted, flagged as deleted, so SHARE drops
    # them from its copy of the record.
    deleted_subjects = [
        GraphNode('throughsubjects', creative_work=preprint_graph, is_deleted=True, subject=format_subject(s))
        for s in old_subjects if not preprint.subjects.filter(id=s.id).exists()
    ]
    preprint_graph.attrs['subjects'] = current_subjects + deleted_subjects
    to_visit.extend(format_contributor(preprint_graph, user, preprint.get_visible(user), i) for i, user in enumerate(preprint.contributors))
    # Breadth-first walk over the graph, collecting each node exactly once.
    visited = set()
    to_visit.extend(preprint_graph.get_related())
    while True:
        if not to_visit:
            break
        n = to_visit.pop(0)
        if n in visited:
            continue
        visited.add(n)
        to_visit.extend(list(n.get_related()))
    return [node.serialize() for node in visited]
def send_desk_share_preprint_error(preprint, resp, retries):
    """Email OSF support about a SHARE submission that could not be delivered.

    *resp* is the failed SHARE response and *retries* the number of attempts
    already made; both are interpolated into the support email template.
    """
    mails.send_mail(
        to_addr=settings.OSF_SUPPORT_EMAIL,
        mail=mails.SHARE_PREPRINT_ERROR_DESK,
        preprint=preprint,
        resp=resp,
        retries=retries,
        can_change_preferences=False,
        logo=settings.OSF_PREPRINTS_LOGO
    )
| apache-2.0 |
cpollard1001/FreeCAD_sf_master | src/Tools/updatefromcrowdin.py | 6 | 11247 | #!/usr/bin/python
#***************************************************************************
#* *
#* Copyright (c) 2009 Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Library General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
'''
Usage:
updatefromcrowdin.py [options] [LANGCODE] [LANGCODE LANGCODE...]
Example:
./updatefromcrowdin.py [-d <directory>] fr nl pt_BR
Options:
-h or --help : prints this help text
-d or --directory : specifies a directory containing unzipped translation folders
-z or --zipfile : specifies a path to the freecad.zip file
-m or --module : specifies a single module name to be updated, instead of all modules
This command must be run from its current source tree location (/src/Tools)
so it can find the correct places to put the translation files. If run with
no arguments, the latest translations from crowdin will be downloaded, unzipped
and put to the correct locations. The necessary renaming of files and .qm generation
will be taken care of. The qrc files will also be updated when new
translations are added.
NOTE! The crowdin site only allows to download "builds" (zipped archives)
which must be built prior to downloading. This means a build might not
reflect the latest state of the translations. Better always make a build before
using this script!
You can specify a directory with the -d option if you already downloaded
and extracted the build, or you can specify a single module to update with -m.
You can also run the script without any language code, in which case all the
languages contained in the archive or directory will be added.
'''
import sys, os, shutil, tempfile, zipfile, getopt, StringIO, re
crowdinpath = "http://crowdin.net/download/project/freecad.zip"
# locations list contains Module name, relative path to translation folder, relative path to qrc file, and optionally
# a python rc file
locations = [["Arch","../Mod/Arch/Resources/translations","../Mod/Arch/Resources/Arch.qrc"],
["Assembly","../Mod/Assembly/Gui/Resources/translations","../Mod/Assembly/Gui/Resources/Assembly.qrc"],
["Complete","../Mod/Complete/Gui/Resources/translations","../Mod/Complete/Gui/Resources/Complete.qrc"],
["draft","../Mod/Draft/Resources/translations","../Mod/Draft/Resources/Draft.qrc"],
["Drawing","../Mod/Drawing/Gui/Resources/translations","../Mod/Drawing/Gui/Resources/Drawing.qrc"],
["Fem","../Mod/Fem/Gui/Resources/translations","../Mod/Fem/Gui/Resources/Fem.qrc"],
["FreeCAD","../Gui/Language","../Gui/Language/translation.qrc"],
["Image","../Mod/Image/Gui/Resources/translations","../Mod/Image/Gui/Resources/Image.qrc"],
["Mesh","../Mod/Mesh/Gui/Resources/translations","../Mod/Mesh/Gui/Resources/Mesh.qrc"],
["MeshPart","../Mod/MeshPart/Gui/Resources/translations","../Mod/MeshPart/Gui/Resources/MeshPart.qrc"],
["OpenSCAD","../Mod/OpenSCAD/Resources/translations","../Mod/OpenSCAD/Resources/OpenSCAD.qrc"],
["Part","../Mod/Part/Gui/Resources/translations","../Mod/Part/Gui/Resources/Part.qrc"],
["PartDesign","../Mod/PartDesign/Gui/Resources/translations","../Mod/PartDesign/Gui/Resources/PartDesign.qrc"],
["Points","../Mod/Points/Gui/Resources/translations","../Mod/Points/Gui/Resources/Points.qrc"],
["Raytracing","../Mod/Raytracing/Gui/Resources/translations","../Mod/Raytracing/Gui/Resources/Raytracing.qrc"],
["ReverseEngineering","../Mod/ReverseEngineering/Gui/Resources/translations","../Mod/ReverseEngineering/Gui/Resources/ReverseEngineering.qrc"],
["Robot","../Mod/Robot/Gui/Resources/translations","../Mod/Robot/Gui/Resources/Robot.qrc"],
["Sketcher","../Mod/Sketcher/Gui/Resources/translations","../Mod/Sketcher/Gui/Resources/Sketcher.qrc"],
["StartPage","../Mod/Start/Gui/Resources/translations","../Mod/Start/Gui/Resources/Start.qrc"],
["Test","../Mod/Test/Gui/Resources/translations","../Mod/Test/Gui/Resources/Test.qrc"],
["Ship","../Mod/Ship/resources/translations","../Mod/Ship/resources/Ship.qrc"],
["Plot","../Mod/Plot/resources/translations","../Mod/Plot/resources/Plot.qrc"],
["Web","../Mod/Web/Gui/Resources/translations","../Mod/Web/Gui/Resources/Web.qrc"],
["Spreadsheet","../Mod/Spreadsheet/Gui/Resources/translations","../Mod/Spreadsheet/Gui/Resources/Spreadsheet.qrc"],
["Path","../Mod/Path/Gui/Resources/translations","../Mod/Path/Gui/Resources/Path.qrc"],
]
default_languages = "af zh-CN zh-TW hr cs nl fi fr de hu ja no pl pt-PT ro ru sr es-ES sv-SE uk it pt-BR el sk tr"
def updateqrc(qrcpath,lncode):
"updates a qrc file with the given translation entry"
print "opening " + qrcpath + "..."
# getting qrc file contents
if not os.path.exists(qrcpath):
print "ERROR: Resource file " + qrcpath + " doesn't exist"
sys.exit()
f = open(qrcpath,"ro")
resources = []
for l in f.readlines():
resources.append(l)
f.close()
# checking for existing entry
name = "_" + lncode + ".qm"
for r in resources:
if name in r:
print "language already exists in qrc file"
return
# find the latest qm line
pos = None
for i in range(len(resources)):
if ".qm" in resources[i]:
pos = i
if pos == None:
print "No existing .qm file in this resource. Appending to the end position"
for i in range(len(resources)):
if "</qresource>" in resources[i]:
pos = i-1
if pos == None:
print "ERROR: couldn't add qm files to this resource: " + qrcpath
sys.exit()
# inserting new entry just after the last one
line = resources[pos]
if ".qm" in line:
line = re.sub("_.*\.qm","_"+lncode+".qm",line)
else:
print "ERROR: no existing qm entry in this resource: Please add one manually " + qrcpath
sys.exit()
print "inserting line: ",line
resources.insert(pos+1,line)
# writing the file
f = open(qrcpath,"wb")
for r in resources:
f.write(r)
f.close()
print "successfully updated ",qrcpath
def doFile(tsfilepath,targetpath,lncode,qrcpath):
    "updates a single ts file, and creates a corresponding qm file"
    # Module name is the .ts filename without its extension.
    basename = os.path.basename(tsfilepath)[:-3]
    # special fix of the draft filename...
    if basename == "draft": basename = "Draft"
    newname = basename + "_" + lncode + ".ts"
    newpath = targetpath + os.sep + newname
    shutil.copyfile(tsfilepath, newpath)
    # Compile the .ts into a binary .qm using Qt's lrelease tool.
    os.system("lrelease " + newpath)
    newqm = targetpath + os.sep + basename + "_" + lncode + ".qm"
    if not os.path.exists(newqm):
        print "ERROR: impossible to create " + newqm + ", aborting"
        sys.exit()
    # Make sure the module's .qrc references the newly created .qm file.
    updateqrc(qrcpath,lncode)
def doLanguage(lncode,fmodule=""):
    " treats a single language"
    if lncode == "en":
        # never treat "english" translation... For now :)
        return
    mods = []
    if fmodule:
        # Restrict the run to the one requested module (case-insensitive).
        for l in locations:
            if l[0].upper() == fmodule.upper():
                mods = [l]
    else:
        mods = locations
    if not mods:
        print "Error: Couldn't find module "+fmodule
        sys.exit()
    for target in mods:
        # target is [module name, translations dir, qrc path] (see `locations`)
        basefilepath = tempfolder + os.sep + lncode + os.sep + target[0] + ".ts"
        targetpath = os.path.abspath(target[1])
        qrcpath = os.path.abspath(target[2])
        doFile(basefilepath,targetpath,lncode,qrcpath)
    print lncode + " done!"
if __name__ == "__main__":
    args = sys.argv[1:]
    if len(args) < 1:
        print __doc__
        sys.exit()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hd:z:m:", ["help", "directory=","zipfile=", "module="])
    except getopt.GetoptError:
        print __doc__
        sys.exit()
    # checking on the options
    inputdir = ""
    inputzip = ""
    fmodule = ""
    for o, a in opts:
        if o in ("-h", "--help"):
            print __doc__
            sys.exit()
        if o in ("-d", "--directory"):
            inputdir = a
        if o in ("-z", "--zipfile"):
            inputzip = a
        if o in ("-m", "--module"):
            fmodule = a
    currentfolder = os.getcwd()
    if inputdir:
        # Use an already-extracted translation tree supplied by the user.
        tempfolder = os.path.realpath(inputdir)
        if not os.path.exists(tempfolder):
            print "ERROR: " + tempfolder + " not found"
            sys.exit()
    elif inputzip:
        # Extract a locally supplied freecad.zip into a scratch folder.
        tempfolder = tempfile.mkdtemp()
        print "creating temp folder " + tempfolder
        os.chdir(tempfolder)
        inputzip=os.path.realpath(inputzip)
        if not os.path.exists(inputzip):
            print "ERROR: " + inputzip + " not found"
            sys.exit()
        shutil.copy(inputzip,tempfolder)
        zfile=zipfile.ZipFile("freecad.zip")
        print "extracting freecad.zip..."
        zfile.extractall()
    else:
        # No local input: download the latest build straight from crowdin.
        tempfolder = tempfile.mkdtemp()
        print "creating temp folder " + tempfolder
        os.chdir(tempfolder)
        os.system("wget "+crowdinpath)
        if not os.path.exists("freecad.zip"):
            print "download failed!"
            sys.exit()
        zfile=zipfile.ZipFile("freecad.zip")
        print "extracting freecad.zip..."
        zfile.extractall()
    os.chdir(currentfolder)
    if not args:
        #args = [o for o in os.listdir(tempfolder) if o != "freecad.zip"]
        # do not treat all languages in the zip file. Some are not translated enough.
        args = default_languages.split()
    for ln in args:
        if not os.path.exists(tempfolder + os.sep + ln):
            print "ERROR: language path for " + ln + " not found!"
        else:
            doLanguage(ln,fmodule)
| lgpl-2.1 |
ccarouge/cwsl-ctools | utils/extract_timeseries.py | 3 | 2708 | #!/usr/bin/env python
""" Extract a JSON time series from a netCDF file.
Returns a JSON object with an array of numerical
data and a corresponding array of ISO strings.
"""
import argparse
import re
import json
import sys
import datetime as dt
import scipy.io.netcdf as nc
def main(args):
    """Extract a JSON timeseries from a netCDF file.

    Reads ``args.varname`` (expected dimensions (time, y, x)) from
    ``args.infile``, takes the series at grid point (args.x_val, args.y_val),
    converts the time axis to ISO date strings and writes a JSON object
    ``{"times": [...], <varname>: [...]}`` to ``args.outfile``.
    Exits with status 1 when the requested point is outside the grid.
    """
    # Open the netCDF file
    input_file = nc.netcdf_file(args.infile, 'r',
                                mmap=False)
    # Grab the variable
    input_var = input_file.variables[args.varname]
    # Valid indices run 0..extent-1, so values equal to the extent are out of
    # range too (the previous '>' comparison let an off-by-one through).
    if (args.x_val >= input_var.shape[2] or
            args.y_val >= input_var.shape[1]):
        input_file.close()
        print("ERROR: Given x or y vals are no good!")
        sys.exit(1)
    # Extract the data
    output_data = input_var[:, args.y_val, args.x_val]
    # Extract the associated times
    time_var = input_file.variables["time"]
    # scipy's netcdf reader hands attributes back as bytes on Python 3;
    # normalize to str before doing any text matching.
    units = time_var.units
    if isinstance(units, bytes):
        units = units.decode('ascii')
    calendar = time_var.calendar
    if isinstance(calendar, bytes):
        calendar = calendar.decode('ascii')
    # This is a little brittle - this extraction makes an
    # assumption about the use of a standard calendar.
    units_re = r"^(?P<unit>\S+) since (?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+).+$"
    match_dict = re.match(units_re, units).groupdict()
    start_date = dt.date(int(match_dict["year"]), int(match_dict["month"]),
                         int(match_dict["day"]))
    if match_dict["unit"] != "days":
        raise Exception("Time unit: {} not understood"
                        .format(match_dict["unit"]))
    if calendar != "standard":
        print("WARNING: calendar {} is not standard. JSON date output may be incorrect"
              .format(calendar))
    output_times = map(lambda date: start_date + dt.timedelta(days=int(date)),
                       time_var)
    output_strings = [datething.isoformat()
                      for datething in output_times]
    # Write it out using json.dumps
    output = {"times": output_strings,
              args.varname: output_data.tolist()}
    with open(args.outfile, 'w') as output_file:
        output_file.write(json.dumps(output))
    # Close the netCDF file
    input_file.close()
if __name__ == "__main__":
    # Command-line front end: positional input/output paths, the variable
    # name, and the integer grid indices of the point to extract.
    parser = argparse.ArgumentParser("Extract a JSON timeseries from a netCDF")
    parser.add_argument("infile", help="The input netCDF")
    parser.add_argument("outfile", help="The output JSON")
    parser.add_argument("varname", help="The variable to extract")
    parser.add_argument("x_val", help="The x value to extract",
                        type=int)
    parser.add_argument("y_val", help="The y value to extract",
                        type=int)
    args = parser.parse_args()
    main(args)
| apache-2.0 |
homework/nox | src/nox/lib/packet/t/dns_parse_test.py | 10 | 5255 | # Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
from nox.lib.packet.dns import *
from nox.coreapps.testharness.testdefs import *
# Copied from curses so you don't have to import
# another package which is not standard on some
# Linux distros
def _ctoi(c):
if type(c) == type(""):
return ord(c)
else:
return c
def isascii(c): return _ctoi(c) <= 127
dns_test_1 = "\xa1\xdb\x81\x80\x00\x01\x00\x02\x00\x09\x00\x04\x05\x66\x61\x72\x6d\x33\x06\x73\x74\x61\x74\x69\x63\x06\x66\x6c\x69\x63\x6b\x72\x03\x63\x6f\x6d\x00\x00\x01\x00\x01\xc0\x0c\x00\x05\x00\x01\x00\x00\x01\x2c\x00\x27\x05\x66\x61\x72\x6d\x33\x06\x73\x74\x61\x74\x69\x63\x06\x66\x6c\x69\x63\x6b\x72\x06\x79\x61\x68\x6f\x6f\x33\x06\x61\x6b\x61\x64\x6e\x73\x03\x6e\x65\x74\x00\xc0\x35\x00\x01\x00\x01\x00\x00\x01\x2c\x00\x04\x45\x93\x5a\x9e\xc0\x50\x00\x02\x00\x01\x00\x02\x85\xba\x00\x08\x05\x61\x73\x69\x61\x39\xc0\x50\xc0\x50\x00\x02\x00\x01\x00\x02\x85\xba\x00\x0f\x02\x7a\x61\x06\x61\x6b\x61\x64\x6e\x73\x03\x6f\x72\x67\x00\xc0\x50\x00\x02\x00\x01\x00\x02\x85\xba\x00\x05\x02\x7a\x64\xc0\x8f\xc0\x50\x00\x02\x00\x01\x00\x02\x85\xba\x00\x05\x02\x7a\x62\xc0\x8f\xc0\x50\x00\x02\x00\x01\x00\x02\x85\xba\x00\x07\x04\x65\x75\x72\x31\xc0\x50\xc0\x50\x00\x02\x00\x01\x00\x02\x85\xba\x00\x07\x04\x75\x73\x65\x33\xc0\x50\xc0\x50\x00\x02\x00\x01\x00\x02\x85\xba\x00\x07\x04\x75\x73\x77\x32\xc0\x50\xc0\x50\x00\x02\x00\x01\x00\x02\x85\xba\x00\x05\x02\x7a\x63\xc0\x8f\xc0\x50\x00\x02\x00\x01\x00\x02\x85\xba\x00\x07\x04\x75\x73\x65\x34\xc0\x50\xc0\x8c\x00\x01\x00\x01\x00\x02\x85\xbb\x00\x04\xc3\xdb\x03\xa9\xc0\xb8\x00\x01\x00\x01\x00\x02\x85\xbb\x00\x04\xce\x84\x64\x69\xc1\x02\x00\x01\x00\x01\x00\x02\x85\xbb\x00\x04\x7c\xd3\x28\x04\xc0\xa7\x00\x01\x00\x01\x00\x02\x85\xbb\x00\x04\x3f\xd1\x03\x84"
dns_test_2 = "\xb8\xc1\x81\x80\x00\x01\x00\x08\x00\x09\x00\x04\x05\x70\x69\x78\x65\x6c\x0a\x71\x75\x61\x6e\x74\x73\x65\x72\x76\x65\x03\x63\x6f\x6d\x00\x00\x01\x00\x01\xc0\x0c\x00\x05\x00\x01\x00\x01\x50\x54\x00\x1f\x03\x67\x65\x6f\x0a\x71\x75\x61\x6e\x74\x73\x65\x72\x76\x65\x03\x63\x6f\x6d\x06\x61\x6b\x61\x64\x6e\x73\x03\x6e\x65\x74\x00\xc0\x32\x00\x01\x00\x01\x00\x00\x00\x78\x00\x04\x04\x4e\xf3\x26\xc0\x32\x00\x01\x00\x01\x00\x00\x00\x78\x00\x04\x04\x4e\xf3\x24\xc0\x32\x00\x01\x00\x01\x00\x00\x00\x78\x00\x04\x04\x4e\xf3\x22\xc0\x32\x00\x01\x00\x01\x00\x00\x00\x78\x00\x04\x04\x4e\xf3\x20\xc0\x32\x00\x01\x00\x01\x00\x00\x00\x78\x00\x04\x04\x4e\xf3\x23\xc0\x32\x00\x01\x00\x01\x00\x00\x00\x78\x00\x04\x04\x4e\xf3\x21\xc0\x32\x00\x01\x00\x01\x00\x00\x00\x78\x00\x04\x04\x4e\xf3\x25\xc0\x45\x00\x02\x00\x01\x00\x01\x8f\xe9\x00\x07\x04\x75\x73\x77\x32\xc0\x45\xc0\x45\x00\x02\x00\x01\x00\x01\x8f\xe9\x00\x07\x04\x75\x73\x65\x33\xc0\x45\xc0\x45\x00\x02\x00\x01\x00\x01\x8f\xe9\x00\x0f\x02\x7a\x62\x06\x61\x6b\x61\x64\x6e\x73\x03\x6f\x72\x67\x00\xc0\x45\x00\x02\x00\x01\x00\x01\x8f\xe9\x00\x05\x02\x7a\x61\xc0\xf6\xc0\x45\x00\x02\x00\x01\x00\x01\x8f\xe9\x00\x05\x02\x7a\x63\xc0\xf6\xc0\x45\x00\x02\x00\x01\x00\x01\x8f\xe9\x00\x05\x02\x7a\x64\xc0\xf6\xc0\x45\x00\x02\x00\x01\x00\x01\x8f\xe9\x00\x08\x05\x61\x73\x69\x61\x39\xc0\x45\xc0\x45\x00\x02\x00\x01\x00\x01\x8f\xe9\x00\x07\x04\x75\x73\x65\x34\xc0\x45\xc0\x45\x00\x02\x00\x01\x00\x01\x8f\xe9\x00\x07\x04\x65\x75\x72\x31\xc0\x45\xc1\x0e\x00\x01\x00\x01\x00\x01\x8f\xea\x00\x04\xc3\xdb\x03\xa9\xc0\xf3\x00\x01\x00\x01\x00\x01\x8f\xea\x00\x04\xce\x84\x64\x69\xc1\x1f\x00\x01\x00\x01\x00\x01\x8f\xea\x00\x04\x7c\xd3\x28\x04\xc1\x30\x00\x01\x00\x01\x00\x01\x8f\xea\x00\x04\x3f\xd1\x03\x84"
def check_string_for_nonascii(checkme):
    """Report (via the nox test harness) any non-ASCII character in *checkme*."""
    for ch in checkme:
        if not isascii(ch):
            nox_test_assert(0, 'Not ASCII')
def check_name(list):
    """Run the ASCII check over the .name field of every record in *list*."""
    for record in list:
        check_string_for_nonascii(record.name)
def test_dns_2():
    """test complex dns packet"""
    parsed = dns(dns_test_2)
    assert parsed
    # Section counts expected for this capture, checked in wire order.
    for attr, count, label in (('questions', 1, 'Questions'),
                               ('answers', 8, 'Answers'),
                               ('authorities', 9, 'Authorities'),
                               ('additional', 4, 'Additional')):
        nox_test_assert(len(getattr(parsed, attr)) == count, label)
    # Every record name in every section must be plain ASCII.
    for section in (parsed.questions, parsed.answers,
                    parsed.authorities, parsed.additional):
        check_name(section)
def test_dns_1():
    """test complex dns packet"""
    parsed = dns(dns_test_1)
    assert parsed
    # Section counts expected for this capture, checked in wire order.
    for attr, count, label in (('questions', 1, 'Questions'),
                               ('answers', 2, 'Answers'),
                               ('authorities', 9, 'Authorities'),
                               ('additional', 4, 'Additional')):
        nox_test_assert(len(getattr(parsed, attr)) == count, label)
    # Every record name in every section must be plain ASCII.
    for section in (parsed.questions, parsed.answers,
                    parsed.authorities, parsed.additional):
        check_name(section)
| gpl-3.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/Django-1.6.10-py2.7.egg/django/contrib/gis/sitemaps/georss.py | 314 | 2134 | from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
class GeoRSSSitemap(Sitemap):
    """
    A minimal hook to produce sitemaps for GeoRSS feeds.
    """
    def __init__(self, feed_dict, slug_dict=None):
        """
        Initialize from a feed dictionary (as would be passed to
        `django.contrib.gis.views.feed`) and an optional slug dictionary.

        When no slug dictionary is given, each feed-dict key is assumed to be
        the URL parameter of its feed.  For complex feeds (e.g. overriding
        `get_object`), supply a slug dictionary with the same keys as the
        feed dictionary, where each value is a sequence of valid slugs, e.g.:

            feed_dict = {'zipcode' : ZipFeed}
            slug_dict = {'zipcode' : ['77002', '77054']}
        """
        self.feed_dict = feed_dict
        self.locations = []
        slugs = slug_dict or {}
        # Build the list of feed locations, expanding slugged sections.
        for section in feed_dict:
            section_slugs = slugs.get(section)
            if section_slugs:
                self.locations.extend('%s/%s' % (section, slug)
                                      for slug in section_slugs)
            else:
                self.locations.append(section)

    def get_urls(self, page=1, site=None):
        """
        Overridden so the appropriate `geo_format` attribute is placed on
        each URL element.
        """
        urls = Sitemap.get_urls(self, page=page, site=site)
        for url in urls:
            url['geo_format'] = 'georss'
        return urls

    def items(self):
        return self.locations

    def location(self, obj):
        return urlresolvers.reverse('django.contrib.gis.views.feed', args=(obj,))
| apache-2.0 |
ovnicraft/server-tools | auth_dynamic_groups/model/res_groups.py | 23 | 2540 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions
from openerp.tools.safe_eval import safe_eval
from openerp import _
class res_groups(models.Model):
    """res.groups extension: a group flagged as dynamic computes its
    membership from a python condition evaluated at login time."""
    _inherit = 'res.groups'
    # Marks the group as dynamically evaluated.
    is_dynamic = fields.Boolean('Dynamic')
    # Python expression; `user` is injected as a browse record at eval time.
    dynamic_group_condition = fields.Text(
        'Condition', help='The condition to be met for a user to be a '
        'member of this group. It is evaluated as python code at login '
        'time, you get `user` passed as a browse record')
    @api.multi
    def eval_dynamic_group_condition(self, uid=None):
        """Return True iff every group in the recordset evaluates truthy for
        the given user (the current user when *uid* is None)."""
        user = self.env['res.users'].browse([uid]) if uid else self.env.user
        # An empty/unset condition falls back to 'False', so a dynamic group
        # with no condition never grants membership.
        result = all(
            self.mapped(
                lambda this: safe_eval(
                    this.dynamic_group_condition or 'False',
                    {
                        'user': user.sudo(),
                        'any': any,
                        'all': all,
                        'filter': filter,
                    })))
        return result
    @api.multi
    @api.constrains('dynamic_group_condition')
    def _check_dynamic_group_condition(self):
        """Constraint: reject conditions that fail to evaluate at all, so a
        broken expression surfaces at save time instead of at login."""
        try:
            self.filtered('is_dynamic').eval_dynamic_group_condition()
        except (NameError, SyntaxError, TypeError):
            raise exceptions.ValidationError(
                _('The condition doesn\'t evaluate correctly!'))
    @api.multi
    def action_evaluate(self):
        """Recompute dynamic-group membership for every user in the database."""
        res_users = self.env['res.users']
        for user in res_users.search([]):
            res_users.update_dynamic_groups(user.id, self.env.cr.dbname)
| agpl-3.0 |
basiccoin/basiccoin | contrib/testgen/base58.py | 2139 | 2818 | '''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Thin shim so the code below can call SHA256.new(...) in the style of
    # the old PyCrypto API while delegating to hashlib.
    new = hashlib.sha256

if str != bytes:
    # Python 3.x: iterating bytes already yields ints and bytes are built
    # from int tuples, so neutralize ord() and redefine chr() accordingly
    # for the byte-oriented code below.
    def ord(c):
        return c
    def chr(n):
        return bytes( (n,) )

# Base58 alphabet (Bitcoin variant: no 0, O, I, l).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
    """ encode v, which is a string of bytes, to base58.
    """
    # Interpret v as a big-endian base-256 integer.
    acc = 0
    for idx, ch in enumerate(v[::-1]):
        acc += (256 ** idx) * ord(ch)
    # Repeatedly peel off base-58 digits, most significant first.
    encoded = ''
    while acc >= __b58base:
        acc, rem = divmod(acc, __b58base)
        encoded = __b58chars[rem] + encoded
    encoded = __b58chars[acc] + encoded
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    pad = 0
    for ch in v:
        if ch == '\0':
            pad += 1
        else:
            break
    return (__b58chars[0] * pad) + encoded
def b58decode(v, length = None):
    """ decode v into a string of len bytes
    """
    # Interpret v as a big-endian base-58 integer.
    acc = 0
    for idx, ch in enumerate(v[::-1]):
        acc += __b58chars.find(ch) * (__b58base ** idx)
    # Peel off base-256 digits, most significant first.
    decoded = bytes()
    while acc >= 256:
        acc, rem = divmod(acc, 256)
        decoded = chr(rem) + decoded
    decoded = chr(acc) + decoded
    # Restore leading zero bytes compressed as leading '1' characters.
    pad = 0
    for ch in v:
        if ch == __b58chars[0]:
            pad += 1
        else:
            break
    decoded = chr(0) * pad + decoded
    if length is not None and len(decoded) != length:
        return None
    return decoded
def checksum(v):
    """Return 32-bit checksum based on SHA256"""
    # Double SHA256, truncated to the first 4 bytes.
    return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum"""
    # The 4 checksum bytes are appended to the payload before encoding.
    return b58encode(v + checksum(v))
def b58decode_chk(v):
    """decode a base58 string, check and remove checksum"""
    result = b58decode(v)
    if result is None:
        return None
    # Compare the trailing 4 checksum bytes against a checksum recomputed
    # over the payload.  (The original computed this twice, binding the
    # first result to an unused local `h3` — the duplicate call is removed.)
    if result[-4:] == checksum(result[:-4]):
        return result[:-4]
    else:
        return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    # A valid decoded address is exactly 21 bytes: 1 version byte + 20-byte hash.
    addr = b58decode_chk(strAddress)
    if addr is None or len(addr)!=21: return None
    version = addr[0]
    # NOTE: on Python 3, addr[0] is already an int; this relies on the
    # module-level ord() override (identity) defined above.
    return ord(version)
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # Use ==, not "is": identity comparison against an int literal is
    # implementation-dependent and emits SyntaxWarning on CPython >= 3.8.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| mit |
jeffknupp/sandman | tests/test_foreign_keys.py | 5 | 1692 | """Test foreign key edge cases."""
import os
import shutil
import json
from sandman import app
class TestSandmanForeignKeysBase(object):
    """Class to test edge-case foreign key conditions, using a database
    explicitly built to contain these cases."""
    # Working copy of the fixture database; recreated for every test.
    DB_LOCATION = os.path.join(os.getcwd(), 'tests', 'foreign_key.sqlite3')
    def setup_method(self, _):
        """Grab the database file from the *data* directory and configure the
        app."""
        # Copy the pristine fixture so each test mutates its own database.
        shutil.copy(
            os.path.join(
                os.getcwd(),
                'tests',
                'data',
                'foreign_key.sqlite3'),
            self.DB_LOCATION)
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////' + self.DB_LOCATION
        app.config['SANDMAN_SHOW_PKS'] = False
        app.config['SANDMAN_GENERATE_PKS'] = True
        app.config['TESTING'] = True
        self.app = app.test_client()
        # Importing registers the fixture models with sandman (side effect).
        # pylint: disable=unused-variable
        from . import foreign_key_models
    def test_get(self):
        """Test simple HTTP GET, enough to cover all cases for now."""
        response = self.app.get('/job_schedules')
        assert len(json.loads(response.get_data(as_text=True))[u'resources']) == 1
    def test_date_time(self):
        """Test serializing a datetime object works properly."""
        response = self.app.get('/date_times')
        assert len(json.loads(response.get_data(as_text=True))[u'resources']) == 1
    def teardown_method(self, _):
        """Remove the database file copied during setup."""
        os.unlink(self.DB_LOCATION)
        # pylint: disable=attribute-defined-outside-init
        self.app = None
| apache-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/test/test_nntplib.py | 45 | 1652 | import socket
import nntplib
import time
import unittest
try:
import threading
except ImportError:
threading = None
from unittest import TestCase
from test import test_support
HOST = test_support.HOST
def server(evt, serv, evil=False):
    """One-shot NNTP stub: accept a single connection on *serv*, send a
    greeting (an over-long one when *evil*), then close and signal *evt*."""
    serv.listen(5)
    try:
        try:
            conn, addr = serv.accept()
        except socket.timeout:
            # No client showed up within the socket timeout; just shut down.
            pass
        else:
            greeting = "1 I'm too long response" * 3000 + "\n" if evil else "1 I'm OK response\n"
            conn.send(greeting)
            conn.close()
    finally:
        # Always release the listening socket and unblock tearDown().
        serv.close()
        evt.set()
class BaseServerTest(TestCase):
    """Common fixture: runs server() in a background thread on an ephemeral
    local port.  Subclasses set the `evil` class attribute."""
    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Bound accept() wait so a client-less test cannot hang forever.
        self.sock.settimeout(3)
        self.port = test_support.bind_port(self.sock)
        threading.Thread(
            target=server,
            args=(self.evt, self.sock, self.evil)).start()
        # Give the server thread a moment to reach accept().
        time.sleep(.1)
    def tearDown(self):
        # server() sets the event in its finally block, i.e. after closing
        # the socket — so waiting here guarantees the port is released.
        self.evt.wait()
@unittest.skipUnless(threading, 'threading required')
class ServerTests(BaseServerTest):
    """Well-behaved server: a plain NNTP connect must succeed."""
    evil = False
    def test_basic_connect(self):
        nntp = nntplib.NNTP('localhost', self.port)
        nntp.sock.close()
@unittest.skipUnless(threading, 'threading required')
class EvilServerTests(BaseServerTest):
    """Misbehaving server: an over-long greeting line must be rejected."""
    evil = True
    def test_too_long_line(self):
        self.assertRaises(nntplib.NNTPDataError,
                          nntplib.NNTP, 'localhost', self.port)
def test_main(verbose=None):
    """Entry point used by the regression-test driver."""
    test_support.run_unittest(EvilServerTests)
    test_support.run_unittest(ServerTests)
if __name__ == '__main__':
    test_main()
| mit |
hlkline/SU2 | SU2_PY/set_ffd_design_var.py | 2 | 9784 | #!/usr/bin/env python
## \file set_ffd_design_var.py
# \brief Python script for automatically generating a list of FFD variables.
# \author T. Economon, F. Palacios
# \version 6.1.0 "Falcon"
#
# The current SU2 release has been coordinated by the
# SU2 International Developers Society <www.su2devsociety.org>
# with selected contributions from the open-source community.
#
# The main research teams contributing to the current release are:
# - Prof. Juan J. Alonso's group at Stanford University.
# - Prof. Piero Colonna's group at Delft University of Technology.
# - Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# - Prof. Alberto Guardone's group at Polytechnic University of Milan.
# - Prof. Rafael Palacios' group at Imperial College London.
# - Prof. Vincent Terrapon's group at the University of Liege.
# - Prof. Edwin van der Weide's group at the University of Twente.
# - Lab. of New Concepts in Aeronautics at Tech. Institute of Aeronautics.
#
# Copyright 2012-2018, Francisco D. Palacios, Thomas D. Economon,
# Tim Albring, and the SU2 contributors.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# make print(*args) function available in PY2.6+, does'nt work on PY < 2.6
from __future__ import print_function
from optparse import OptionParser
from numpy import *
# Command-line interface: FFD box degrees, box id, design surface marker,
# twist axis, bump scale and problem dimension.
parser = OptionParser()
parser.add_option("-i", "--iDegree", dest="iDegree", default=4,
                  help="i degree of the FFD box", metavar="IDEGREE")
parser.add_option("-j", "--jDegree", dest="jDegree", default=4,
                  help="j degree of the FFD box", metavar="JDEGREE")
parser.add_option("-k", "--kDegree", dest="kDegree", default=1,
                  help="k degree of the FFD box", metavar="KDEGREE")
parser.add_option("-b", "--ffdid", dest="ffd_id", default=0,
                  help="ID of the FFD box", metavar="FFD_ID")
parser.add_option("-m", "--marker", dest="marker",
                  help="marker name of the design surface", metavar="MARKER")
parser.add_option("-a", "--axis", dest="axis",
                  help="axis to define twist 'x_Orig, y_Orig, z_Orig, x_End, y_End, z_End'", metavar="AXIS")
parser.add_option("-s", "--scale", dest="scale", default=1.0,
                  help="scale factor for the bump functions", metavar="SCALE")
parser.add_option("-d", "--dimension", dest="dimension", default=3.0,
                  help="dimension of the problem", metavar="DIMENSION")
(options, args)=parser.parse_args()
# Process options
# Number of control points per direction is polynomial degree + 1.
options.iOrder = int(options.iDegree) + 1
options.jOrder = int(options.jDegree) + 1
options.kOrder = int(options.kDegree) + 1
options.ffd_id = str(options.ffd_id)
options.marker = str(options.marker)
options.axis   = str(options.axis)
options.scale  = float(options.scale)
options.dim    = int(options.dimension)
if options.dim == 3:
    # 3-D case: print one DEFINITION_DV= line per design-variable family.
    # Each entry is "( <dv_kind>, <scale> | <marker> | <ffd_id>, i, j, k, dir )".
    # FFD_CONTROL_POINT in each Cartesian direction (dv kind 7).
    print(" ")
    print("% FFD_CONTROL_POINT (X)")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for kIndex in range(options.kOrder):
        for jIndex in range(options.jOrder):
            for iIndex in range(options.iOrder):
                iVariable = iVariable + 1
                dvList = dvList + "( 7, " + str(options.scale) + " | " + options.marker + " | "
                dvList = dvList + options.ffd_id + ", " + str(iIndex) + ", " + str(jIndex) + ", " + str(kIndex) + ", 1.0, 0.0, 0.0 )"
                if iVariable < (options.iOrder*(options.jOrder)*options.kOrder):
                    dvList = dvList + "; "
    print(dvList)
    print(" ")
    print("% FFD_CONTROL_POINT (Y)")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for kIndex in range(options.kOrder):
        for jIndex in range(options.jOrder):
            for iIndex in range(options.iOrder):
                iVariable = iVariable + 1
                dvList = dvList + "( 7, " + str(options.scale) + " | " + options.marker + " | "
                dvList = dvList + options.ffd_id + ", " + str(iIndex) + ", " + str(jIndex) + ", " + str(kIndex) + ", 0.0, 1.0, 0.0 )"
                if iVariable < (options.iOrder*(options.jOrder)*options.kOrder):
                    dvList = dvList + "; "
    print(dvList)
    print(" ")
    print("% FFD_CONTROL_POINT (Z)")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for kIndex in range(options.kOrder):
        for jIndex in range(options.jOrder):
            for iIndex in range(options.iOrder):
                iVariable = iVariable + 1
                dvList = dvList + "( 7, " + str(options.scale) + " | " + options.marker + " | "
                dvList = dvList + options.ffd_id + ", " + str(iIndex) + ", " + str(jIndex) + ", " + str(kIndex) + ", 0.0, 0.0, 1.0 )"
                if iVariable < (options.iOrder*(options.jOrder)*options.kOrder):
                    dvList = dvList + "; "
    print(dvList)
    # FFD_NACELLE (dv kind 22): only half the j range (plus center line) is
    # needed because of symmetry.  NOTE: use floor division (//) — with the
    # original "/" this crashed under Python 3 because range() rejects floats.
    print(" ")
    print("% FFD_NACELLE (RHO)")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for kIndex in range(options.kOrder):
        for jIndex in range(1+options.jOrder//2):
            for iIndex in range(options.iOrder):
                iVariable = iVariable + 1
                dvList = dvList + "( 22, " + str(options.scale) + " | " + options.marker + " | "
                dvList = dvList + options.ffd_id + ", " + str(iIndex) + ", " + str(jIndex) + ", " + str(kIndex) + ", 1.0, 0.0 )"
                if iVariable < (options.iOrder*(1+options.jOrder//2)*options.kOrder):
                    dvList = dvList + "; "
    print(dvList)
    print(" ")
    print("% FFD_NACELLE (PHI)")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for kIndex in range(options.kOrder):
        for jIndex in range(1+options.jOrder//2):
            for iIndex in range(options.iOrder):
                iVariable = iVariable + 1
                dvList = dvList + "( 22, " + str(options.scale) + " | " + options.marker + " | "
                dvList = dvList + options.ffd_id + ", " + str(iIndex) + ", " + str(jIndex) + ", " + str(kIndex) + ", 0.0, 1.0 )"
                if iVariable < (options.iOrder*(1+options.jOrder//2)*options.kOrder):
                    dvList = dvList + "; "
    print(dvList)
    # Z control points skipping a 2-point border in every direction, for FFD
    # boxes whose faces intersect the geometry more than once.
    print(" ")
    print("% FFD_CONTROL_POINT (Z) (MULTIPLE INTERSECTIONS)")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for kIndex in range(options.kOrder-4):
        for jIndex in range(options.jOrder-4):
            for iIndex in range(options.iOrder-4):
                iVariable = iVariable + 1
                dvList = dvList + "( 7, " + str(options.scale) + " | " + options.marker + " | "
                dvList = dvList + options.ffd_id + ", " + str(iIndex+2) + ", " + str(jIndex+2) + ", " + str(kIndex+2) + ", 0.0, 0.0, 1.0 )"
                if iVariable < (options.iOrder*(options.jOrder)*options.kOrder):
                    dvList = dvList + "; "
    print(dvList)
    # Combined camber (11) / thickness (12) surface variables plus one twist
    # variable (19) per j section around the user-supplied axis.
    print(" ")
    print("% FFD_CAMBER, FFD_THICKNESS, FFS_TWIST")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for jIndex in range(options.jOrder):
        for iIndex in range(options.iOrder):
            iVariable = iVariable + 1
            dvList = dvList + "( 11, " + str(options.scale) + " | " + options.marker + " | "
            dvList = dvList + options.ffd_id + ", " + str(iIndex) + ", " + str(jIndex) + " )"
            dvList = dvList + "; "
    iVariable = 0
    for jIndex in range(options.jOrder):
        for iIndex in range(options.iOrder):
            iVariable = iVariable + 1
            dvList = dvList + "( 12, " + str(options.scale) + " | " + options.marker + " | "
            dvList = dvList + options.ffd_id + ", " + str(iIndex) + ", " + str(jIndex) + " )"
            dvList = dvList + "; "
    iVariable = 0
    for jIndex in range(options.jOrder):
        iVariable = iVariable + 1
        dvList = dvList + "( 19, " + str(options.scale) + " | " + options.marker + " | "
        dvList = dvList + options.ffd_id + ", " + str(jIndex) + ", " + options.axis + " )"
        if iVariable < (options.jOrder):
            dvList = dvList + "; "
    print(dvList)
if options.dim == 2:
    # 2-D case: control points (dv kind 15) in X and Y, then camber (16)
    # and thickness (17) variables — one entry per FFD control point.
    print(" ")
    print("% FFD_CONTROL_POINT (X)")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for jIndex in range(options.jOrder):
        for iIndex in range(options.iOrder):
            iVariable = iVariable + 1
            dvList = dvList + "( 15, " + str(options.scale) + " | " + options.marker + " | "
            dvList = dvList + options.ffd_id + ", " + str(iIndex) + ", " + str(jIndex) + ", 1.0, 0.0 )"
            # Separator after every entry except the last.
            if iVariable < (options.iOrder*options.jOrder):
                dvList = dvList + "; "
    print(dvList)
    print(" ")
    print("% FFD_CONTROL_POINT (Y)")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for jIndex in range(options.jOrder):
        for iIndex in range(options.iOrder):
            iVariable = iVariable + 1
            dvList = dvList + "( 15, " + str(options.scale) + " | " + options.marker + " | "
            dvList = dvList + options.ffd_id + ", " + str(iIndex) + ", " + str(jIndex) + ", 0.0, 1.0 )"
            if iVariable < (options.iOrder*options.jOrder):
                dvList = dvList + "; "
    print(dvList)
    print(" ")
    print("FFD_CAMBER & FFD_THICKNESS")
    iVariable = 0
    dvList = "DEFINITION_DV= "
    for iIndex in range(options.iOrder):
        iVariable = iVariable + 1
        dvList = dvList + "( 16, " + str(options.scale) + " | " + options.marker + " | "
        dvList = dvList + options.ffd_id + ", " + str(iIndex) + " )"
        dvList = dvList + "; "
    iVariable = 0
    for iIndex in range(options.iOrder):
        iVariable = iVariable + 1
        dvList = dvList + "( 17, " + str(options.scale) + " | " + options.marker + " | "
        dvList = dvList + options.ffd_id + ", " + str(iIndex) + " )"
        if iVariable < (options.iOrder):
            dvList = dvList + "; "
    print(dvList)
| lgpl-2.1 |
dlazz/ansible | lib/ansible/galaxy/login.py | 2 | 4573 | ########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import json
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.six.moves import input
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlparse
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
from ansible.utils.color import stringc
from ansible.utils.display import Display
display = Display()
class GalaxyLogin(object):
    ''' Class to handle authenticating user with Galaxy API prior to performing CUD operations '''
    # GitHub OAuth-authorizations endpoint used for token management.
    GITHUB_AUTH = 'https://api.github.com/authorizations'
    def __init__(self, galaxy, github_token=None):
        self.galaxy = galaxy
        self.github_username = None
        self.github_password = None
        # Only prompt interactively when no pre-made token was supplied.
        if github_token is None:
            self.get_credentials()
    def get_credentials(self):
        """Interactively prompt for GitHub username and password; raises
        AnsibleError if either ends up empty."""
        display.display(u'\n\n' + "We need your " + stringc("Github login", 'bright cyan') +
                        " to identify you.", screen_only=True)
        display.display("This information will " + stringc("not be sent to Galaxy", 'bright cyan') +
                        ", only to " + stringc("api.github.com.", "yellow"), screen_only=True)
        display.display("The password will not be displayed." + u'\n\n', screen_only=True)
        display.display("Use " + stringc("--github-token", 'yellow') +
                        " if you do not want to enter your password." + u'\n\n', screen_only=True)
        # Prompt failures (e.g. interrupted input) are deliberately ignored;
        # the emptiness check below reports the problem uniformly.
        try:
            self.github_username = input("Github Username: ")
        except Exception:
            pass
        try:
            self.github_password = getpass.getpass("Password for %s: " % self.github_username)
        except Exception:
            pass
        if not self.github_username or not self.github_password:
            raise AnsibleError("Invalid Github credentials. Username and password are required.")
    def remove_github_token(self):
        '''
        If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot
        retrieve the token after creation, so we are forced to create a new one.
        '''
        try:
            tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
                                        url_password=self.github_password, force_basic_auth=True,))
        except HTTPError as e:
            # GitHub returns a JSON body with a 'message' field on error.
            res = json.load(e)
            raise AnsibleError(res['message'])
        for token in tokens:
            # Only delete tokens previously created by this tool.
            if token['note'] == 'ansible-galaxy login':
                display.vvvvv('removing token: %s' % token['token_last_eight'])
                try:
                    open_url('https://api.github.com/authorizations/%d' % token['id'], url_username=self.github_username,
                             url_password=self.github_password, method='DELETE', force_basic_auth=True)
                except HTTPError as e:
                    res = json.load(e)
                    raise AnsibleError(res['message'])
    def create_github_token(self):
        '''
        Create a personal authorization token with a note of 'ansible-galaxy login'
        '''
        # Remove any stale token first — the token value is only visible at
        # creation time, so an old one cannot be reused.
        self.remove_github_token()
        args = json.dumps({"scopes": ["public_repo"], "note": "ansible-galaxy login"})
        try:
            data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
                                      url_password=self.github_password, force_basic_auth=True, data=args))
        except HTTPError as e:
            res = json.load(e)
            raise AnsibleError(res['message'])
        return data['token']
| gpl-3.0 |
anryko/ansible | test/units/modules/network/edgeswitch/test_edgeswitch_facts.py | 23 | 3092 | # (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.edgeswitch import edgeswitch_facts
from units.modules.utils import set_module_args
from .edgeswitch_module import TestEdgeswitchModule, load_fixture
class TestEdgeswitchFactsModule(TestEdgeswitchModule):
    """Unit tests for edgeswitch_facts, driven by recorded CLI output
    fixtures instead of a live device."""
    module = edgeswitch_facts
    def setUp(self):
        super(TestEdgeswitchFactsModule, self).setUp()
        # Patch run_commands so no connection to a real switch is attempted.
        self.mock_run_commands = patch('ansible.modules.network.edgeswitch.edgeswitch_facts.run_commands')
        self.run_commands = self.mock_run_commands.start()
    def tearDown(self):
        super(TestEdgeswitchFactsModule, self).tearDown()
        self.mock_run_commands.stop()
    def load_fixtures(self, commands=None):
        """Route every run_commands() call to fixture files named after the
        command (pipe suffix stripped, spaces replaced by underscores)."""
        def load_from_file(*args, **kwargs):
            # (Removed the original's unused `module = args` binding — it was
            # dead and misleadingly named, since args is a positional tuple.)
            commands = kwargs['commands']
            output = list()
            for command in commands:
                filename = str(command).split(' | ')[0].replace(' ', '_')
                output.append(load_fixture('edgeswitch_facts_%s' % filename))
            return output
        self.run_commands.side_effect = load_from_file
    def test_edgeswitch_facts_default(self):
        """Default subset minus interfaces/config yields the base facts."""
        set_module_args(dict(gather_subset=['all', '!interfaces', '!config']))
        result = self.execute_module()
        facts = result.get('ansible_facts')
        self.assertEqual(len(facts), 5)
        self.assertEqual(facts['ansible_net_hostname'], 'sw_test_1')
        self.assertEqual(facts['ansible_net_serialnum'], 'F09FC2EFD310')
        self.assertEqual(facts['ansible_net_version'], '1.7.4.5075842')
    def test_edgeswitch_facts_interfaces(self):
        """Interface subset parses per-port attributes from the fixtures."""
        set_module_args(dict(gather_subset='interfaces'))
        result = self.execute_module()
        facts = result.get('ansible_facts')
        self.assertEqual(len(facts), 6)
        self.assertEqual(facts['ansible_net_interfaces']['0/1']['operstatus'], 'Enable')
        self.assertEqual(facts['ansible_net_interfaces']['0/2']['mediatype'], '2.5G-BaseFX')
        self.assertEqual(facts['ansible_net_interfaces']['0/3']['physicalstatus'], '10G Full')
        self.assertEqual(facts['ansible_net_interfaces']['0/4']['lineprotocol'], 'Up')
        self.assertEqual(facts['ansible_net_interfaces']['0/15']['description'], 'UPLINK VIDEO WITH A VERY LONG DESCRIPTION THAT HELPS NO ONE')
| gpl-3.0 |
ajdavis/asyncio | examples/child_process.py | 10 | 3769 | """
Example of asynchronous interaction with a child python process.
This example shows how to attach an existing Popen object and use the low level
transport-protocol API. See shell.py and subprocess_shell.py for higher level
examples.
"""
import os
import sys
try:
import asyncio
except ImportError:
# asyncio is not installed
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import asyncio
if sys.platform == 'win32':
from asyncio.windows_utils import Popen, PIPE
from asyncio.windows_events import ProactorEventLoop
else:
from subprocess import Popen, PIPE
#
# Return a write-only transport wrapping a writable pipe
#
@asyncio.coroutine
def connect_write_pipe(file):
    """Return a write-only transport wrapping the writable pipe *file*."""
    loop = asyncio.get_event_loop()
    # A bare Protocol suffices: we only ever write, never receive.
    transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol, file)
    return transport
#
# Wrap a readable pipe in a stream
#
@asyncio.coroutine
def connect_read_pipe(file):
    """Wrap the readable pipe *file* in a StreamReader; return
    (reader, transport)."""
    loop = asyncio.get_event_loop()
    stream_reader = asyncio.StreamReader(loop=loop)
    def factory():
        # Protocol that feeds incoming data into the stream reader.
        return asyncio.StreamReaderProtocol(stream_reader)
    transport, _ = yield from loop.connect_read_pipe(factory, file)
    return stream_reader, transport
#
# Example
#
@asyncio.coroutine
def main(loop):
    """Spawn a python child that evaluates stdin expressions, feed it
    commands asynchronously and echo its stdout/stderr lines."""
    # program which prints evaluation of each expression from stdin
    code = r'''if 1:
                   import os
                   def writeall(fd, buf):
                       while buf:
                           n = os.write(fd, buf)
                           buf = buf[n:]
                   while True:
                       s = os.read(0, 1024)
                       if not s:
                           break
                       s = s.decode('ascii')
                       s = repr(eval(s)) + '\n'
                       s = s.encode('ascii')
                       writeall(1, s)
                   '''
    # commands to send to input (the last one triggers a traceback on stderr)
    commands = iter([b"1+1\n",
                     b"2**16\n",
                     b"1/3\n",
                     b"'x'*50",
                     b"1/0\n"])
    # start subprocess and wrap stdin, stdout, stderr
    p = Popen([sys.executable, '-c', code],
              stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdin = yield from connect_write_pipe(p.stdin)
    stdout, stdout_transport = yield from connect_read_pipe(p.stdout)
    stderr, stderr_transport = yield from connect_read_pipe(p.stderr)
    # interact with subprocess
    name = {stdout:'OUT', stderr:'ERR'}
    # Map of pending readline() tasks -> the stream they read from.
    registered = {asyncio.Task(stderr.readline()): stderr,
                  asyncio.Task(stdout.readline()): stdout}
    while registered:
        # write command
        cmd = next(commands, None)
        if cmd is None:
            # No commands left: close stdin so the child sees EOF and exits.
            stdin.close()
        else:
            print('>>>', cmd.decode('ascii').rstrip())
            stdin.write(cmd)
        # get and print lines from stdout, stderr
        # First wait is unbounded; after a line arrives switch to timeout=0
        # so remaining already-complete reads are drained without blocking.
        timeout = None
        while registered:
            done, pending = yield from asyncio.wait(
                registered, timeout=timeout,
                return_when=asyncio.FIRST_COMPLETED)
            if not done:
                break
            for f in done:
                stream = registered.pop(f)
                res = f.result()
                print(name[stream], res.decode('ascii').rstrip())
                # Empty bytes mean EOF on that stream: stop re-registering it.
                if res != b'':
                    registered[asyncio.Task(stream.readline())] = stream
                timeout = 0.0
    stdout_transport.close()
    stderr_transport.close()
if __name__ == '__main__':
    # Windows needs the proactor loop for subprocess pipe support.
    if sys.platform == 'win32':
        loop = ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main(loop))
    finally:
        loop.close()
| apache-2.0 |
tuxfux-hlp-notes/python-batches | archieves/Batch-63/14-files/myenv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py | 384 | 6309 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import base
from .. import _ihatexml
def ensure_str(s):
    """Coerce *s* to text: None passes through, text is returned unchanged,
    and byte strings are strictly ASCII-decoded."""
    if s is None:
        return None
    if isinstance(s, text_type):
        return s
    return s.decode("ascii", "strict")
class Root(object):
    """Synthetic document root wrapping an lxml ElementTree so the walker
    can visit the doctype, the root element, and any top-level siblings
    (comments/PIs before or after the root)."""
    def __init__(self, et):
        self.elementtree = et
        self.children = []
        # An lxml tree exposes docinfo; plain elements don't, hence the
        # AttributeError guard.  Add a Doctype child when a DTD is present.
        try:
            if et.docinfo.internalDTD:
                self.children.append(Doctype(self,
                                             ensure_str(et.docinfo.root_name),
                                             ensure_str(et.docinfo.public_id),
                                             ensure_str(et.docinfo.system_url)))
        except AttributeError:
            pass
        # Accept either a tree (getroot) or a bare element.
        try:
            node = et.getroot()
        except AttributeError:
            node = et
        # Rewind to the first top-level sibling, then collect all of them
        # in document order.
        while node.getprevious() is not None:
            node = node.getprevious()
        while node is not None:
            self.children.append(node)
            node = node.getnext()
        # Document nodes carry no character data.
        self.text = None
        self.tail = None
    def __getitem__(self, key):
        return self.children[key]
    def getnext(self):
        # The document root has no following sibling.
        return None
    def __len__(self):
        return 1
class Doctype(object):
    """Synthetic doctype node: records the document's root name plus
    public/system identifiers and remembers its owning Root."""
    def __init__(self, root_node, name, public_id, system_id):
        self.root_node, self.name = root_node, name
        self.public_id, self.system_id = public_id, system_id
        # Doctype nodes never carry character data.
        self.text = self.tail = None
    def getnext(self):
        # The doctype is always children[0] of the Root, so the node that
        # follows it is children[1].
        return self.root_node.children[1]
class FragmentRoot(Root):
    """Synthetic root for walking a fragment (a list of nodes) instead of a
    whole document.  Deliberately does NOT call Root.__init__."""
    def __init__(self, children):
        # Wrap each fragment child so it knows its siblings via this root.
        self.children = [FragmentWrapper(self, child) for child in children]
        self.text = self.tail = None
    def getnext(self):
        return None
class FragmentWrapper(object):
    """Wraps one fragment child, delegating most behaviour to the wrapped
    object while providing sibling navigation through the FragmentRoot."""
    def __init__(self, fragment_root, obj):
        self.root_node = fragment_root
        self.obj = obj
        # Capture text/tail eagerly (decoded) so the walker can treat the
        # wrapper uniformly; plain strings wrapped here have neither.
        if hasattr(self.obj, 'text'):
            self.text = ensure_str(self.obj.text)
        else:
            self.text = None
        if hasattr(self.obj, 'tail'):
            self.tail = ensure_str(self.obj.tail)
        else:
            self.tail = None
    def __getattr__(self, name):
        # Anything not defined on the wrapper is forwarded to the wrapped
        # object (only called for attributes missing on the wrapper itself).
        return getattr(self.obj, name)
    def getnext(self):
        # Siblings are the FragmentRoot's children; return the wrapper that
        # follows this one, or None at the end of the fragment.
        siblings = self.root_node.children
        idx = siblings.index(self)
        if idx < len(siblings) - 1:
            return siblings[idx + 1]
        else:
            return None
    def __getitem__(self, key):
        return self.obj[key]
    def __bool__(self):
        return bool(self.obj)
    def getparent(self):
        # Fragment children have no parent element.
        return None
    def __str__(self):
        return str(self.obj)
    def __unicode__(self):
        return str(self.obj)
    def __len__(self):
        return len(self.obj)
class TreeWalker(base.NonRecursiveTreeWalker):
    """Tree walker over lxml trees (or fragments, i.e. lists of nodes).

    Text is not a node in lxml; it lives in an element's .text/.tail
    attributes, so text positions are represented here as (element, key)
    tuples with key in ("text", "tail").
    """
    def __init__(self, tree):
        # pylint:disable=redefined-variable-type
        if isinstance(tree, list):
            # Fragment: remember the original top-level nodes so
            # getParentNode() can report None for them.
            self.fragmentChildren = set(tree)
            tree = FragmentRoot(tree)
        else:
            self.fragmentChildren = set()
            tree = Root(tree)
        base.NonRecursiveTreeWalker.__init__(self, tree)
        # Translates lxml/XML names back into HTML infoset names.
        self.filter = _ihatexml.InfosetFilter()
    def getNodeDetails(self, node):
        """Classify *node* and return the token tuple the serializer expects."""
        if isinstance(node, tuple): # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            return base.TEXT, ensure_str(getattr(node, key))
        elif isinstance(node, Root):
            return (base.DOCUMENT,)
        elif isinstance(node, Doctype):
            return base.DOCTYPE, node.name, node.public_id, node.system_id
        elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
            # A wrapped plain string at fragment top level.
            return base.TEXT, ensure_str(node.obj)
        elif node.tag == etree.Comment:
            return base.COMMENT, ensure_str(node.text)
        elif node.tag == etree.Entity:
            return base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
        else:
            # This is assumed to be an ordinary element
            match = tag_regexp.match(ensure_str(node.tag))
            if match:
                namespace, tag = match.groups()
            else:
                namespace = None
                tag = ensure_str(node.tag)
            attrs = {}
            # Split Clark-notation "{ns}name" attributes into (ns, name) keys.
            for name, value in list(node.attrib.items()):
                name = ensure_str(name)
                value = ensure_str(value)
                match = tag_regexp.match(name)
                if match:
                    attrs[(match.group(1), match.group(2))] = value
                else:
                    attrs[(None, name)] = value
            return (base.ELEMENT, namespace, self.filter.fromXmlName(tag),
                    attrs, len(node) > 0 or node.text)
    def getFirstChild(self, node):
        assert not isinstance(node, tuple), "Text nodes have no children"
        assert len(node) or node.text, "Node has no children"
        # Leading text comes before the first child element.
        if node.text:
            return (node, "text")
        else:
            return node[0]
    def getNextSibling(self, node):
        if isinstance(node, tuple): # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                # XXX: we cannot use a "bool(node) and node[0] or None" construct here
                # because node[0] might evaluate to False if it has no child element
                if len(node):
                    return node[0]
                else:
                    return None
            else: # tail
                return node.getnext()
        # An element's tail text (if any) precedes its next sibling element.
        return (node, "tail") if node.tail else node.getnext()
    def getParentNode(self, node):
        if isinstance(node, tuple): # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                return node
            # else: fallback to "normal" processing
        elif node in self.fragmentChildren:
            # Top-level fragment nodes report no parent.
            return None
        return node.getparent()
| gpl-3.0 |
peeyush-tm/check_mk | web/plugins/views/webservice.py | 1 | 6788 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
def render_python_raw(data, view, group_painters, painters, num_columns, show_checkboxes):
    """Dump the unprocessed view data as a Python repr() literal."""
    html.write("%r" % (data,))
# Register the raw-repr layout; hidden, so it is only reachable via URL.
multisite_layouts["python-raw"] = {
    "title"  : _("Python raw data output"),
    "render" : render_python_raw,
    "group"  : False,
    "hide"   : True,
}
def render_python(rows, view, group_painters, painters, num_columns, show_checkboxes):
    """Emit the view as a Python literal: first a list of the painter
    (column) names, then one list per row holding the tag-stripped,
    repr()-quoted cell contents."""
    html.write("[\n")
    column_names = [painter_spec[0]["name"] for painter_spec in painters]
    html.write(repr(column_names))
    html.write(",\n")
    for row in rows:
        html.write("[")
        for painter_spec in painters:
            cell_row = join_row(row, painter_spec)
            tdclass, rendered = paint_painter(painter_spec[0], cell_row)
            html.write(repr(html.strip_tags(rendered)) + ",")
        html.write("],")
    html.write("\n]\n")
# Register the painted-cells Python layout; hidden from the layout picker.
multisite_layouts["python"] = {
    "title"  : _("Python data output"),
    "render" : render_python,
    "group"  : False,
    "hide"   : True,
}
# Characters that must be escaped inside a JSON string literal:
# backslash, double quote and all ASCII control characters.
json_escape = re.compile(r'[\\"\r\n\t\b\f\x00-\x1f]')
# Map every control character to its generic \uXXXX escape, then overlay
# the shorthand escapes JSON defines for the common ones.
json_encoding_table = dict((chr(code), '\\u%04x' % code) for code in range(32))
json_encoding_table.update({'\b': '\\b', '\f': '\\f', '\n': '\\n', '\r': '\\r',
                            '\t': '\\t', '\\': '\\\\', '"': '\\"'})
def encode_string_json(s):
    """Return *s* quoted and escaped as a JSON string literal."""
    def escape_char(match):
        return json_encoding_table[match.group(0)]
    return '"' + json_escape.sub(escape_char, s) + '"'
def render_json(rows, view, group_painters, painters, num_columns, show_checkboxes, export = False):
    """Render the view as a JSON array of arrays.

    The first inner array holds the painter (column) names; each following
    array is one data row of tag-stripped, UTF-8 encoded cell contents.
    With export=True, HTTP headers for a file download are emitted as well.
    """
    if export:
        # BUGFIX: the MIME type was misspelled "appliation/json".
        html.req.content_type = "application/json; charset=UTF-8"
        filename = '%s-%s.json' % (view['name'], time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time())))
        if type(filename) == unicode:
            filename = filename.encode("utf-8")
        html.req.headers_out['Content-Disposition'] = 'Attachment; filename=%s' % filename
    html.write("[\n")
    # Header array with the column titles.
    first = True
    html.write("[")
    for p in painters:
        if first:
            first = False
        else:
            html.write(",")
        content = p[0]["name"]
        stripped = html.strip_tags(content)
        utf8 = stripped.encode("utf-8")
        html.write(encode_string_json(utf8))
    html.write("]")
    # One array per data row.
    for row in rows:
        html.write(",\n[")
        first = True
        for p in painters:
            if first:
                first = False
            else:
                html.write(",")
            joined_row = join_row(row, p)
            tdclass, content = paint_painter(p[0], joined_row)
            if type(content) == unicode:
                content = content.encode("utf-8")
            else:
                content = str(content)
            # <br> is the one piece of markup kept, converted to a newline
            # before all remaining tags are stripped.
            content = content.replace("<br>", "\n")
            stripped = html.strip_tags(content)
            html.write(encode_string_json(stripped))
        html.write("]")
    html.write("\n]\n")
# Register the JSON layouts: "json_export" additionally sends download
# headers (render_json export flag True), "json" renders inline.
multisite_layouts["json_export"] = {
    "title"  : _("JSON data export"),
    "render" : lambda a,b,c,d,e,f: render_json(a,b,c,d,e,f,True),
    "group"  : False,
    "hide"   : True,
}
multisite_layouts["json"] = {
    "title"  : _("JSON data output"),
    "render" : lambda a,b,c,d,e,f: render_json(a,b,c,d,e,f,False),
    "group"  : False,
    "hide"   : True,
}
def render_jsonp(rows, view, group_painters, painters, num_columns, show_checkboxes):
    """Render the JSON output wrapped in the JSONP callback named by the
    'jsonp' request variable."""
    callback = html.var('jsonp')
    html.write("%s(\n" % callback)
    render_json(rows, view, group_painters, painters, num_columns, show_checkboxes)
    html.write(");\n")
# Register the JSONP layout; hidden, reachable only via URL.
multisite_layouts["jsonp"] = {
    "title"  : _("JSONP data output"),
    "render" : render_jsonp,
    "group"  : False,
    "hide"   : True,
}
def render_csv(rows, view, group_painters, painters, num_columns, show_checkboxes, export = False):
    """Render the view as CSV: a header line of painter names, then one
    line per row.  Cells are double-quoted and separated by the
    'csv_separator' request variable (default ';').  With export=True,
    HTTP headers for a file download are emitted as well."""
    if export:
        html.req.content_type = "text/csv; charset=UTF-8"
        filename = '%s-%s.csv' % (view['name'],
                                  time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time())))
        if type(filename) == unicode:
            filename = filename.encode("utf-8")
        html.req.headers_out['Content-Disposition'] = 'Attachment; filename=%s' % filename
    csv_separator = html.var("csv_separator", ";")
    def write_cell(content):
        # Stringify numbers, strip markup, double embedded quotes (RFC 4180).
        if type(content) in [ int, float ]:
            content = str(content)
        escaped = html.strip_tags(content).replace('\n', '').replace('"', '""')
        html.write('"%s"' % escaped.encode("utf-8"))
    # Header line with the painter names.
    for pos, p in enumerate(painters):
        if pos > 0:
            html.write(csv_separator)
        write_cell(p[0]["name"])
    # One line per data row.
    for row in rows:
        html.write("\n")
        for pos, p in enumerate(painters):
            if pos > 0:
                html.write(csv_separator)
            joined_row = join_row(row, p)
            tdclass, content = paint_painter(p[0], joined_row)
            write_cell(content)
# Register the CSV layouts: "csv_export" additionally sends download
# headers (render_csv export flag True), "csv" renders inline.
multisite_layouts["csv_export"] = {
    "title"  : _("CSV data export"),
    "render" : lambda a,b,c,d,e,f: render_csv(a,b,c,d,e,f,True),
    "group"  : False,
    "hide"   : True,
}
multisite_layouts["csv"] = {
    "title"  : _("CSV data output"),
    "render" : lambda a,b,c,d,e,f: render_csv(a,b,c,d,e,f,False),
    "group"  : False,
    "hide"   : True,
}
| gpl-2.0 |
TalShafir/ansible | lib/ansible/modules/network/netscaler/netscaler_lb_vserver.py | 101 | 72334 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: schema version, maturity status and
# the channel that supports this module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_lb_vserver
short_description: Manage load balancing vserver configuration
description:
- Manage load balancing vserver configuration
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance
version_added: "2.4"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- >-
Name for the virtual server. Must begin with an ASCII alphanumeric or underscore C(_) character, and
must contain only ASCII alphanumeric, underscore, hash C(#), period C(.), space C( ), colon C(:), at sign
C(@), equal sign C(=), and hyphen C(-) characters. Can be changed after the virtual server is created.
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'DTLS'
- 'NNTP'
- 'DNS'
- 'DHCPRA'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'DNS_TCP'
- 'RTSP'
- 'PUSH'
- 'SSL_PUSH'
- 'RADIUS'
- 'RDP'
- 'MYSQL'
- 'MSSQL'
- 'DIAMETER'
- 'SSL_DIAMETER'
- 'TFTP'
- 'ORACLE'
- 'SMPP'
- 'SYSLOGTCP'
- 'SYSLOGUDP'
- 'FIX'
- 'SSL_FIX'
description:
- "Protocol used by the service (also called the service type)."
ipv46:
description:
- "IPv4 or IPv6 address to assign to the virtual server."
ippattern:
description:
- >-
IP address pattern, in dotted decimal notation, for identifying packets to be accepted by the virtual
server. The IP Mask parameter specifies which part of the destination IP address is matched against
the pattern. Mutually exclusive with the IP Address parameter.
- >-
For example, if the IP pattern assigned to the virtual server is C(198.51.100.0) and the IP mask is
C(255.255.240.0) (a forward mask), the first 20 bits in the destination IP addresses are matched with
the first 20 bits in the pattern. The virtual server accepts requests with IP addresses that range
from C(198.51.96.1) to C(198.51.111.254). You can also use a pattern such as C(0.0.2.2) and a mask such as
C(0.0.255.255) (a reverse mask).
- >-
If a destination IP address matches more than one IP pattern, the pattern with the longest match is
selected, and the associated virtual server processes the request. For example, if virtual servers
C(vs1) and C(vs2) have the same IP pattern, C(0.0.100.128), but different IP masks of C(0.0.255.255) and
C(0.0.224.255), a destination IP address of C(198.51.100.128) has the longest match with the IP pattern of
vs1. If a destination IP address matches two or more virtual servers to the same extent, the request
is processed by the virtual server whose port number matches the port number in the request.
ipmask:
description:
- >-
IP mask, in dotted decimal notation, for the IP Pattern parameter. Can have leading or trailing
non-zero octets (for example, C(255.255.240.0) or C(0.0.255.255)). Accordingly, the mask specifies whether
the first n bits or the last n bits of the destination IP address in a client request are to be
matched with the corresponding bits in the IP pattern. The former is called a forward mask. The
latter is called a reverse mask.
port:
description:
- "Port number for the virtual server."
- "Range C(1) - C(65535)"
- "* in CLI is represented as C(65535) in NITRO API"
range:
description:
- >-
Number of IP addresses that the appliance must generate and assign to the virtual server. The virtual
server then functions as a network virtual server, accepting traffic on any of the generated IP
addresses. The IP addresses are generated automatically, as follows:
- >-
* For a range of n, the last octet of the address specified by the IP Address parameter increments
n-1 times.
- "* If the last octet exceeds 255, it rolls over to 0 and the third octet increments by 1."
- >-
Note: The Range parameter assigns multiple IP addresses to one virtual server. To generate an array
of virtual servers, each of which owns only one IP address, use brackets in the IP Address and Name
parameters to specify the range. For example:
- "add lb vserver my_vserver[1-3] HTTP 192.0.2.[1-3] 80."
- "Minimum value = C(1)"
- "Maximum value = C(254)"
persistencetype:
choices:
- 'SOURCEIP'
- 'COOKIEINSERT'
- 'SSLSESSION'
- 'RULE'
- 'URLPASSIVE'
- 'CUSTOMSERVERID'
- 'DESTIP'
- 'SRCIPDESTIP'
- 'CALLID'
- 'RTSPSID'
- 'DIAMETER'
- 'FIXSESSION'
- 'NONE'
description:
- "Type of persistence for the virtual server. Available settings function as follows:"
- "* C(SOURCEIP) - Connections from the same client IP address belong to the same persistence session."
- >-
* C(COOKIEINSERT) - Connections that have the same HTTP Cookie, inserted by a Set-Cookie directive from
a server, belong to the same persistence session.
- "* C(SSLSESSION) - Connections that have the same SSL Session ID belong to the same persistence session."
- >-
* C(CUSTOMSERVERID) - Connections with the same server ID form part of the same session. For this
persistence type, set the Server ID (CustomServerID) parameter for each service and configure the
Rule parameter to identify the server ID in a request.
- "* C(RULE) - All connections that match a user defined rule belong to the same persistence session."
- >-
* C(URLPASSIVE) - Requests that have the same server ID in the URL query belong to the same persistence
session. The server ID is the hexadecimal representation of the IP address and port of the service to
which the request must be forwarded. This persistence type requires a rule to identify the server ID
in the request.
- "* C(DESTIP) - Connections to the same destination IP address belong to the same persistence session."
- >-
* C(SRCIPDESTIP) - Connections that have the same source IP address and destination IP address belong to
the same persistence session.
- "* C(CALLID) - Connections that have the same CALL-ID SIP header belong to the same persistence session."
- "* C(RTSPSID) - Connections that have the same RTSP Session ID belong to the same persistence session."
- >-
* FIXSESSION - Connections that have the same SenderCompID and TargetCompID values belong to the same
persistence session.
timeout:
description:
- "Time period for which a persistence session is in effect."
- "Minimum value = C(0)"
- "Maximum value = C(1440)"
persistencebackup:
choices:
- 'SOURCEIP'
- 'NONE'
description:
- >-
Backup persistence type for the virtual server. Becomes operational if the primary persistence
mechanism fails.
backuppersistencetimeout:
description:
- "Time period for which backup persistence is in effect."
- "Minimum value = C(2)"
- "Maximum value = C(1440)"
lbmethod:
choices:
- 'ROUNDROBIN'
- 'LEASTCONNECTION'
- 'LEASTRESPONSETIME'
- 'URLHASH'
- 'DOMAINHASH'
- 'DESTINATIONIPHASH'
- 'SOURCEIPHASH'
- 'SRCIPDESTIPHASH'
- 'LEASTBANDWIDTH'
- 'LEASTPACKETS'
- 'TOKEN'
- 'SRCIPSRCPORTHASH'
- 'LRTM'
- 'CALLIDHASH'
- 'CUSTOMLOAD'
- 'LEASTREQUEST'
- 'AUDITLOGHASH'
- 'STATICPROXIMITY'
description:
- "Load balancing method. The available settings function as follows:"
- >-
* C(ROUNDROBIN) - Distribute requests in rotation, regardless of the load. Weights can be assigned to
services to enforce weighted round robin distribution.
- "* C(LEASTCONNECTION) (default) - Select the service with the fewest connections."
- "* C(LEASTRESPONSETIME) - Select the service with the lowest average response time."
- "* C(LEASTBANDWIDTH) - Select the service currently handling the least traffic."
- "* C(LEASTPACKETS) - Select the service currently serving the lowest number of packets per second."
- "* C(CUSTOMLOAD) - Base service selection on the SNMP metrics obtained by custom load monitors."
- >-
* C(LRTM) - Select the service with the lowest response time. Response times are learned through
monitoring probes. This method also takes the number of active connections into account.
- >-
Also available are a number of hashing methods, in which the appliance extracts a predetermined
portion of the request, creates a hash of the portion, and then checks whether any previous requests
had the same hash value. If it finds a match, it forwards the request to the service that served
those previous requests. Following are the hashing methods:
- "* C(URLHASH) - Create a hash of the request URL (or part of the URL)."
- >-
* C(DOMAINHASH) - Create a hash of the domain name in the request (or part of the domain name). The
domain name is taken from either the URL or the Host header. If the domain name appears in both
locations, the URL is preferred. If the request does not contain a domain name, the load balancing
method defaults to C(LEASTCONNECTION).
- "* C(DESTINATIONIPHASH) - Create a hash of the destination IP address in the IP header."
- "* C(SOURCEIPHASH) - Create a hash of the source IP address in the IP header."
- >-
* C(TOKEN) - Extract a token from the request, create a hash of the token, and then select the service
to which any previous requests with the same token hash value were sent.
- >-
* C(SRCIPDESTIPHASH) - Create a hash of the string obtained by concatenating the source IP address and
destination IP address in the IP header.
- "* C(SRCIPSRCPORTHASH) - Create a hash of the source IP address and source port in the IP header."
- "* C(CALLIDHASH) - Create a hash of the SIP Call-ID header."
hashlength:
description:
- >-
Number of bytes to consider for the hash value used in the URLHASH and DOMAINHASH load balancing
methods.
- "Minimum value = C(1)"
- "Maximum value = C(4096)"
netmask:
description:
- >-
IPv4 subnet mask to apply to the destination IP address or source IP address when the load balancing
method is C(DESTINATIONIPHASH) or C(SOURCEIPHASH).
- "Minimum length = 1"
v6netmasklen:
description:
- >-
Number of bits to consider in an IPv6 destination or source IP address, for creating the hash that is
required by the C(DESTINATIONIPHASH) and C(SOURCEIPHASH) load balancing methods.
- "Minimum value = C(1)"
- "Maximum value = C(128)"
backuplbmethod:
choices:
- 'ROUNDROBIN'
- 'LEASTCONNECTION'
- 'LEASTRESPONSETIME'
- 'SOURCEIPHASH'
- 'LEASTBANDWIDTH'
- 'LEASTPACKETS'
- 'CUSTOMLOAD'
description:
            - "Backup load balancing method. Becomes operational if the primary load balancing"
            - "method fails or cannot be used."
- "Valid only if the primary method is based on static proximity."
cookiename:
description:
- >-
                Use this parameter to specify the cookie name for C(COOKIE) persistence type. It specifies the name of
cookie with a maximum of 32 characters. If not specified, cookie name is internally generated.
listenpolicy:
description:
- >-
Default syntax expression identifying traffic accepted by the virtual server. Can be either an
expression (for example, C(CLIENT.IP.DST.IN_SUBNET(192.0.2.0/24)) or the name of a named expression. In
the above example, the virtual server accepts all requests whose destination IP address is in the
192.0.2.0/24 subnet.
listenpriority:
description:
- >-
Integer specifying the priority of the listen policy. A higher number specifies a lower priority. If
a request matches the listen policies of more than one virtual server the virtual server whose listen
policy has the highest priority (the lowest priority number) accepts the request.
- "Minimum value = C(0)"
- "Maximum value = C(101)"
resrule:
description:
- >-
Default syntax expression specifying which part of a server's response to use for creating rule based
persistence sessions (persistence type RULE). Can be either an expression or the name of a named
expression.
- "Example:"
- "C(HTTP.RES.HEADER(\\"setcookie\\").VALUE(0).TYPECAST_NVLIST_T('=',';').VALUE(\\"server1\\"))."
persistmask:
description:
- "Persistence mask for IP based persistence types, for IPv4 virtual servers."
- "Minimum length = 1"
v6persistmasklen:
description:
- "Persistence mask for IP based persistence types, for IPv6 virtual servers."
- "Minimum value = C(1)"
- "Maximum value = C(128)"
rtspnat:
description:
- "Use network address translation (NAT) for RTSP data connections."
type: bool
m:
choices:
- 'IP'
- 'MAC'
- 'IPTUNNEL'
- 'TOS'
description:
- "Redirection mode for load balancing. Available settings function as follows:"
- >-
* C(IP) - Before forwarding a request to a server, change the destination IP address to the server's IP
address.
- >-
* C(MAC) - Before forwarding a request to a server, change the destination MAC address to the server's
MAC address. The destination IP address is not changed. MAC-based redirection mode is used mostly in
firewall load balancing deployments.
- >-
* C(IPTUNNEL) - Perform IP-in-IP encapsulation for client IP packets. In the outer IP headers, set the
destination IP address to the IP address of the server and the source IP address to the subnet IP
(SNIP). The client IP packets are not modified. Applicable to both IPv4 and IPv6 packets.
- "* C(TOS) - Encode the virtual server's TOS ID in the TOS field of the IP header."
- "You can use either the C(IPTUNNEL) or the C(TOS) option to implement Direct Server Return (DSR)."
tosid:
description:
- >-
TOS ID of the virtual server. Applicable only when the load balancing redirection mode is set to TOS.
- "Minimum value = C(1)"
- "Maximum value = C(63)"
datalength:
description:
- >-
Length of the token to be extracted from the data segment of an incoming packet, for use in the token
method of load balancing. The length of the token, specified in bytes, must not be greater than 24
KB. Applicable to virtual servers of type TCP.
- "Minimum value = C(1)"
- "Maximum value = C(100)"
dataoffset:
description:
- >-
Offset to be considered when extracting a token from the TCP payload. Applicable to virtual servers,
of type TCP, using the token method of load balancing. Must be within the first 24 KB of the TCP
payload.
- "Minimum value = C(0)"
- "Maximum value = C(25400)"
sessionless:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Perform load balancing on a per-packet basis, without establishing sessions. Recommended for load
balancing of intrusion detection system (IDS) servers and scenarios involving direct server return
(DSR), where session information is unnecessary.
connfailover:
choices:
- 'DISABLED'
- 'STATEFUL'
- 'STATELESS'
description:
- >-
Mode in which the connection failover feature must operate for the virtual server. After a failover,
established TCP connections and UDP packet flows are kept active and resumed on the secondary
appliance. Clients remain connected to the same servers. Available settings function as follows:
- >-
* C(STATEFUL) - The primary appliance shares state information with the secondary appliance, in real
time, resulting in some runtime processing overhead.
- >-
* C(STATELESS) - State information is not shared, and the new primary appliance tries to re-create the
packet flow on the basis of the information contained in the packets it receives.
- "* C(DISABLED) - Connection failover does not occur."
redirurl:
description:
- "URL to which to redirect traffic if the virtual server becomes unavailable."
- >-
WARNING! Make sure that the domain in the URL does not match the domain specified for a content
switching policy. If it does, requests are continuously redirected to the unavailable virtual server.
- "Minimum length = 1"
cacheable:
description:
- >-
Route cacheable requests to a cache redirection virtual server. The load balancing virtual server can
forward requests only to a transparent cache redirection virtual server that has an IP address and
port combination of *:80, so such a cache redirection virtual server must be configured on the
appliance.
type: bool
clttimeout:
description:
- "Idle time, in seconds, after which a client connection is terminated."
- "Minimum value = C(0)"
- "Maximum value = C(31536000)"
somethod:
choices:
- 'CONNECTION'
- 'DYNAMICCONNECTION'
- 'BANDWIDTH'
- 'HEALTH'
- 'NONE'
description:
- "Type of threshold that, when exceeded, triggers spillover. Available settings function as follows:"
- "* C(CONNECTION) - Spillover occurs when the number of client connections exceeds the threshold."
- >-
* DYNAMICCONNECTION - Spillover occurs when the number of client connections at the virtual server
exceeds the sum of the maximum client (Max Clients) settings for bound services. Do not specify a
spillover threshold for this setting, because the threshold is implied by the Max Clients settings of
bound services.
- >-
* C(BANDWIDTH) - Spillover occurs when the bandwidth consumed by the virtual server's incoming and
outgoing traffic exceeds the threshold.
- >-
* C(HEALTH) - Spillover occurs when the percentage of weights of the services that are UP drops below
the threshold. For example, if services svc1, svc2, and svc3 are bound to a virtual server, with
weights 1, 2, and 3, and the spillover threshold is 50%, spillover occurs if svc1 and svc3 or svc2
and svc3 transition to DOWN.
- "* C(NONE) - Spillover does not occur."
sopersistence:
choices:
- 'enabled'
- 'disabled'
description:
- >-
If spillover occurs, maintain source IP address based persistence for both primary and backup virtual
servers.
sopersistencetimeout:
description:
- "Timeout for spillover persistence, in minutes."
- "Minimum value = C(2)"
- "Maximum value = C(1440)"
healththreshold:
description:
- >-
Threshold in percent of active services below which vserver state is made down. If this threshold is
0, vserver state will be up even if one bound service is up.
- "Minimum value = C(0)"
- "Maximum value = C(100)"
sothreshold:
description:
- >-
Threshold at which spillover occurs. Specify an integer for the C(CONNECTION) spillover method, a
bandwidth value in kilobits per second for the C(BANDWIDTH) method (do not enter the units), or a
percentage for the C(HEALTH) method (do not enter the percentage symbol).
- "Minimum value = C(1)"
- "Maximum value = C(4294967287)"
sobackupaction:
choices:
- 'DROP'
- 'ACCEPT'
- 'REDIRECT'
description:
- >-
Action to be performed if spillover is to take effect, but no backup chain to spillover is usable or
exists.
redirectportrewrite:
choices:
- 'enabled'
- 'disabled'
description:
- "Rewrite the port and change the protocol to ensure successful HTTP redirects from services."
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with a virtual server whose state transitions from UP to
DOWN. Do not enable this option for applications that must complete their transactions.
disableprimaryondown:
choices:
- 'enabled'
- 'disabled'
description:
- >-
If the primary virtual server goes down, do not allow it to return to primary status until manually
enabled.
insertvserveripport:
choices:
- 'OFF'
- 'VIPADDR'
- 'V6TOV4MAPPING'
description:
- >-
Insert an HTTP header, whose value is the IP address and port number of the virtual server, before
forwarding a request to the server. The format of the header is <vipHeader>: <virtual server IP
address>_<port number >, where vipHeader is the name that you specify for the header. If the virtual
server has an IPv6 address, the address in the header is enclosed in brackets ([ and ]) to separate
it from the port number. If you have mapped an IPv4 address to a virtual server's IPv6 address, the
value of this parameter determines which IP address is inserted in the header, as follows:
- >-
* C(VIPADDR) - Insert the IP address of the virtual server in the HTTP header regardless of whether the
virtual server has an IPv4 address or an IPv6 address. A mapped IPv4 address, if configured, is
ignored.
- >-
* C(V6TOV4MAPPING) - Insert the IPv4 address that is mapped to the virtual server's IPv6 address. If a
mapped IPv4 address is not configured, insert the IPv6 address.
- "* C(OFF) - Disable header insertion."
vipheader:
description:
- "Name for the inserted header. The default name is vip-header."
- "Minimum length = 1"
authenticationhost:
description:
- >-
Fully qualified domain name (FQDN) of the authentication virtual server to which the user must be
redirected for authentication. Make sure that the Authentication parameter is set to C(yes).
- "Minimum length = 3"
- "Maximum length = 252"
authentication:
description:
- "Enable or disable user authentication."
type: bool
authn401:
description:
- "Enable or disable user authentication with HTTP 401 responses."
type: bool
authnvsname:
description:
- "Name of an authentication virtual server with which to authenticate users."
- "Minimum length = 1"
- "Maximum length = 252"
push:
choices:
- 'enabled'
- 'disabled'
description:
- "Process traffic with the push virtual server that is bound to this load balancing virtual server."
pushvserver:
description:
- >-
Name of the load balancing virtual server, of type PUSH or SSL_PUSH, to which the server pushes
updates received on the load balancing virtual server that you are configuring.
- "Minimum length = 1"
pushlabel:
description:
- >-
Expression for extracting a label from the server's response. Can be either an expression or the name
of a named expression.
pushmulticlients:
description:
- >-
Allow multiple Web 2.0 connections from the same client to connect to the virtual server and expect
updates.
type: bool
tcpprofilename:
description:
- "Name of the TCP profile whose settings are to be applied to the virtual server."
- "Minimum length = 1"
- "Maximum length = 127"
httpprofilename:
description:
- "Name of the HTTP profile whose settings are to be applied to the virtual server."
- "Minimum length = 1"
- "Maximum length = 127"
dbprofilename:
description:
- "Name of the DB profile whose settings are to be applied to the virtual server."
- "Minimum length = 1"
- "Maximum length = 127"
comment:
description:
- "Any comments that you might want to associate with the virtual server."
l2conn:
description:
- >-
Use Layer 2 parameters (channel number, MAC address, and VLAN ID) in addition to the 4-tuple (<source
IP>:<source port>::<destination IP>:<destination port>) that is used to identify a connection. Allows
multiple TCP and non-TCP connections with the same 4-tuple to co-exist on the NetScaler appliance.
type: bool
oracleserverversion:
choices:
- '10G'
- '11G'
description:
- "Oracle server version."
mssqlserverversion:
choices:
- '70'
- '2000'
- '2000SP1'
- '2005'
- '2008'
- '2008R2'
- '2012'
- '2014'
description:
- >-
For a load balancing virtual server of type C(MSSQL), the Microsoft SQL Server version. Set this
parameter if you expect some clients to run a version different from the version of the database.
This setting provides compatibility between the client-side and server-side connections by ensuring
that all communication conforms to the server's version.
mysqlprotocolversion:
description:
- "MySQL protocol version that the virtual server advertises to clients."
mysqlserverversion:
description:
- "MySQL server version string that the virtual server advertises to clients."
- "Minimum length = 1"
- "Maximum length = 31"
mysqlcharacterset:
description:
- "Character set that the virtual server advertises to clients."
mysqlservercapabilities:
description:
- "Server capabilities that the virtual server advertises to clients."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Apply AppFlow logging to the virtual server."
netprofile:
description:
- >-
Name of the network profile to associate with the virtual server. If you set this parameter, the
virtual server uses only the IP addresses in the network profile as source IP addresses when
initiating connections with servers.
- "Minimum length = 1"
- "Maximum length = 127"
icmpvsrresponse:
choices:
- 'PASSIVE'
- 'ACTIVE'
description:
- >-
How the NetScaler appliance responds to ping requests received for an IP address that is common to
one or more virtual servers. Available settings function as follows:
- >-
* If set to C(PASSIVE) on all the virtual servers that share the IP address, the appliance always
responds to the ping requests.
- >-
* If set to C(ACTIVE) on all the virtual servers that share the IP address, the appliance responds to
the ping requests if at least one of the virtual servers is UP. Otherwise, the appliance does not
respond.
- >-
* If set to C(ACTIVE) on some virtual servers and PASSIVE on the others, the appliance responds if at
least one virtual server with the ACTIVE setting is UP. Otherwise, the appliance does not respond.
- >-
Note: This parameter is available at the virtual server level. A similar parameter, ICMP Response, is
available at the IP address level, for IPv4 addresses of type VIP. To set that parameter, use the add
ip command in the CLI or the Create IP dialog box in the GUI.
rhistate:
choices:
- 'PASSIVE'
- 'ACTIVE'
description:
- >-
Route Health Injection (RHI) functionality of the NetSaler appliance for advertising the route of the
VIP address associated with the virtual server. When Vserver RHI Level (RHI) parameter is set to
VSVR_CNTRLD, the following are different RHI behaviors for the VIP address on the basis of RHIstate
(RHI STATE) settings on the virtual servers associated with the VIP address:
- >-
* If you set C(rhistate) to C(PASSIVE) on all virtual servers, the NetScaler ADC always advertises the
route for the VIP address.
- >-
* If you set C(rhistate) to C(ACTIVE) on all virtual servers, the NetScaler ADC advertises the route for
the VIP address if at least one of the associated virtual servers is in UP state.
- >-
* If you set C(rhistate) to C(ACTIVE) on some and PASSIVE on others, the NetScaler ADC advertises the
route for the VIP address if at least one of the associated virtual servers, whose C(rhistate) set to
C(ACTIVE), is in UP state.
newservicerequest:
description:
- >-
Number of requests, or percentage of the load on existing services, by which to increase the load on
a new service at each interval in slow-start mode. A non-zero value indicates that slow-start is
applicable. A zero value indicates that the global RR startup parameter is applied. Changing the
value to zero will cause services currently in slow start to take the full traffic as determined by
the LB method. Subsequently, any new services added will use the global RR factor.
newservicerequestunit:
choices:
- 'PER_SECOND'
- 'PERCENT'
description:
- "Units in which to increment load at each interval in slow-start mode."
newservicerequestincrementinterval:
description:
- >-
Interval, in seconds, between successive increments in the load on a new service or a service whose
state has just changed from DOWN to UP. A value of 0 (zero) specifies manual slow start.
- "Minimum value = C(0)"
- "Maximum value = C(3600)"
minautoscalemembers:
description:
- "Minimum number of members expected to be present when vserver is used in Autoscale."
- "Minimum value = C(0)"
- "Maximum value = C(5000)"
maxautoscalemembers:
description:
- "Maximum number of members expected to be present when vserver is used in Autoscale."
- "Minimum value = C(0)"
- "Maximum value = C(5000)"
persistavpno:
description:
- "Persist AVP number for Diameter Persistency."
- "In case this AVP is not defined in Base RFC 3588 and it is nested inside a Grouped AVP,"
- "define a sequence of AVP numbers (max 3) in order of parent to child. So say persist AVP number X"
- "is nested inside AVP Y which is nested in Z, then define the list as Z Y X."
- "Minimum value = C(1)"
skippersistency:
choices:
- 'Bypass'
- 'ReLb'
- 'None'
description:
- >-
This argument decides the behavior in case the service which is selected from an existing persistence
session has reached threshold.
td:
description:
- >-
Integer value that uniquely identifies the traffic domain in which you want to configure the entity.
If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID
of 0.
- "Minimum value = C(0)"
- "Maximum value = C(4094)"
authnprofile:
description:
- "Name of the authentication profile to be used when authentication is turned on."
macmoderetainvlan:
choices:
- 'enabled'
- 'disabled'
description:
- "This option is used to retain vlan information of incoming packet when macmode is enabled."
dbslb:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable database specific load balancing for MySQL and MSSQL service types."
dns64:
choices:
- 'enabled'
- 'disabled'
description:
- "This argument is for enabling/disabling the C(dns64) on lbvserver."
bypassaaaa:
description:
- >-
If this option is enabled while resolving DNS64 query AAAA queries are not sent to back end dns
server.
type: bool
recursionavailable:
description:
- >-
When set to YES, this option causes the DNS replies from this vserver to have the RA bit turned on.
Typically one would set this option to YES, when the vserver is load balancing a set of DNS servers
that support recursive queries.
type: bool
processlocal:
choices:
- 'enabled'
- 'disabled'
description:
- >-
By turning on this option packets destined to a vserver in a cluster will not under go any steering.
Turn this option for single packet request response mode or when the upstream device is performing a
proper RSS for connection based distribution.
dnsprofilename:
description:
- >-
Name of the DNS profile to be associated with the VServer. DNS profile properties will be applied to
the transactions processed by a VServer. This parameter is valid only for DNS and DNS-TCP VServers.
- "Minimum length = 1"
- "Maximum length = 127"
servicebindings:
description:
- List of services along with the weights that are load balanced.
- The following suboptions are available.
suboptions:
servicename:
description:
- "Service to bind to the virtual server."
- "Minimum length = 1"
weight:
description:
- "Weight to assign to the specified service."
- "Minimum value = C(1)"
- "Maximum value = C(100)"
servicegroupbindings:
description:
- List of service groups along with the weights that are load balanced.
- The following suboptions are available.
suboptions:
servicegroupname:
description:
- "The service group name bound to the selected load balancing virtual server."
weight:
description:
- >-
Integer specifying the weight of the service. A larger number specifies a greater weight. Defines the
capacity of the service relative to the other services in the load balancing configuration.
Determines the priority given to the service in load balancing decisions.
- "Minimum value = C(1)"
- "Maximum value = C(100)"
ssl_certkey:
description:
- The name of the ssl certificate that is bound to this service.
- The ssl certificate must already exist.
- Creating the certificate can be done with the M(netscaler_ssl_certkey) module.
- This option is applicable only when C(servicetype) is C(SSL).
disabled:
description:
- When set to C(yes) the lb vserver will be disabled.
- When set to C(no) the lb vserver will be enabled.
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: 'no'
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# Netscaler services service-http-1, service-http-2 must have been already created with the netscaler_service module
- name: Create a load balancing vserver bound to services
delegate_to: localhost
netscaler_lb_vserver:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
validate_certs: no
state: present
name: lb_vserver_1
servicetype: HTTP
timeout: 12
ipv46: 6.93.3.3
port: 80
servicebindings:
- servicename: service-http-1
weight: 80
- servicename: service-http-2
weight: 20
# Service group service-group-1 must have been already created with the netscaler_servicegroup module
- name: Create load balancing vserver bound to servicegroup
delegate_to: localhost
netscaler_lb_vserver:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
validate_certs: no
state: present
name: lb_vserver_2
servicetype: HTTP
ipv46: 6.92.2.2
port: 80
timeout: 10
servicegroupbindings:
- servicegroupname: service-group-1
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
get_immutables_intersection,
ensure_feature_is_enabled
)
import copy
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver import lbvserver
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_servicegroup_binding import lbvserver_servicegroup_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding import lbvserver_service_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding import sslvserver_sslcertkey_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
IMPORT_ERROR = str(e)
PYTHON_SDK_IMPORTED = False
def lb_vserver_exists(client, module):
    """Return True when an lb vserver with the configured name exists on the appliance."""
    log('Checking if lb vserver exists')
    count = lbvserver.count_filtered(client, 'name:%s' % module.params['name'])
    return count > 0
def lb_vserver_identical(client, module, lbvserver_proxy):
    """Return True when the lb vserver on the appliance matches the module configuration."""
    log('Checking if configured lb vserver is identical')
    matching = lbvserver.get_filtered(client, 'name:%s' % module.params['name'])
    if lbvserver_proxy.has_equal_attributes(matching[0]):
        return True
    return False
def lb_vserver_diff(client, module, lbvserver_proxy):
    """Return the attribute differences between the configured and the actual lb vserver."""
    actual = lbvserver.get_filtered(client, 'name:%s' % module.params['name'])[0]
    return lbvserver_proxy.diff_object(actual)
def get_configured_service_bindings(client, module):
    """Build ConfigProxy objects for every service binding declared in the module params.

    Returns a dict keyed by the (stripped) service name; empty when the
    ``servicebindings`` parameter is absent or None.
    """
    log('Getting configured service bindings')
    readwrite_attrs = ['weight', 'name', 'servicename', 'servicegroupname']
    readonly_attrs = [
        'preferredlocation',
        'vserverid',
        'vsvrbindsvcip',
        'servicetype',
        'cookieipport',
        'port',
        'vsvrbindsvcport',
        'curstate',
        'ipv46',
        'dynamicweight',
    ]
    proxies = {}
    declared = module.params.get('servicebindings')
    if declared is not None:
        for entry in declared:
            values = copy.deepcopy(entry)
            # The binding must carry the vserver name it attaches to.
            values['name'] = module.params['name']
            proxies[entry['servicename'].strip()] = ConfigProxy(
                actual=lbvserver_service_binding(),
                client=client,
                attribute_values_dict=values,
                readwrite_attrs=readwrite_attrs,
                readonly_attrs=readonly_attrs,
            )
    return proxies
def get_configured_servicegroup_bindings(client, module):
    """Build ConfigProxy objects for every servicegroup binding declared in the module params.

    Returns a dict keyed by the (stripped) servicegroup name; empty when the
    ``servicegroupbindings`` parameter is absent or None.
    """
    log('Getting configured service group bindings')
    readwrite_attrs = ['weight', 'name', 'servicename', 'servicegroupname']
    readonly_attrs = []
    proxies = {}
    declared = module.params.get('servicegroupbindings')
    if declared is not None:
        for entry in declared:
            values = copy.deepcopy(entry)
            # The binding must carry the vserver name it attaches to.
            values['name'] = module.params['name']
            proxies[entry['servicegroupname'].strip()] = ConfigProxy(
                actual=lbvserver_servicegroup_binding(),
                client=client,
                attribute_values_dict=values,
                readwrite_attrs=readwrite_attrs,
                readonly_attrs=readonly_attrs,
            )
    return proxies
def get_actual_service_bindings(client, module):
    """Fetch the service bindings currently present on the lb vserver, keyed by service name.

    NITRO raises errorcode 258 when the vserver has no such bindings object;
    that case is treated the same as a zero count.
    """
    log('Getting actual service bindings')
    vserver_name = module.params['name']
    try:
        if lbvserver_service_binding.count(client, vserver_name) == 0:
            return {}
    except nitro_exception as err:
        if err.errorcode == 258:
            return {}
        raise
    fetched = lbvserver_service_binding.get(client, vserver_name)
    return {item.servicename: item for item in fetched}
def get_actual_servicegroup_bindings(client, module):
    """Fetch the servicegroup bindings currently present on the lb vserver, keyed by group name.

    NITRO raises errorcode 258 when the vserver has no such bindings object;
    that case is treated the same as a zero count.
    """
    log('Getting actual service group bindings')
    vserver_name = module.params['name']
    try:
        if lbvserver_servicegroup_binding.count(client, vserver_name) == 0:
            return {}
    except nitro_exception as err:
        if err.errorcode == 258:
            return {}
        raise
    fetched = lbvserver_servicegroup_binding.get(client, vserver_name)
    return {item.servicegroupname: item for item in fetched}
def service_bindings_identical(client, module):
    """Return True when configured and actual service bindings agree in keys and attributes."""
    log('service_bindings_identical')
    configured = get_configured_service_bindings(client, module)
    actual = get_actual_service_bindings(client, module)
    # Any symmetric difference between the key sets means they differ.
    if set(configured) ^ set(actual):
        return False
    # Same keys on both sides: compare each binding pairwise.
    for key, conf in configured.items():
        serv = actual[key]
        log('s diff %s' % conf.diff_object(serv))
        if not conf.has_equal_attributes(serv):
            return False
    return True
def servicegroup_bindings_identical(client, module):
    """Return True when configured and actual servicegroup bindings agree in keys and attributes."""
    log('servicegroup_bindings_identical')
    configured = get_configured_servicegroup_bindings(client, module)
    actual = get_actual_servicegroup_bindings(client, module)
    delta = set(configured) ^ set(actual)
    log('len %s' % len(delta))
    # Any symmetric difference between the key sets means they differ.
    if delta:
        return False
    # Same keys on both sides: compare each binding pairwise.
    for key, conf in configured.items():
        serv = actual[key]
        log('sg diff %s' % conf.diff_object(serv))
        if not conf.has_equal_attributes(serv):
            return False
    return True
def sync_service_bindings(client, module):
    """Make the vserver's actual service bindings match the configured ones.

    Deletes unconfigured bindings, adds missing ones, and replaces
    (delete + add) bindings whose attributes differ.
    """
    log('sync_service_bindings')
    actual = get_actual_service_bindings(client, module)
    configured = get_configured_service_bindings(client, module)
    actual_keys = set(actual)
    configured_keys = set(configured)
    # Remove bindings present on the appliance but absent from the config.
    for key in actual_keys - configured_keys:
        log('Deleting service binding %s' % key)
        # servicegroupname is blanked before delete — presumably a NITRO API
        # requirement for service (vs. servicegroup) bindings; kept as-is.
        actual[key].servicegroupname = ''
        actual[key].delete(client, actual[key])
    # Create bindings declared in the config but missing on the appliance.
    for key in configured_keys - actual_keys:
        log('Adding service binding %s' % key)
        configured[key].add()
    # Replace bindings that exist on both sides but whose attributes differ.
    for key in configured_keys & actual_keys:
        if not configured[key].has_equal_attributes(actual[key]):
            log('Updating service binding %s' % key)
            actual[key].servicegroupname = ''
            actual[key].delete(client, actual[key])
            configured[key].add()
def sync_servicegroup_bindings(client, module):
    """Make the vserver's actual servicegroup bindings match the configured ones.

    Deletes unconfigured bindings, adds missing ones, and replaces
    (delete + add) bindings whose attributes differ.
    """
    log('sync_servicegroup_bindings')
    actual = get_actual_servicegroup_bindings(client, module)
    configured = get_configured_servicegroup_bindings(client, module)
    actual_keys = set(actual)
    configured_keys = set(configured)
    # Remove bindings present on the appliance but absent from the config.
    for key in actual_keys - configured_keys:
        log('Deleting servicegroup binding %s' % key)
        # servicename is cleared before delete — presumably a NITRO API
        # requirement for servicegroup (vs. service) bindings; kept as-is.
        actual[key].servicename = None
        actual[key].delete(client, actual[key])
    # Create bindings declared in the config but missing on the appliance.
    for key in configured_keys - actual_keys:
        log('Adding servicegroup binding %s' % key)
        configured[key].add()
    # Replace bindings that exist on both sides but whose attributes differ.
    for key in configured_keys & actual_keys:
        if not configured[key].has_equal_attributes(actual[key]):
            log('Updating servicegroup binding %s' % key)
            actual[key].servicename = None
            actual[key].delete(client, actual[key])
            configured[key].add()
def ssl_certkey_bindings_identical(client, module):
    """Return True when the vserver's bound ssl certkeys match the ssl_certkey parameter."""
    log('Entering ssl_certkey_bindings_identical')
    vservername = module.params['name']
    if sslvserver_sslcertkey_binding.count(client, vservername) == 0:
        bindings = []
    else:
        bindings = sslvserver_sslcertkey_binding.get(client, vservername)
    log('Existing certs %s' % bindings)
    expected = module.params['ssl_certkey']
    if expected is None:
        # No certkey requested: identical only when nothing is bound.
        return len(bindings) == 0
    certificate_list = [item.certkeyname for item in bindings]
    log('certificate_list %s' % certificate_list)
    # Identical only when exactly the requested certkey is bound.
    return certificate_list == [expected]
def ssl_certkey_bindings_sync(client, module):
    """Rebind the ssl certkey: drop every existing binding, then bind the configured one (if any)."""
    log('Syncing ssl certificates')
    vservername = module.params['name']
    existing = []
    if sslvserver_sslcertkey_binding.count(client, vservername) != 0:
        existing = sslvserver_sslcertkey_binding.get(client, vservername)
    log('bindings len is %s' % len(existing))
    # Delete existing bindings
    for old_binding in existing:
        sslvserver_sslcertkey_binding.delete(client, old_binding)
    # Add binding if appropriate
    certkey = module.params['ssl_certkey']
    if certkey is not None:
        new_binding = sslvserver_sslcertkey_binding()
        new_binding.vservername = vservername
        new_binding.certkeyname = certkey
        sslvserver_sslcertkey_binding.add(client, new_binding)
def do_state_change(client, module, lbvserver_proxy):
    """Enable or disable the lb vserver per the 'disabled' parameter; return the NITRO result."""
    if module.params['disabled']:
        log('Disabling lb server')
        return lbvserver.disable(client, lbvserver_proxy.actual)
    log('Enabling lb server')
    return lbvserver.enable(client, lbvserver_proxy.actual)
def main():
    """Entry point of the netscaler_lb_vserver Ansible module.

    Builds the argument spec, logs in to the NetScaler NITRO API, then drives
    the lb vserver and its service / servicegroup / ssl-certkey bindings to
    the requested state ('present' or 'absent') and reports changed/failed.

    Fix: the sanity-check failure message for certkey bindings read
    'sll certkey' — corrected to 'ssl certkey'.
    """
    # NITRO lbvserver attributes exposed as module parameters.
    module_specific_arguments = dict(
        name=dict(type='str'),
        servicetype=dict(
            type='str',
            choices=[
                'HTTP',
                'FTP',
                'TCP',
                'UDP',
                'SSL',
                'SSL_BRIDGE',
                'SSL_TCP',
                'DTLS',
                'NNTP',
                'DNS',
                'DHCPRA',
                'ANY',
                'SIP_UDP',
                'SIP_TCP',
                'SIP_SSL',
                'DNS_TCP',
                'RTSP',
                'PUSH',
                'SSL_PUSH',
                'RADIUS',
                'RDP',
                'MYSQL',
                'MSSQL',
                'DIAMETER',
                'SSL_DIAMETER',
                'TFTP',
                'ORACLE',
                'SMPP',
                'SYSLOGTCP',
                'SYSLOGUDP',
                'FIX',
                'SSL_FIX',
            ]
        ),
        ipv46=dict(type='str'),
        ippattern=dict(type='str'),
        ipmask=dict(type='str'),
        port=dict(type='int'),
        range=dict(type='float'),
        persistencetype=dict(
            type='str',
            choices=[
                'SOURCEIP',
                'COOKIEINSERT',
                'SSLSESSION',
                'RULE',
                'URLPASSIVE',
                'CUSTOMSERVERID',
                'DESTIP',
                'SRCIPDESTIP',
                'CALLID',
                'RTSPSID',
                'DIAMETER',
                'FIXSESSION',
                'NONE',
            ]
        ),
        timeout=dict(type='float'),
        persistencebackup=dict(
            type='str',
            choices=[
                'SOURCEIP',
                'NONE',
            ]
        ),
        backuppersistencetimeout=dict(type='float'),
        lbmethod=dict(
            type='str',
            choices=[
                'ROUNDROBIN',
                'LEASTCONNECTION',
                'LEASTRESPONSETIME',
                'URLHASH',
                'DOMAINHASH',
                'DESTINATIONIPHASH',
                'SOURCEIPHASH',
                'SRCIPDESTIPHASH',
                'LEASTBANDWIDTH',
                'LEASTPACKETS',
                'TOKEN',
                'SRCIPSRCPORTHASH',
                'LRTM',
                'CALLIDHASH',
                'CUSTOMLOAD',
                'LEASTREQUEST',
                'AUDITLOGHASH',
                'STATICPROXIMITY',
            ]
        ),
        hashlength=dict(type='float'),
        netmask=dict(type='str'),
        v6netmasklen=dict(type='float'),
        backuplbmethod=dict(
            type='str',
            choices=[
                'ROUNDROBIN',
                'LEASTCONNECTION',
                'LEASTRESPONSETIME',
                'SOURCEIPHASH',
                'LEASTBANDWIDTH',
                'LEASTPACKETS',
                'CUSTOMLOAD',
            ]
        ),
        cookiename=dict(type='str'),
        listenpolicy=dict(type='str'),
        listenpriority=dict(type='float'),
        persistmask=dict(type='str'),
        v6persistmasklen=dict(type='float'),
        rtspnat=dict(type='bool'),
        m=dict(
            type='str',
            choices=[
                'IP',
                'MAC',
                'IPTUNNEL',
                'TOS',
            ]
        ),
        tosid=dict(type='float'),
        datalength=dict(type='float'),
        dataoffset=dict(type='float'),
        sessionless=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        connfailover=dict(
            type='str',
            choices=[
                'DISABLED',
                'STATEFUL',
                'STATELESS',
            ]
        ),
        redirurl=dict(type='str'),
        cacheable=dict(type='bool'),
        clttimeout=dict(type='float'),
        somethod=dict(
            type='str',
            choices=[
                'CONNECTION',
                'DYNAMICCONNECTION',
                'BANDWIDTH',
                'HEALTH',
                'NONE',
            ]
        ),
        sopersistence=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        sopersistencetimeout=dict(type='float'),
        healththreshold=dict(type='float'),
        sothreshold=dict(type='float'),
        sobackupaction=dict(
            type='str',
            choices=[
                'DROP',
                'ACCEPT',
                'REDIRECT',
            ]
        ),
        redirectportrewrite=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        downstateflush=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        disableprimaryondown=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        insertvserveripport=dict(
            type='str',
            choices=[
                'OFF',
                'VIPADDR',
                'V6TOV4MAPPING',
            ]
        ),
        vipheader=dict(type='str'),
        authenticationhost=dict(type='str'),
        authentication=dict(type='bool'),
        authn401=dict(type='bool'),
        authnvsname=dict(type='str'),
        push=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        pushvserver=dict(type='str'),
        pushlabel=dict(type='str'),
        pushmulticlients=dict(type='bool'),
        tcpprofilename=dict(type='str'),
        httpprofilename=dict(type='str'),
        dbprofilename=dict(type='str'),
        comment=dict(type='str'),
        l2conn=dict(type='bool'),
        oracleserverversion=dict(
            type='str',
            choices=[
                '10G',
                '11G',
            ]
        ),
        mssqlserverversion=dict(
            type='str',
            choices=[
                '70',
                '2000',
                '2000SP1',
                '2005',
                '2008',
                '2008R2',
                '2012',
                '2014',
            ]
        ),
        mysqlprotocolversion=dict(type='float'),
        mysqlserverversion=dict(type='str'),
        mysqlcharacterset=dict(type='float'),
        mysqlservercapabilities=dict(type='float'),
        appflowlog=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        netprofile=dict(type='str'),
        icmpvsrresponse=dict(
            type='str',
            choices=[
                'PASSIVE',
                'ACTIVE',
            ]
        ),
        rhistate=dict(
            type='str',
            choices=[
                'PASSIVE',
                'ACTIVE',
            ]
        ),
        newservicerequest=dict(type='float'),
        newservicerequestunit=dict(
            type='str',
            choices=[
                'PER_SECOND',
                'PERCENT',
            ]
        ),
        newservicerequestincrementinterval=dict(type='float'),
        minautoscalemembers=dict(type='float'),
        maxautoscalemembers=dict(type='float'),
        skippersistency=dict(
            type='str',
            choices=[
                'Bypass',
                'ReLb',
                'None',
            ]
        ),
        authnprofile=dict(type='str'),
        macmoderetainvlan=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        dbslb=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        dns64=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        bypassaaaa=dict(type='bool'),
        recursionavailable=dict(type='bool'),
        processlocal=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        dnsprofilename=dict(type='str'),
    )
    # Module-only parameters that do not map 1:1 to NITRO attributes.
    hand_inserted_arguments = dict(
        servicebindings=dict(type='list'),
        servicegroupbindings=dict(type='list'),
        ssl_certkey=dict(type='str'),
        disabled=dict(
            type='bool',
            default=False
        ),
    )
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    # Fallthrough to rest of execution
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # requests exception classes are matched by name to avoid importing requests here.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Attributes the module may set on the lbvserver NITRO object.
    readwrite_attrs = [
        'name',
        'servicetype',
        'ipv46',
        'ippattern',
        'ipmask',
        'port',
        'range',
        'persistencetype',
        'timeout',
        'persistencebackup',
        'backuppersistencetimeout',
        'lbmethod',
        'hashlength',
        'netmask',
        'v6netmasklen',
        'backuplbmethod',
        'cookiename',
        'listenpolicy',
        'listenpriority',
        'persistmask',
        'v6persistmasklen',
        'rtspnat',
        'm',
        'tosid',
        'datalength',
        'dataoffset',
        'sessionless',
        'connfailover',
        'redirurl',
        'cacheable',
        'clttimeout',
        'somethod',
        'sopersistence',
        'sopersistencetimeout',
        'healththreshold',
        'sothreshold',
        'sobackupaction',
        'redirectportrewrite',
        'downstateflush',
        'disableprimaryondown',
        'insertvserveripport',
        'vipheader',
        'authenticationhost',
        'authentication',
        'authn401',
        'authnvsname',
        'push',
        'pushvserver',
        'pushlabel',
        'pushmulticlients',
        'tcpprofilename',
        'httpprofilename',
        'dbprofilename',
        'comment',
        'l2conn',
        'oracleserverversion',
        'mssqlserverversion',
        'mysqlprotocolversion',
        'mysqlserverversion',
        'mysqlcharacterset',
        'mysqlservercapabilities',
        'appflowlog',
        'netprofile',
        'icmpvsrresponse',
        'rhistate',
        'newservicerequest',
        'newservicerequestunit',
        'newservicerequestincrementinterval',
        'minautoscalemembers',
        'maxautoscalemembers',
        'skippersistency',
        'authnprofile',
        'macmoderetainvlan',
        'dbslb',
        'dns64',
        'bypassaaaa',
        'recursionavailable',
        'processlocal',
        'dnsprofilename',
    ]
    # Attributes reported by NITRO that are never written by the module.
    readonly_attrs = [
        'value',
        'ipmapping',
        'ngname',
        'type',
        'curstate',
        'effectivestate',
        'status',
        'lbrrreason',
        'redirect',
        'precedence',
        'homepage',
        'dnsvservername',
        'domain',
        'policyname',
        'cachevserver',
        'health',
        'gotopriorityexpression',
        'ruletype',
        'groupname',
        'cookiedomain',
        'map',
        'gt2gb',
        'consolidatedlconn',
        'consolidatedlconngbl',
        'thresholdvalue',
        'bindpoint',
        'invoke',
        'labeltype',
        'labelname',
        'version',
        'totalservices',
        'activeservices',
        'statechangetimesec',
        'statechangetimeseconds',
        'statechangetimemsec',
        'tickssincelaststatechange',
        'isgslb',
        'vsvrdynconnsothreshold',
        'backupvserverstatus',
        '__count',
    ]
    # Attributes that NITRO refuses to update in place; changing any of these
    # requires deleting and re-creating the vserver.
    immutable_attrs = [
        'name',
        'servicetype',
        'ipv46',
        'port',
        'range',
        'state',
        'redirurl',
        'vipheader',
        'newservicerequestunit',
        'td',
    ]
    # Map Ansible-style values (booleans, lowercase choices) to the
    # ON/OFF, YES/NO and UPPERCASE forms the NITRO API expects.
    transforms = {
        'rtspnat': ['bool_on_off'],
        'authn401': ['bool_on_off'],
        'bypassaaaa': ['bool_yes_no'],
        'authentication': ['bool_on_off'],
        'cacheable': ['bool_yes_no'],
        'l2conn': ['bool_on_off'],
        'pushmulticlients': ['bool_yes_no'],
        'recursionavailable': ['bool_yes_no'],
        'sessionless': [lambda v: v.upper()],
        'sopersistence': [lambda v: v.upper()],
        'redirectportrewrite': [lambda v: v.upper()],
        'downstateflush': [lambda v: v.upper()],
        'disableprimaryondown': [lambda v: v.upper()],
        'push': [lambda v: v.upper()],
        'appflowlog': [lambda v: v.upper()],
        'macmoderetainvlan': [lambda v: v.upper()],
        'dbslb': [lambda v: v.upper()],
        'dns64': [lambda v: v.upper()],
        'processlocal': [lambda v: v.upper()],
    }
    lbvserver_proxy = ConfigProxy(
        actual=lbvserver(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
        transforms=transforms,
    )
    try:
        ensure_feature_is_enabled(client, 'LB')
        if module.params['state'] == 'present':
            log('Applying actions for state present')
            if not lb_vserver_exists(client, module):
                log('Add lb vserver')
                if not module.check_mode:
                    lbvserver_proxy.add()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not lb_vserver_identical(client, module, lbvserver_proxy):
                # Check if we try to change value of immutable attributes
                diff_dict = lb_vserver_diff(client, module, lbvserver_proxy)
                immutables_changed = get_immutables_intersection(lbvserver_proxy, diff_dict.keys())
                if immutables_changed != []:
                    msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,)
                    module.fail_json(msg=msg, diff=diff_dict, **module_result)
                log('Update lb vserver')
                if not module.check_mode:
                    lbvserver_proxy.update()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                log('Present noop')
            if not service_bindings_identical(client, module):
                if not module.check_mode:
                    sync_service_bindings(client, module)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            if not servicegroup_bindings_identical(client, module):
                if not module.check_mode:
                    sync_servicegroup_bindings(client, module)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            if module.params['servicetype'] != 'SSL' and module.params['ssl_certkey'] is not None:
                module.fail_json(msg='ssl_certkey is applicable only to SSL vservers', **module_result)
            # Check if SSL certkey is sane
            if module.params['servicetype'] == 'SSL':
                if not ssl_certkey_bindings_identical(client, module):
                    if not module.check_mode:
                        ssl_certkey_bindings_sync(client, module)
                    module_result['changed'] = True
            if not module.check_mode:
                res = do_state_change(client, module, lbvserver_proxy)
                if res.errorcode != 0:
                    msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
                    module.fail_json(msg=msg, **module_result)
            # Sanity check
            log('Sanity checks for state present')
            if not module.check_mode:
                if not lb_vserver_exists(client, module):
                    module.fail_json(msg='Did not create lb vserver', **module_result)
                if not lb_vserver_identical(client, module, lbvserver_proxy):
                    msg = 'lb vserver is not configured correctly'
                    module.fail_json(msg=msg, diff=lb_vserver_diff(client, module, lbvserver_proxy), **module_result)
                if not service_bindings_identical(client, module):
                    module.fail_json(msg='service bindings are not identical', **module_result)
                if not servicegroup_bindings_identical(client, module):
                    module.fail_json(msg='servicegroup bindings are not identical', **module_result)
                if module.params['servicetype'] == 'SSL':
                    if not ssl_certkey_bindings_identical(client, module):
                        # Fixed message: was misspelled 'sll certkey'.
                        module.fail_json(msg='ssl certkey bindings not identical', **module_result)
        elif module.params['state'] == 'absent':
            log('Applying actions for state absent')
            if lb_vserver_exists(client, module):
                if not module.check_mode:
                    log('Delete lb vserver')
                    lbvserver_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                log('Absent noop')
                module_result['changed'] = False
            # Sanity check
            log('Sanity checks for state absent')
            if not module.check_mode:
                if lb_vserver_exists(client, module):
                    module.fail_json(msg='lb vserver still exists', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)
# Script entry point when Ansible executes the module directly.
if __name__ == "__main__":
    main()
| gpl-3.0 |
leansoft/edx-platform | openedx/core/djangoapps/credit/tasks.py | 14 | 7285 | """
This file contains celery tasks for credit course views.
"""
import datetime
from pytz import UTC
from django.conf import settings
from celery import task
from celery.utils.log import get_task_logger
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from .api import set_credit_requirements
from openedx.core.djangoapps.credit.exceptions import InvalidCreditRequirements
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.utils import get_course_blocks
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
LOGGER = get_task_logger(__name__)
# XBlocks that can be added as credit requirements
CREDIT_REQUIREMENT_XBLOCK_CATEGORIES = [
"edx-reverification-block",
]
# pylint: disable=not-callable
@task(default_retry_delay=settings.CREDIT_TASK_DEFAULT_RETRY_DELAY, max_retries=settings.CREDIT_TASK_MAX_RETRIES)
def update_credit_course_requirements(course_id):  # pylint: disable=invalid-name
    """
    Update the credit requirements table for a course.

    Celery task. On known failures the task re-queues itself, using the
    retry delay and max-retry settings given to the @task decorator.

    Args:
        course_id(str): A string representation of course identifier

    Returns:
        None
    """
    try:
        course_key = CourseKey.from_string(course_id)
        is_credit_course = CreditCourse.is_credit_course(course_key)
        # Only credit-eligible courses get their requirements recomputed.
        if is_credit_course:
            requirements = _get_course_credit_requirements(course_key)
            set_credit_requirements(course_key, requirements)
    except (InvalidKeyError, ItemNotFoundError, InvalidCreditRequirements) as exc:
        LOGGER.error('Error on adding the requirements for course %s - %s', course_id, unicode(exc))
        # retry() raises celery's Retry exception; the explicit `raise` makes
        # that flow obvious. NOTE(review): an InvalidKeyError cannot succeed on
        # retry — presumably acceptable; confirm retries are intended here.
        raise update_credit_course_requirements.retry(args=[course_id], exc=exc)
    else:
        # Runs only when the try block completed without raising.
        LOGGER.info('Requirements added for course %s', course_id)
def _get_course_credit_requirements(course_key):
    """
    Assemble the full list of credit requirements for the given course.

    Combines the minimum-grade requirement, the ICRV checkpoint XBlock
    requirements, and the proctored exam requirements (via the
    edx-proctoring subsystem).

    Args:
        course_key (CourseKey): Identifier for the course.

    Returns:
        List of credit requirements (dictionaries)
    """
    xblock_requirements = _get_credit_course_requirement_xblocks(course_key)
    min_grade_requirement = _get_min_grade_requirement(course_key)
    proctoring_requirements = _get_proctoring_requirements(course_key)
    # Order matters for how requirements are displayed downstream:
    # minimum grade first, then XBlock checkpoints, then proctored exams.
    return min_grade_requirement + xblock_requirements + proctoring_requirements
def _get_min_grade_requirement(course_key):
    """
    Get the 'minimum_grade_credit' requirement for the given course.

    Args:
        course_key (CourseKey): Identifier for the course.

    Returns:
        A single-element list containing the minimum-grade requirement dict,
        or an empty list when the course has no 'minimum_grade_credit'
        attribute.
    """
    course = modulestore().get_course(course_key, depth=0)
    try:
        return [
            {
                "namespace": "grade",
                "name": "grade",
                "display_name": "Minimum Grade",
                "criteria": {
                    "min_grade": getattr(course, "minimum_grade_credit")
                },
            }
        ]
    except AttributeError:
        # Bug fix: the original logged and fell through, implicitly returning
        # None (its `else: return []` was unreachable because the `try` body
        # always returns). Returning None made the caller's list concatenation
        # raise TypeError; return an empty list instead.
        LOGGER.error("The course %s does not have minimum_grade_credit attribute", unicode(course.id))
        return []
def _get_credit_course_requirement_xblocks(course_key):  # pylint: disable=invalid-name
    """Collect credit-requirement XBlocks for the specified course.

    Args:
        course_key (CourseKey): Identifier for the course.

    Returns:
        The list of credit requirement xblock dicts.
    """
    requirements = []
    # Look the blocks up by their "category" rather than walking the
    # whole course tree: for performance reasons this avoids loading
    # and searching every XBlock in the course.
    for block_category in CREDIT_REQUIREMENT_XBLOCK_CATEGORIES:
        for xblock in _get_xblocks(course_key, block_category):
            if not _is_credit_requirement(xblock):
                continue
            requirements.append({
                "namespace": xblock.get_credit_requirement_namespace(),
                "name": xblock.get_credit_requirement_name(),
                "display_name": xblock.get_credit_requirement_display_name(),
                "criteria": {},
            })
    return requirements
def _get_xblocks(course_key, category):
    """
    Retrieve all XBlocks in the course for a particular category.

    Returns only XBlocks that are published and haven't been deleted,
    ordered primarily by start date (blocks with no start date sort
    first) and secondarily by credit requirement display name.
    """
    blocks = get_course_blocks(course_key, category)
    # Sort by the secondary key first; Python's sort is stable, so the
    # subsequent sort on the primary key preserves this order for ties.
    blocks = sorted(blocks, key=lambda blk: blk.get_credit_requirement_display_name())
    earliest = datetime.datetime(datetime.MINYEAR, 1, 1).replace(tzinfo=UTC)
    blocks = sorted(blocks, key=lambda blk: blk.start if blk.start is not None else earliest)
    return blocks
def _is_credit_requirement(xblock):
"""
Check if the given XBlock is a credit requirement.
Args:
xblock(XBlock): The given XBlock object
Returns:
True if XBlock is a credit requirement else False
"""
required_methods = [
"get_credit_requirement_namespace",
"get_credit_requirement_name",
"get_credit_requirement_display_name"
]
for method_name in required_methods:
if not callable(getattr(xblock, method_name, None)):
LOGGER.error(
"XBlock %s is marked as a credit requirement but does not "
"implement %s", unicode(xblock), method_name
)
return False
return True
def _get_proctoring_requirements(course_key):
    """
    Build credit requirements for the course's proctored exams.

    For credit-bearing courses, all proctored exams must be validated
    and confirmed from a proctoring standpoint; the passing grade on an
    exam is not enough.

    Args:
        course_key: The key of the course in question.

    Returns:
        List of requirement dicts, one per active, non-practice
        proctored exam.
    """
    # Imported locally: a module-level import creates a circular
    # reference when launching the Studio process.
    from edx_proctoring.api import get_all_exams_for_course

    requirements = []
    for exam in get_all_exams_for_course(unicode(course_key)):
        # Practice exams do not count towards eligibility.
        if exam['is_practice_exam']:
            continue
        if not (exam['is_proctored'] and exam['is_active']):
            continue
        requirements.append({
            'namespace': 'proctored_exam',
            'name': exam['content_id'],
            'display_name': exam['exam_name'],
            'criteria': {},
        })

    log_msg = (
        'Registering the following as \'proctored_exam\' credit requirements: {log_msg}'.format(
            log_msg=requirements
        )
    )
    LOGGER.info(log_msg)
    return requirements
| agpl-3.0 |
tomakant/data-science-from-scratch | code/decision_trees.py | 60 | 5866 | from __future__ import division
from collections import Counter, defaultdict
from functools import partial
import math, random
def entropy(class_probabilities):
    """Given a list of class probabilities, compute the entropy."""
    total = 0
    for p in class_probabilities:
        # skip zero probabilities: 0 * log(0) is taken to be 0
        if p:
            total += -p * math.log(p, 2)
    return total
def class_probabilities(labels):
    """Return the fraction of the labels belonging to each class."""
    total_count = len(labels)
    counts = Counter(labels)
    return [n / total_count for n in counts.values()]
def data_entropy(labeled_data):
    """Compute the entropy of the labels in (input, label) pairs."""
    total = len(labeled_data)
    # Count how often each label occurs, then fold the per-class
    # probabilities straight into the entropy sum (counts are >= 1,
    # so no zero-probability terms can appear).
    label_counts = Counter(label for _, label in labeled_data)
    return sum(-(count / total) * math.log(count / total, 2)
               for count in label_counts.values())
def partition_entropy(subsets):
    """Find the entropy from this partition of data into subsets.

    Each subset's entropy is weighted by the fraction of the total
    data it contains.
    """
    total_count = sum(len(subset) for subset in subsets)
    weighted = 0
    for subset in subsets:
        weighted += data_entropy(subset) * len(subset) / total_count
    return weighted
def group_by(items, key_fn):
    """Return a defaultdict(list) where each input item is appended
    to the list stored under key_fn(item)."""
    groups = defaultdict(list)
    for item in items:
        groups[key_fn(item)].append(item)
    return groups
def partition_by(inputs, attribute):
    """Return a dict of inputs partitioned by the attribute.

    Each input is a pair (attribute_dict, label); the result maps each
    observed attribute value to the list of pairs carrying that value.
    """
    # group_by is inlined here: bucket each pair under its attribute value
    partitions = defaultdict(list)
    for pair in inputs:
        attribute_dict = pair[0]
        partitions[attribute_dict[attribute]].append(pair)
    return partitions
def partition_entropy_by(inputs, attribute):
    """Compute the entropy corresponding to partitioning on attribute."""
    return partition_entropy(partition_by(inputs, attribute).values())
def classify(tree, input):
    """Classify the input using the given decision tree."""
    # Leaf nodes are represented directly by the booleans True/False.
    if tree in [True, False]:
        return tree

    # Interior node: (attribute, {attribute_value: subtree, ..., None: default})
    attribute, subtree_dict = tree
    key = input.get(attribute)      # None if input is missing the attribute
    if key not in subtree_dict:
        key = None                  # unseen values fall back to the None subtree
    return classify(subtree_dict[key], input)
def build_tree_id3(inputs, split_candidates=None):
    """Build a decision tree from (attribute_dict, label) pairs using ID3.

    Args:
        inputs: list of (attribute_dict, label) pairs; label is a boolean.
        split_candidates: attributes still available to split on; on the
            first call (None) all keys of the first input are candidates.

    Returns:
        A boolean leaf, or a tuple (attribute, subtree_dict) where the
        dict maps attribute values -- plus None for unseen values -- to
        subtrees.
    """
    if split_candidates is None:
        # FIX: list() so this works on Python 3, where dict.keys() is a
        # view; a concrete list is also what the recursion expects.
        split_candidates = list(inputs[0][0].keys())

    # count Trues and Falses in the inputs
    num_inputs = len(inputs)
    num_trues = len([label for item, label in inputs if label])
    num_falses = num_inputs - num_trues

    if num_trues == 0:                     # if only Falses are left
        return False                       # return a "False" leaf
    if num_falses == 0:                    # if only Trues are left
        return True                        # return a "True" leaf
    if not split_candidates:               # if no split candidates left
        return num_trues >= num_falses     # return the majority leaf

    # otherwise, split on the attribute with the lowest partition entropy
    best_attribute = min(split_candidates,
                         key=partial(partition_entropy_by, inputs))

    partitions = partition_by(inputs, best_attribute)
    new_candidates = [a for a in split_candidates
                      if a != best_attribute]

    # recursively build the subtrees.
    # FIX: .items() instead of the Python-2-only .iteritems(); .items()
    # exists (and behaves equivalently here) on both Python 2 and 3.
    subtrees = {attribute_value: build_tree_id3(subset, new_candidates)
                for attribute_value, subset in partitions.items()}

    subtrees[None] = num_trues > num_falses   # default for unseen values

    return (best_attribute, subtrees)
def forest_classify(trees, input):
    """Classify the input by majority vote across a forest of trees."""
    vote_counts = Counter(classify(tree, input) for tree in trees)
    return vote_counts.most_common(1)[0][0]
if __name__ == "__main__":

    # Toy hiring data set from "Data Science from Scratch": each input is
    # a pair ({attribute: value, ...}, did_interview_well).
    # NOTE: this script uses Python 2 print statements and must be run
    # under Python 2.
    inputs = [
        ({'level':'Senior','lang':'Java','tweets':'no','phd':'no'}, False),
        ({'level':'Senior','lang':'Java','tweets':'no','phd':'yes'}, False),
        ({'level':'Mid','lang':'Python','tweets':'no','phd':'no'}, True),
        ({'level':'Junior','lang':'Python','tweets':'no','phd':'no'}, True),
        ({'level':'Junior','lang':'R','tweets':'yes','phd':'no'}, True),
        ({'level':'Junior','lang':'R','tweets':'yes','phd':'yes'}, False),
        ({'level':'Mid','lang':'R','tweets':'yes','phd':'yes'}, True),
        ({'level':'Senior','lang':'Python','tweets':'no','phd':'no'}, False),
        ({'level':'Senior','lang':'R','tweets':'yes','phd':'no'}, True),
        ({'level':'Junior','lang':'Python','tweets':'yes','phd':'no'}, True),
        ({'level':'Senior','lang':'Python','tweets':'yes','phd':'yes'},True),
        ({'level':'Mid','lang':'Python','tweets':'no','phd':'yes'}, True),
        ({'level':'Mid','lang':'Java','tweets':'yes','phd':'no'}, True),
        ({'level':'Junior','lang':'Python','tweets':'no','phd':'yes'},False)
    ]

    # Show the partition entropy of splitting the full data set on each
    # attribute; the lowest value is the attribute ID3 would choose first.
    for key in ['level','lang','tweets','phd']:
        print key, partition_entropy_by(inputs, key)
    print

    # Repeat the computation within the 'Senior' branch only.
    senior_inputs = [(input, label)
                     for input, label in inputs if input["level"] == "Senior"]

    for key in ['lang', 'tweets', 'phd']:
        print key, partition_entropy_by(senior_inputs, key)
    print

    print "building the tree"
    tree = build_tree_id3(inputs)
    print tree

    # Classify some examples, including inputs with unseen ('Intern') or
    # missing attribute values, which fall through to the None subtree.
    print "Junior / Java / tweets / no phd", classify(tree,
        { "level" : "Junior",
          "lang" : "Java",
          "tweets" : "yes",
          "phd" : "no"} )

    print "Junior / Java / tweets / phd", classify(tree,
        { "level" : "Junior",
          "lang" : "Java",
          "tweets" : "yes",
          "phd" : "yes"} )

    print "Intern", classify(tree, { "level" : "Intern" } )
    print "Senior", classify(tree, { "level" : "Senior" } )
| unlicense |
nivertech/duktape | util/filter_test262_log.py | 12 | 2355 | #!/usr/bin/python
import os
import sys
import json
import yaml
def main():
    """Filter a test262 run log (stdin) against a known-issues YAML file.

    Usage: filter_test262_log.py <known_issues.yaml> < test262.log

    Echoes the raw failure list, then prints a categorised version
    annotating each failure as diagnosed / known issue / unknown, warns
    about unused rules, and finally reports the count of failures that
    genuinely need fixing.
    """
    with open(sys.argv[1], 'rb') as f:
        # NOTE(review): yaml.load() without an explicit Loader can run
        # arbitrary constructors.  Acceptable for a trusted local rules
        # file, but switch to yaml.safe_load() if the input could ever
        # be untrusted.
        known_issues = yaml.load(f.read())

    # Lines containing any of these are successes/expected results and
    # are dropped entirely.
    skipstrings = [
        'passed in strict mode',
        'passed in non-strict mode',
        'failed in strict mode as expected',
        'failed in non-strict mode as expected'
    ]

    in_failed_tests = False
    tofix_count = 0    # count of bugs that will be fixed (no uncertainty about proper behavior etc)
    known_errors = []
    diagnosed_errors = []
    unknown_errors = []
    other_errors = []

    for line in sys.stdin:
        if len(line) > 1 and line[-1] == '\n':
            line = line[:-1]

        # Skip success cases
        skip = False
        for sk in skipstrings:
            if sk in line:
                skip = True
        if skip:
            continue

        # Augment error list with "known bugs"
        print(line)    # print error list as is, then refined version later

        if 'failed tests' in line.lower():
            in_failed_tests = True
            continue

        if in_failed_tests and line.strip() == '':
            in_failed_tests = False
            continue

        if in_failed_tests:
            # "  intl402/ch12/12.2/12.2.3_c in non-strict mode"
            tmp = line.strip().split(' ')
            test = tmp[0]

            matched = False
            for kn in known_issues:
                if kn.get('test', None) != test:
                    continue
                # FIX: use `in` instead of the Python-2-only dict.has_key(),
                # which was removed in Python 3 (behaviour is identical).
                if 'diagnosed' in kn:
                    tofix_count += 1
                    diagnosed_errors.append(line + ' // diagnosed: ' + kn['diagnosed'])
                elif 'knownissue' in kn:
                    # don't bump tofix_count, as testcase expected result is not certain
                    known_errors.append(line + ' // KNOWN: ' + kn['knownissue'])
                else:
                    tofix_count += 1
                    unknown_errors.append(line + ' // ??? (rule matches)')
                kn['used'] = True    # mark rule used
                matched = True
                break
            if matched:
                continue

            # no match, to fix
            other_errors.append(line)
            tofix_count += 1

    print('')
    print('=== CATEGORISED ERRORS ===')
    print('')

    for i in known_errors:
        print(i)
    for i in diagnosed_errors:
        print(i)
    for i in unknown_errors:
        print(i)
    for i in other_errors:
        print(i)

    # Check for unused rules (e.g. bugs fixed)
    print('')
    for kn in known_issues:
        if 'used' not in kn:
            print('WARNING: unused rule: ' + json.dumps(kn))

    # To fix count
    print('')
    print('TO-FIX COUNT: ' + str(tofix_count))
    print('    = test case failures which need fixing (Duktape bugs, uninvestigated)')

if __name__ == '__main__':
    main()
| mit |
timothyclemansinsea/smc | src/dev/project/util.py | 1 | 1250 | from __future__ import print_function
import os, json, socket
join = os.path.join
def cmd(s):
    """Echo the shell command *s*, run it, and raise on failure.

    Args:
        s (str): shell command line, executed via os.system.

    Raises:
        RuntimeError: if the command exits with a nonzero status.
    """
    print(s)
    status = os.system(s)
    if status:
        # Include the command and its status in the error: previously a
        # bare RuntimeError was raised with no message, making failures
        # impossible to diagnose from the traceback alone.
        raise RuntimeError("command failed (status %s): %s" % (status, s))
def chdir():
    """Change the working directory to the directory containing this file."""
    here = os.path.dirname(os.path.abspath(__file__))
    os.chdir(here)
def base_url():
    """Compute the project base URL from $SMC/info.json and the hub port,
    persist it to ../../data/base_url, and return it."""
    info_path = join(os.environ['SMC'], 'info.json')
    with open(info_path) as fp:
        info = json.loads(fp.read())
    url = "/{project_id}/port/{hub_port}".format(project_id=info['project_id'], hub_port=get_ports()['hub'])
    with open("../../data/base_url", 'w') as fp:
        fp.write(url)
    return url
def get_open_port():  # http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
    """Ask the OS for a currently-free TCP port number and return it.

    Binding to port 0 lets the kernel pick an unused port; the socket is
    then closed, so the port is free for the caller to use.  (There is a
    small race window before the caller binds it again.)
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("", 0))
    sock.listen(1)
    port = sock.getsockname()[1]
    sock.close()
    return port
def get_ports():
    """Return a dict of port numbers for 'hub', 'rethinkdb', and
    'rethinkdb_cluster'.

    Port assignments are persisted as one file per service inside a
    'ports' directory next to this script, so repeated invocations reuse
    the same port numbers.  Services without a persisted port get a
    fresh OS-chosen free port, which is then written back to disk.
    """
    P = os.path.split(os.path.abspath(__file__))[0]
    path = join(P, 'ports')
    if not os.path.exists(path):
        os.mkdir(path)
    ports = {'hub':0, 'rethinkdb':0, 'rethinkdb_cluster':0}
    for x in ports.keys():
        file = join(path, x)
        if os.path.exists(file):
            # reuse the previously persisted port for this service
            ports[x] = int(open(file).read())
        else:
            # first run for this service: allocate a free port and persist it
            ports[x] = get_open_port()
            open(file,'w').write(str(ports[x]))
    return ports
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/PyQt4/examples/tools/i18n/i18n_rc3.py | 5 | 48804 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Wed Mar 20 13:49:41 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = b"\
\x00\x00\x03\x4c\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x3d\x00\x4d\x09\xa4\x00\x00\x00\x58\x00\x5a\xf0\x84\
\x00\x00\x00\x71\x02\xf0\x8c\x31\x00\x00\x00\x8e\x05\x93\x08\xe5\
\x00\x00\x00\xaf\x05\x9b\xa6\x44\x00\x00\x00\xc8\x06\x3c\xe8\x53\
\x00\x00\x00\xe3\x06\xec\x79\x65\x00\x00\x01\x04\x0c\x4e\x30\xd8\
\x00\x00\x01\x25\x0e\x9f\xe7\x05\x00\x00\x01\x40\x69\x00\x00\x01\
\x87\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x00\x56\x00\x75\x00\x65\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x10\x00\x26\x00\x46\x00\x69\x00\x63\
\x00\x68\x00\x69\x00\x65\x00\x72\x05\x00\x2a\xd0\x25\x01\x03\x00\
\x00\x00\x10\x00\x26\x00\x51\x00\x75\x00\x69\x00\x74\x00\x74\x00\
\x65\x00\x72\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0e\x00\x50\
\x00\x72\x00\x65\x00\x6d\x00\x69\x00\x65\x00\x72\x05\x00\x4d\x09\
\xa4\x01\x03\x00\x00\x00\x12\x00\x54\x00\x72\x00\x6f\x00\x69\x00\
\x73\x00\x69\x00\xe8\x00\x6d\x00\x65\x05\x00\x5a\xf0\x84\x01\x03\
\x00\x00\x00\x16\x00\x4c\x00\x61\x00\x6e\x00\x67\x00\x75\x00\x65\
\x00\x20\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\
\x75\x00\x65\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x10\x00\x44\
\x00\x65\x00\x75\x00\x78\x00\x69\x00\xe8\x00\x6d\x00\x65\x05\x05\
\x9b\xa6\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\
\x6d\x00\xe9\x00\x74\x00\x72\x00\x69\x00\x71\x00\x75\x00\x65\x05\
\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\x00\x65\x00\x72\
\x00\x73\x00\x70\x00\x65\x00\x63\x00\x74\x00\x69\x00\x76\x00\x65\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x10\x00\x46\x00\x72\x00\
\x61\x00\x6e\x00\xe7\x00\x61\x00\x69\x00\x73\x05\x0c\x4e\x30\xd8\
\x01\x03\x00\x00\x00\x3c\x00\x45\x00\x78\x00\x65\x00\x6d\x00\x70\
\x00\x6c\x00\x65\x00\x20\x00\x64\x00\x27\x00\x69\x00\x6e\x00\x74\
\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x61\x00\x74\x00\x69\x00\x6f\
\x00\x6e\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\
\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xb2\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x20\x00\x47\xdf\x04\
\x00\x00\x00\x33\x00\x4d\x09\xa4\x00\x00\x00\x46\x00\x5a\xf0\x84\
\x00\x00\x00\x57\x02\xf0\x8c\x31\x00\x00\x00\x68\x05\x93\x08\xe5\
\x00\x00\x00\x81\x05\x9b\xa6\x44\x00\x00\x00\x90\x06\x3c\xe8\x53\
\x00\x00\x00\xa1\x06\xec\x79\x65\x00\x00\x00\xb2\x0c\x4e\x30\xd8\
\x00\x00\x00\xc5\x0e\x9f\xe7\x05\x00\x00\x00\xd6\x69\x00\x00\x00\
\xed\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x04\xbc\xf4\xae\x30\x05\x00\x05\xcf\xc7\
\x01\x03\x00\x00\x00\x08\xd3\x0c\xc7\x7c\x00\x26\x00\x46\x05\x00\
\x2a\xd0\x25\x01\x03\x00\x00\x00\x08\xc8\x85\xb8\xcc\x00\x26\x00\
\x58\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x06\xcc\xab\xbc\x88\
\xc9\xf8\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x06\xc1\x38\xbc\
\x88\xc9\xf8\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x0e\xc5\xb8\
\xc5\xb4\x00\x20\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\
\x31\x01\x03\x00\x00\x00\x04\xbe\x57\xac\x01\x05\x05\x93\x08\xe5\
\x01\x03\x00\x00\x00\x06\xb4\x50\xbc\x88\xc9\xf8\x05\x05\x9b\xa6\
\x44\x01\x03\x00\x00\x00\x06\xb4\xf1\xce\x21\xb3\xc4\x05\x06\x3c\
\xe8\x53\x01\x03\x00\x00\x00\x08\xc6\xd0\xad\xfc\xd6\x54\xbc\x95\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x06\xd5\x5c\xad\x6d\xc5\
\xb4\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x0c\xad\x6d\xc8\x1c\
\xd6\x54\x00\x20\xc6\x08\xc8\x1c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\
\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\
\x00\
\x00\x00\x03\x26\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x35\x00\x4d\x09\xa4\x00\x00\x00\x4a\x00\x5a\xf0\x84\
\x00\x00\x00\x61\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\
\x00\x00\x00\x93\x05\x9b\xa6\x44\x00\x00\x00\xaa\x06\x3c\xe8\x53\
\x00\x00\x00\xc1\x06\xec\x79\x65\x00\x00\x00\xe8\x0c\x4e\x30\xd8\
\x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x22\x69\x00\x00\x01\
\x61\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x04\x12\x04\x38\x04\x34\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x08\x04\x24\x04\x30\x04\x39\x04\x3b\
\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x04\x12\x04\x4b\x04\
\x45\x04\x3e\x04\x34\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0c\
\x04\x1f\x04\x35\x04\x40\x04\x32\x04\x4b\x04\x39\x05\x00\x4d\x09\
\xa4\x01\x03\x00\x00\x00\x0c\x04\x22\x04\x40\x04\x35\x04\x42\x04\
\x38\x04\x39\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x10\x04\x2f\
\x04\x37\x04\x4b\x04\x3a\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\
\xf0\x8c\x31\x01\x03\x00\x00\x00\x0c\x04\x1a\x04\x43\x04\x40\x04\
\x41\x04\x38\x04\x32\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0c\
\x04\x12\x04\x42\x04\x3e\x04\x40\x04\x3e\x04\x39\x05\x05\x9b\xa6\
\x44\x01\x03\x00\x00\x00\x1c\x04\x18\x04\x37\x04\x3e\x04\x3c\x04\
\x35\x04\x42\x04\x40\x04\x38\x04\x47\x04\x35\x04\x41\x04\x3a\x04\
\x38\x04\x39\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x04\x1f\
\x04\x35\x04\x40\x04\x41\x04\x3f\x04\x35\x04\x3a\x04\x42\x04\x38\
\x04\x32\x04\x30\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0e\x04\
\x20\x04\x43\x04\x41\x04\x41\x04\x3a\x04\x38\x04\x39\x05\x0c\x4e\
\x30\xd8\x01\x03\x00\x00\x00\x34\x04\x1f\x04\x40\x04\x38\x04\x3c\
\x04\x35\x04\x40\x00\x20\x04\x38\x04\x3d\x04\x42\x04\x35\x04\x40\
\x04\x3d\x04\x30\x04\x46\x04\x38\x04\x3d\x04\x3e\x04\x30\x04\x3b\
\x04\x38\x04\x37\x04\x30\x04\x46\x04\x38\x04\x38\x05\x0e\x9f\xe7\
\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\
\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x2e\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x56\x00\x5a\xf0\x84\
\x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x84\x05\x93\x08\xe5\
\x00\x00\x00\xa1\x05\x9b\xa6\x44\x00\x00\x00\xb6\x06\x3c\xe8\x53\
\x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xec\x0c\x4e\x30\xd8\
\x00\x00\x01\x0d\x0e\x9f\xe7\x05\x00\x00\x01\x26\x69\x00\x00\x01\
\x69\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x00\x56\x00\x69\x00\x73\x00\x61\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0c\x00\x26\x00\x41\x00\x72\
\x00\x6b\x00\x69\x00\x76\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\
\x10\x00\x26\x00\x41\x00\x76\x00\x73\x00\x6c\x00\x75\x00\x74\x00\
\x61\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0c\x00\x46\x00\xf6\
\x00\x72\x00\x73\x00\x74\x00\x61\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x0c\x00\x54\x00\x72\x00\x65\x00\x64\x00\x6a\x00\x65\x05\
\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x12\x00\x53\x00\x70\x00\x72\
\x00\xe5\x00\x6b\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\
\x31\x01\x03\x00\x00\x00\x0a\x00\x53\x00\x6b\x00\x65\x00\x76\x00\
\x74\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0a\x00\x41\x00\x6e\
\x00\x64\x00\x72\x00\x61\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\
\x16\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\
\x69\x00\x73\x00\x6b\x00\x74\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\
\x00\x16\x00\x50\x00\x65\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\
\x00\x74\x00\x69\x00\x76\x00\x74\x05\x06\xec\x79\x65\x01\x03\x00\
\x00\x00\x0e\x00\x53\x00\x76\x00\x65\x00\x6e\x00\x73\x00\x6b\x00\
\x61\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x38\x00\x49\x00\x6e\
\x00\x74\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\
\x00\x6e\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x65\x00\x72\x00\x69\
\x00\x6e\x00\x67\x00\x73\x00\x65\x00\x78\x00\x65\x00\x6d\x00\x70\
\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\
\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x50\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x2a\x00\x47\xdf\x04\
\x00\x00\x00\x41\x00\x4d\x09\xa4\x00\x00\x00\x5c\x00\x5a\xf0\x84\
\x00\x00\x00\x75\x02\xf0\x8c\x31\x00\x00\x00\x90\x05\x93\x08\xe5\
\x00\x00\x00\xb1\x05\x9b\xa6\x44\x00\x00\x00\xc8\x06\x3c\xe8\x53\
\x00\x00\x00\xe3\x06\xec\x79\x65\x00\x00\x01\x04\x0c\x4e\x30\xd8\
\x00\x00\x01\x2b\x0e\x9f\xe7\x05\x00\x00\x01\x44\x69\x00\x00\x01\
\x8b\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0e\x00\x41\x00\x6e\x00\x73\x00\x69\x00\
\x63\x00\x68\x00\x74\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0c\
\x00\x26\x00\x44\x00\x61\x00\x74\x00\x65\x00\x69\x05\x00\x2a\xd0\
\x25\x01\x03\x00\x00\x00\x10\x00\x42\x00\x65\x00\x26\x00\x65\x00\
\x6e\x00\x64\x00\x65\x00\x6e\x05\x00\x47\xdf\x04\x01\x03\x00\x00\
\x00\x0e\x00\x45\x00\x72\x00\x73\x00\x74\x00\x65\x00\x6e\x00\x73\
\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x10\x00\x44\x00\x72\x00\
\x69\x00\x74\x00\x74\x00\x65\x00\x6e\x00\x73\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x16\x00\x53\x00\x70\x00\x72\x00\x61\x00\x63\
\x00\x68\x00\x65\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\
\x31\x01\x03\x00\x00\x00\x0c\x00\x53\x00\x63\x00\x68\x00\x69\x00\
\x65\x00\x66\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x10\x00\x5a\
\x00\x77\x00\x65\x00\x69\x00\x74\x00\x65\x00\x6e\x00\x73\x05\x05\
\x9b\xa6\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\
\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\x73\x00\x63\x00\x68\x05\
\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x1c\x00\x50\x00\x65\x00\x72\
\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\x00\x69\
\x00\x73\x00\x63\x00\x68\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\
\x0e\x00\x44\x00\x65\x00\x75\x00\x74\x00\x73\x00\x63\x00\x68\x05\
\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x3c\x00\x49\x00\x6e\x00\x74\
\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x69\x00\x65\x00\x72\x00\x75\
\x00\x6e\x00\x67\x00\x73\x00\x62\x00\x65\x00\x69\x00\x73\x00\x70\
\x00\x69\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\
\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xbc\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x20\x00\x47\xdf\x04\
\x00\x00\x00\x37\x00\x4d\x09\xa4\x00\x00\x00\x4e\x00\x5a\xf0\x84\
\x00\x00\x00\x5f\x02\xf0\x8c\x31\x00\x00\x00\x70\x05\x93\x08\xe5\
\x00\x00\x00\x87\x05\x9b\xa6\x44\x00\x00\x00\x98\x06\x3c\xe8\x53\
\x00\x00\x00\xa9\x06\xec\x79\x65\x00\x00\x00\xbc\x0c\x4e\x30\xd8\
\x00\x00\x00\xcf\x0e\x9f\xe7\x05\x00\x00\x00\xe2\x69\x00\x00\x00\
\xf7\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x04\x89\xc6\x56\xfe\x05\x00\x05\xcf\xc7\
\x01\x03\x00\x00\x00\x0c\x65\x87\x4e\xf6\x00\x5b\x00\x26\x00\x46\
\x00\x5d\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0c\x90\x00\x51\
\xfa\x00\x5b\x00\x26\x00\x78\x00\x5d\x05\x00\x47\xdf\x04\x01\x03\
\x00\x00\x00\x06\x7b\x2c\x4e\x00\x4e\x2a\x05\x00\x4d\x09\xa4\x01\
\x03\x00\x00\x00\x06\x7b\x2c\x4e\x09\x4e\x2a\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x0c\x8b\xed\x8a\x00\x00\x3a\x00\x20\x00\x25\
\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\x06\x65\x9c\x62\
\x95\x5f\x71\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x7b\x2c\
\x4e\x8c\x4e\x2a\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x08\x7b\
\x49\x89\xd2\x62\x95\x5f\x71\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\
\x00\x08\x90\x0f\x89\xc6\x62\x95\x5f\x71\x05\x06\xec\x79\x65\x01\
\x03\x00\x00\x00\x08\x7b\x80\x4f\x53\x4e\x2d\x65\x87\x05\x0c\x4e\
\x30\xd8\x01\x03\x00\x00\x00\x0a\x56\xfd\x96\x45\x53\x16\x83\x03\
\x4f\x8b\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\
\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xe0\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x39\x00\x4d\x09\xa4\x00\x00\x00\x4c\x00\x5a\xf0\x84\
\x00\x00\x00\x5d\x02\xf0\x8c\x31\x00\x00\x00\x70\x05\x93\x08\xe5\
\x00\x00\x00\x8d\x05\x9b\xa6\x44\x00\x00\x00\xa0\x06\x3c\xe8\x53\
\x00\x00\x00\xb3\x06\xec\x79\x65\x00\x00\x00\xca\x0c\x4e\x30\xd8\
\x00\x00\x00\xdf\x0e\x9f\xe7\x05\x00\x00\x00\xf8\x69\x00\x00\x01\
\x1b\x03\x00\x00\x00\x06\x00\x52\x00\x54\x00\x4c\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x06\x45\x06\x31\x06\x26\x06\x49\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x06\x27\x06\x44\x06\x45\
\x06\x44\x06\x41\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x08\x06\
\x23\x06\x2e\x06\x31\x06\x2c\x05\x00\x47\xdf\x04\x01\x03\x00\x00\
\x00\x06\x06\x23\x06\x48\x06\x44\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x08\x06\x2b\x06\x27\x06\x44\x06\x2b\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x12\x06\x27\x06\x44\x06\x44\x06\x3a\x06\x29\
\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\
\x00\x00\x08\x06\x45\x06\x35\x06\x45\x06\x2a\x05\x05\x93\x08\xe5\
\x01\x03\x00\x00\x00\x08\x06\x2b\x06\x27\x06\x46\x06\x49\x05\x05\
\x9b\xa6\x44\x01\x03\x00\x00\x00\x0c\x06\x45\x06\x2a\x06\x45\x06\
\x27\x06\x2b\x06\x44\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x0a\
\x06\x45\x06\x46\x06\x38\x06\x48\x06\x31\x05\x06\xec\x79\x65\x01\
\x03\x00\x00\x00\x0e\x06\x27\x06\x44\x06\x39\x06\x31\x06\x28\x06\
\x4a\x06\x29\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x18\x06\x45\
\x06\x2b\x06\x27\x06\x44\x00\x20\x06\x27\x06\x44\x06\x2a\x06\x2f\
\x06\x48\x06\x4a\x06\x44\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\
\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x1c\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x28\x00\x47\xdf\x04\
\x00\x00\x00\x41\x00\x4d\x09\xa4\x00\x00\x00\x58\x00\x5a\xf0\x84\
\x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x82\x05\x93\x08\xe5\
\x00\x00\x00\x9f\x05\x9b\xa6\x44\x00\x00\x00\xbc\x06\x3c\xe8\x53\
\x00\x00\x00\xd1\x06\xec\x79\x65\x00\x00\x00\xf2\x0c\x4e\x30\xd8\
\x00\x00\x01\x15\x0e\x9f\xe7\x05\x00\x00\x01\x2a\x69\x00\x00\x01\
\x57\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0c\x00\x50\x00\x6f\x00\x68\x00\x6c\x00\
\x65\x00\x64\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0e\x00\x26\
\x00\x53\x00\x6f\x00\x75\x00\x62\x00\x6f\x00\x72\x05\x00\x2a\xd0\
\x25\x01\x03\x00\x00\x00\x0c\x00\x26\x00\x4b\x00\x6f\x00\x6e\x00\
\x65\x00\x63\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0a\x00\x50\
\x00\x72\x00\x76\x00\x6e\x00\xed\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x0a\x00\x54\x01\x59\x00\x65\x00\x74\x00\xed\x05\x00\x5a\
\xf0\x84\x01\x03\x00\x00\x00\x12\x00\x4a\x00\x61\x00\x79\x00\x7a\
\x00\x6b\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x12\x00\x4e\x00\x61\x00\x6b\x00\x6c\x00\x6f\x00\
\x6e\x01\x1b\x00\x6e\x00\xfd\x05\x05\x93\x08\xe5\x01\x03\x00\x00\
\x00\x0a\x00\x44\x00\x72\x00\x75\x00\x68\x00\xfd\x05\x05\x9b\xa6\
\x44\x01\x03\x00\x00\x00\x16\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\
\x65\x00\x74\x00\x72\x00\x69\x00\x63\x00\x6b\x00\xfd\x05\x06\x3c\
\xe8\x53\x01\x03\x00\x00\x00\x18\x00\x50\x00\x65\x00\x72\x00\x73\
\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\x00\x6e\x00\xed\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0a\x01\x0c\x00\x65\x00\
\x73\x00\x6b\x00\xfd\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x22\
\x00\x55\x00\x6b\x00\xe1\x00\x7a\x00\x6b\x00\x61\x00\x20\x00\x6c\
\x00\x6f\x00\x6b\x00\x61\x00\x6c\x00\x69\x00\x7a\x00\x61\x00\x63\
\x00\x65\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\
\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x28\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x26\x00\x47\xdf\x04\
\x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x50\x00\x5a\xf0\x84\
\x00\x00\x00\x65\x02\xf0\x8c\x31\x00\x00\x00\x7a\x05\x93\x08\xe5\
\x00\x00\x00\x99\x05\x9b\xa6\x44\x00\x00\x00\xb2\x06\x3c\xe8\x53\
\x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xea\x0c\x4e\x30\xd8\
\x00\x00\x01\x0b\x0e\x9f\xe7\x05\x00\x00\x01\x26\x69\x00\x00\x01\
\x63\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0a\x00\x56\x00\x69\x00\x73\x00\x74\x00\
\x61\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\
\x00\x69\x00\x6c\x00\x65\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\
\x0a\x00\x26\x00\x45\x00\x73\x00\x63\x00\x69\x05\x00\x47\xdf\x04\
\x01\x03\x00\x00\x00\x0a\x00\x50\x00\x72\x00\x69\x00\x6d\x00\x6f\
\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x00\x54\x00\x65\x00\
\x72\x00\x7a\x00\x6f\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x14\
\x00\x4c\x00\x69\x00\x6e\x00\x67\x00\x75\x00\x61\x00\x3a\x00\x20\
\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\x0e\x00\
\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\x75\x00\x61\x05\x05\x93\
\x08\xe5\x01\x03\x00\x00\x00\x0e\x00\x53\x00\x65\x00\x63\x00\x6f\
\x00\x6e\x00\x64\x00\x6f\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\
\x14\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\
\x69\x00\x63\x00\x61\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\
\x00\x50\x00\x72\x00\x6f\x00\x73\x00\x70\x00\x65\x00\x74\x00\x74\
\x00\x69\x00\x63\x00\x61\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\
\x10\x00\x49\x00\x74\x00\x61\x00\x6c\x00\x69\x00\x61\x00\x6e\x00\
\x6f\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x32\x00\x45\x00\x73\
\x00\x65\x00\x6d\x00\x70\x00\x69\x00\x6f\x00\x20\x00\x64\x00\x69\
\x00\x20\x00\x6c\x00\x6f\x00\x63\x00\x61\x00\x6c\x00\x69\x00\x7a\
\x00\x7a\x00\x61\x00\x7a\x00\x69\x00\x6f\x00\x6e\x00\x65\x05\x0e\
\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\
\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x24\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x35\x00\x4d\x09\xa4\x00\x00\x00\x50\x00\x5a\xf0\x84\
\x00\x00\x00\x67\x02\xf0\x8c\x31\x00\x00\x00\x7e\x05\x93\x08\xe5\
\x00\x00\x00\x9b\x05\x9b\xa6\x44\x00\x00\x00\xb2\x06\x3c\xe8\x53\
\x00\x00\x00\xc7\x06\xec\x79\x65\x00\x00\x00\xe6\x0c\x4e\x30\xd8\
\x00\x00\x01\x05\x0e\x9f\xe7\x05\x00\x00\x01\x1a\x69\x00\x00\x01\
\x5f\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x00\x56\x00\x69\x00\x73\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x08\x00\x26\x00\x46\x00\x69\x00\x6c\
\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x10\x00\x26\x00\x41\x00\
\x76\x00\x73\x00\x6c\x00\x75\x00\x74\x00\x74\x05\x00\x47\xdf\x04\
\x01\x03\x00\x00\x00\x0c\x00\x46\x00\xf8\x00\x72\x00\x73\x00\x74\
\x00\x65\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0c\x00\x54\x00\
\x72\x00\x65\x00\x64\x00\x6a\x00\x65\x05\x00\x5a\xf0\x84\x01\x03\
\x00\x00\x00\x12\x00\x53\x00\x70\x00\x72\x00\xe5\x00\x6b\x00\x3a\
\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\
\x0c\x00\x53\x00\x6b\x00\x6a\x00\x65\x00\x76\x00\x74\x05\x05\x93\
\x08\xe5\x01\x03\x00\x00\x00\x0a\x00\x41\x00\x6e\x00\x64\x00\x72\
\x00\x65\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x14\x00\x49\x00\
\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\x73\x00\
\x6b\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x14\x00\x50\x00\x65\
\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\
\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0a\x00\x4e\x00\x6f\x00\
\x72\x00\x73\x00\x6b\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x3a\
\x00\x49\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x6e\x00\x61\x00\x73\
\x00\x6a\x00\x6f\x00\x6e\x00\x61\x00\x6c\x00\x69\x00\x73\x00\x65\
\x00\x72\x00\x69\x00\x6e\x00\x67\x00\x73\x00\x65\x00\x6b\x00\x73\
\x00\x65\x00\x6d\x00\x70\x00\x65\x00\x6c\x05\x0e\x9f\xe7\x05\x01\
\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\
\x6f\x77\x00\
\x00\x00\x03\x24\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x22\x00\x47\xdf\x04\
\x00\x00\x00\x3b\x00\x4d\x09\xa4\x00\x00\x00\x54\x00\x5a\xf0\x84\
\x00\x00\x00\x69\x02\xf0\x8c\x31\x00\x00\x00\x7e\x05\x93\x08\xe5\
\x00\x00\x00\x9d\x05\x9b\xa6\x44\x00\x00\x00\xb4\x06\x3c\xe8\x53\
\x00\x00\x00\xcd\x06\xec\x79\x65\x00\x00\x00\xec\x0c\x4e\x30\xd8\
\x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x24\x69\x00\x00\x01\
\x5f\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x06\x03\x8c\x03\xc8\x03\xb7\x05\x00\x05\
\xcf\xc7\x01\x03\x00\x00\x00\x0e\x00\x26\x03\x91\x03\xc1\x03\xc7\
\x03\xb5\x03\xaf\x03\xbf\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\
\x0e\x03\x88\x00\x26\x03\xbe\x03\xbf\x03\xb4\x03\xbf\x03\xc2\x05\
\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x0a\x03\xa0\x03\xc1\x03\xce\
\x03\xc4\x03\xbf\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x03\
\xa4\x03\xc1\x03\xaf\x03\xc4\x03\xbf\x05\x00\x5a\xf0\x84\x01\x03\
\x00\x00\x00\x14\x03\x93\x03\xbb\x03\xce\x03\xc3\x03\xc3\x03\xb1\
\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\
\x00\x00\x0c\x03\xa0\x03\xbb\x03\xac\x03\xb3\x03\xb9\x03\xb1\x05\
\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0e\x03\x94\x03\xb5\x03\xcd\
\x03\xc4\x03\xb5\x03\xc1\x03\xbf\x05\x05\x9b\xa6\x44\x01\x03\x00\
\x00\x00\x14\x03\x99\x03\xc3\x03\xbf\x03\xbc\x03\xb5\x03\xc4\x03\
\xc1\x03\xb9\x03\xba\x03\xae\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\
\x00\x12\x03\xa0\x03\xc1\x03\xbf\x03\xbf\x03\xc0\x03\xc4\x03\xb9\
\x03\xba\x03\xae\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x10\x03\
\x95\x03\xbb\x03\xbb\x03\xb7\x03\xbd\x03\xb9\x03\xba\x03\xac\x05\
\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x30\x03\xa0\x03\xb1\x03\xc1\
\x03\xac\x03\xb4\x03\xb5\x03\xb9\x03\xb3\x03\xbc\x03\xb1\x00\x20\
\x03\xb4\x03\xb9\x03\xb5\x03\xb8\x03\xbd\x03\xbf\x03\xc0\x03\xbf\
\x03\xaf\x03\xb7\x03\xc3\x03\xb7\x03\xc2\x05\x0e\x9f\xe7\x05\x01\
\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\
\x6f\x77\x00\
\x00\x00\x03\x26\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x2a\x00\x47\xdf\x04\
\x00\x00\x00\x45\x00\x4d\x09\xa4\x00\x00\x00\x5a\x00\x5a\xf0\x84\
\x00\x00\x00\x6d\x02\xf0\x8c\x31\x00\x00\x00\x80\x05\x93\x08\xe5\
\x00\x00\x00\x9f\x05\x9b\xa6\x44\x00\x00\x00\xb8\x06\x3c\xe8\x53\
\x00\x00\x00\xc9\x06\xec\x79\x65\x00\x00\x00\xe6\x0c\x4e\x30\xd8\
\x00\x00\x01\x07\x0e\x9f\xe7\x05\x00\x00\x01\x24\x69\x00\x00\x01\
\x61\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x0e\x00\x41\x00\x73\x00\x70\x00\x65\x00\
\x6b\x00\x74\x00\x6f\x05\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x10\
\x00\x26\x00\x44\x00\x6f\x00\x73\x00\x69\x00\x65\x00\x72\x00\x6f\
\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\x00\
\x69\x00\x6e\x00\x69\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x08\
\x00\x55\x00\x6e\x00\x75\x00\x65\x05\x00\x4d\x09\xa4\x01\x03\x00\
\x00\x00\x08\x00\x54\x00\x72\x00\x69\x00\x65\x05\x00\x5a\xf0\x84\
\x01\x03\x00\x00\x00\x14\x00\x4c\x00\x69\x00\x6e\x00\x67\x00\x76\
\x00\x6f\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x6b\x00\
\x76\x00\x61\x05\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x00\x44\
\x00\x75\x00\x65\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x12\x00\
\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\x69\x00\
\x61\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\x00\x65\
\x00\x72\x00\x73\x00\x70\x00\x65\x00\x6b\x00\x74\x00\x69\x00\x76\
\x00\x61\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x12\x00\x45\x00\
\x73\x00\x70\x00\x65\x00\x72\x00\x61\x00\x6e\x00\x74\x00\x6f\x05\
\x0c\x4e\x30\xd8\x01\x03\x00\x00\x00\x32\x00\x45\x00\x6b\x00\x7a\
\x00\x65\x00\x6d\x00\x70\x00\x6c\x00\x6f\x00\x20\x00\x70\x00\x72\
\x00\x69\x00\x20\x00\x69\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x6e\
\x00\x61\x00\x63\x00\x69\x00\x69\x00\x67\x00\x6f\x05\x0e\x9f\xe7\
\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\
\x6e\x64\x6f\x77\x00\
\x00\x00\x03\x2a\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x39\x00\x4d\x09\xa4\x00\x00\x00\x4e\x00\x5a\xf0\x84\
\x00\x00\x00\x63\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\
\x00\x00\x00\x9b\x05\x9b\xa6\x44\x00\x00\x00\xb4\x06\x3c\xe8\x53\
\x00\x00\x00\xcb\x06\xec\x79\x65\x00\x00\x00\xe8\x0c\x4e\x30\xd8\
\x00\x00\x01\x09\x0e\x9f\xe7\x05\x00\x00\x01\x22\x69\x00\x00\x01\
\x65\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x00\x56\x00\x69\x00\x65\x00\x77\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x0a\x00\x26\x00\x46\x00\x69\
\x00\x6c\x00\x65\x05\x00\x2a\xd0\x25\x01\x03\x00\x00\x00\x0a\x00\
\x45\x00\x26\x00\x78\x00\x69\x00\x74\x05\x00\x47\xdf\x04\x01\x03\
\x00\x00\x00\x0a\x00\x46\x00\x69\x00\x72\x00\x73\x00\x74\x05\x00\
\x4d\x09\xa4\x01\x03\x00\x00\x00\x0a\x00\x54\x00\x68\x00\x69\x00\
\x72\x00\x64\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x18\x00\x4c\
\x00\x61\x00\x6e\x00\x67\x00\x75\x00\x61\x00\x67\x00\x65\x00\x3a\
\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\x03\x00\x00\x00\
\x0e\x00\x4f\x00\x62\x00\x6c\x00\x69\x00\x71\x00\x75\x00\x65\x05\
\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x0c\x00\x53\x00\x65\x00\x63\
\x00\x6f\x00\x6e\x00\x64\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\
\x12\x00\x49\x00\x73\x00\x6f\x00\x6d\x00\x65\x00\x74\x00\x72\x00\
\x69\x00\x63\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x16\x00\x50\
\x00\x65\x00\x72\x00\x73\x00\x70\x00\x65\x00\x63\x00\x74\x00\x69\
\x00\x76\x00\x65\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\x0e\x00\
\x45\x00\x6e\x00\x67\x00\x6c\x00\x69\x00\x73\x00\x68\x05\x0c\x4e\
\x30\xd8\x01\x03\x00\x00\x00\x38\x00\x49\x00\x6e\x00\x74\x00\x65\
\x00\x72\x00\x6e\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x61\
\x00\x6c\x00\x69\x00\x7a\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x20\x00\x45\x00\x78\x00\x61\x00\x6d\x00\x70\x00\x6c\x00\x65\
\x05\x0e\x9f\xe7\x05\x01\x2f\x00\x00\x01\x3e\x00\x97\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x4d\x61\
\x69\x6e\x57\x69\x6e\x64\x6f\x77\x00\
\x00\x00\x02\xd2\
\x3c\
\xb8\x64\x18\xca\xef\x9c\x95\xcd\x21\x1c\xbf\x60\xa1\xbd\xdd\x42\
\x00\x00\x00\x68\x00\x00\x51\x92\x00\x00\x00\x00\x00\x05\xcf\xc7\
\x00\x00\x00\x11\x00\x2a\xd0\x25\x00\x00\x00\x24\x00\x47\xdf\x04\
\x00\x00\x00\x3f\x00\x4d\x09\xa4\x00\x00\x00\x56\x00\x5a\xf0\x84\
\x00\x00\x00\x67\x02\xf0\x8c\x31\x00\x00\x00\x78\x05\x93\x08\xe5\
\x00\x00\x00\x8f\x05\x9b\xa6\x44\x00\x00\x00\xa4\x06\x3c\xe8\x53\
\x00\x00\x00\xb5\x06\xec\x79\x65\x00\x00\x00\xca\x0c\x4e\x30\xd8\
\x00\x00\x00\xdb\x0e\x9f\xe7\x05\x00\x00\x00\xec\x69\x00\x00\x01\
\x0d\x03\x00\x00\x00\x06\x00\x4c\x00\x54\x00\x52\x05\x00\x00\x51\
\x92\x01\x03\x00\x00\x00\x08\x88\x68\x79\x3a\x65\xb9\x5f\x0f\x05\
\x00\x05\xcf\xc7\x01\x03\x00\x00\x00\x10\x30\xd5\x30\xa1\x30\xa4\
\x30\xeb\x00\x28\x00\x26\x00\x46\x00\x29\x05\x00\x2a\xd0\x25\x01\
\x03\x00\x00\x00\x0c\x7d\x42\x4e\x86\x00\x28\x00\x26\x00\x58\x00\
\x29\x05\x00\x47\xdf\x04\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\x00\
\x88\x4c\x05\x00\x4d\x09\xa4\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\
\x09\x88\x4c\x05\x00\x5a\xf0\x84\x01\x03\x00\x00\x00\x0c\x8a\x00\
\x8a\x9e\x00\x3a\x00\x20\x00\x25\x00\x31\x05\x02\xf0\x8c\x31\x01\
\x03\x00\x00\x00\x0a\x65\x9c\x30\x81\x62\x95\x5f\x71\x6c\xd5\x05\
\x05\x93\x08\xe5\x01\x03\x00\x00\x00\x06\x7b\x2c\x4e\x8c\x88\x4c\
\x05\x05\x9b\xa6\x44\x01\x03\x00\x00\x00\x0a\x7b\x49\x89\xd2\x62\
\x95\x5f\x71\x6c\xd5\x05\x06\x3c\xe8\x53\x01\x03\x00\x00\x00\x06\
\x90\x60\x8f\xd1\x6c\xd5\x05\x06\xec\x79\x65\x01\x03\x00\x00\x00\
\x06\x65\xe5\x67\x2c\x8a\x9e\x05\x0c\x4e\x30\xd8\x01\x03\x00\x00\
\x00\x16\x56\xfd\x96\x9b\x53\x16\x00\x28\x00\x69\x00\x31\x00\x38\
\x00\x6e\x00\x29\x30\x6e\x4f\x8b\x05\x0e\x9f\xe7\x05\x01\x2f\x00\
\x00\x01\x3e\x00\x97\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x0a\x4d\x61\x69\x6e\x57\x69\x6e\x64\x6f\x77\
\x00\
"
# Auto-generated Qt resource name table (pyrcc output): length-prefixed
# UTF-16-BE resource names ("translations", "i18n_fr.qm", ...) with their
# hash values. Do not edit by hand; regenerate with pyrcc instead.
qt_resource_name = b"\
\x00\x0c\
\x0d\xfc\x11\x13\
\x00\x74\
\x00\x72\x00\x61\x00\x6e\x00\x73\x00\x6c\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x73\
\x00\x0a\
\x04\x50\xdc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x66\x00\x72\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x6f\xac\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6b\x00\x6f\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x65\x0c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x72\x00\x75\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x67\x1c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x73\x00\x76\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x58\x0c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x64\x00\x65\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x7d\x3c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x7a\x00\x68\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x55\xdc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x57\xec\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x63\x00\x73\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x6d\xfc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x69\x00\x74\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x68\xac\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6e\x00\x6f\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x56\x7c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6c\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x59\xac\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6f\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x59\x9c\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x65\x00\x6e\x00\x2e\x00\x71\x00\x6d\
\x00\x0a\
\x04\x6c\xbc\x9d\
\x00\x69\
\x00\x31\x00\x38\x00\x6e\x00\x5f\x00\x6a\x00\x70\x00\x2e\x00\x71\x00\x6d\
"
# Auto-generated Qt resource tree (pyrcc output): fixed-width records linking
# the names in qt_resource_name to byte offsets inside qt_resource_data.
# Do not edit by hand; regenerate with pyrcc instead.
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x0e\x00\x00\x00\x02\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xba\x00\x00\x00\x00\x00\x01\x00\x00\x12\x76\
\x00\x00\x01\x22\x00\x00\x00\x00\x00\x01\x00\x00\x1e\xce\
\x00\x00\x00\xd4\x00\x00\x00\x00\x00\x01\x00\x00\x15\x5a\
\x00\x00\x00\x86\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x62\
\x00\x00\x01\x56\x00\x00\x00\x00\x00\x01\x00\x00\x25\x20\
\x00\x00\x01\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x21\xf6\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x06\x06\
\x00\x00\x00\x6c\x00\x00\x00\x00\x00\x01\x00\x00\x09\x30\
\x00\x00\x01\x08\x00\x00\x00\x00\x00\x01\x00\x00\x1b\xa6\
\x00\x00\x01\x70\x00\x00\x00\x00\x00\x01\x00\x00\x28\x4e\
\x00\x00\x00\xee\x00\x00\x00\x00\x00\x01\x00\x00\x18\x7a\
\x00\x00\x00\x38\x00\x00\x00\x00\x00\x01\x00\x00\x03\x50\
\x00\x00\x00\xa0\x00\x00\x00\x00\x00\x01\x00\x00\x0f\xb6\
"
def qInitResources():
    # Register the embedded translation catalogues (qt_resource_data) with
    # Qt's resource system so they are reachable under ":/translations/...".
    # NOTE(review): QtCore is presumably imported in this generated module's
    # header (outside this chunk) -- confirm against the top of the file.
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Unregister the resource data registered by qInitResources().
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

# Resources are registered as a side effect of importing this module.
qInitResources()
| gpl-3.0 |
an7oine/WinVHS | Cygwin/lib/python2.7/wsgiref/simple_server.py | 19 | 4768 | """BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21)
This is both an example of how WSGI can be implemented, and a basis for running
simple web applications on a local machine, such as might be done when testing
or debugging an application. It has not been reviewed for security issues,
however, and we strongly recommend that you use a "real" web server for
production use.
For example usage, see the 'if __name__=="__main__"' block at the end of the
module. See also the BaseHTTPServer module docs for other API information.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib, sys
from wsgiref.handlers import SimpleHandler
__version__ = "0.1"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']

# Server identification strings, combined into the value advertised by the
# ServerHandler (e.g. "WSGIServer/0.1 Python/2.7.10").
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
    """SimpleHandler that logs each completed request via its request handler."""

    server_software = software_version

    def close(self):
        """Log the request's status/byte count, then do the normal shutdown."""
        try:
            status_code = self.status.split(' ', 1)[0]
            self.request_handler.log_request(status_code, self.bytes_sent)
        finally:
            # Always run the base-class cleanup, even if logging fails.
            SimpleHandler.close(self)
class WSGIServer(HTTPServer):
    """HTTPServer subclass that hosts a single WSGI application."""

    application = None  # installed via set_app(); retrieved via get_app()

    def server_bind(self):
        """Bind the socket, then capture the server-wide base environment."""
        HTTPServer.server_bind(self)
        self.setup_environ()

    def setup_environ(self):
        # Base CGI environment shared by every request; request-specific
        # keys are added later by the request handler.
        self.base_environ = {
            'SERVER_NAME': self.server_name,
            'GATEWAY_INTERFACE': 'CGI/1.1',
            'SERVER_PORT': str(self.server_port),
            'REMOTE_HOST': '',
            'CONTENT_LENGTH': '',
            'SCRIPT_NAME': '',
        }

    def get_app(self):
        """Return the WSGI application object currently being served."""
        return self.application

    def set_app(self, application):
        """Install the WSGI application object to serve."""
        self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
    # Translates one HTTP request into a WSGI environ dict and hands it to a
    # ServerHandler, which drives the application.

    server_version = "WSGIServer/" + __version__

    def get_environ(self):
        # Build the per-request WSGI/CGI environ, starting from the
        # server-wide base environment prepared by WSGIServer.setup_environ().
        env = self.server.base_environ.copy()
        env['SERVER_PROTOCOL'] = self.request_version
        env['REQUEST_METHOD'] = self.command
        if '?' in self.path:
            path,query = self.path.split('?',1)
        else:
            path,query = self.path,''
        # PATH_INFO must be URL-decoded; QUERY_STRING stays raw per the CGI spec.
        env['PATH_INFO'] = urllib.unquote(path)
        env['QUERY_STRING'] = query
        host = self.address_string()
        # Only record REMOTE_HOST when a reverse lookup produced a real name.
        if host != self.client_address[0]:
            env['REMOTE_HOST'] = host
        env['REMOTE_ADDR'] = self.client_address[0]
        # NOTE: self.headers is a Python 2 mimetools.Message; typeheader is the
        # raw Content-Type header line (None when the header is absent).
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        # Fold the remaining headers into HTTP_* keys.
        for h in self.headers.headers:
            k,v = h.split(':',1)
            k=k.replace('-','_').upper(); v=v.strip()
            if k in env:
                continue # skip content length, type, etc. (already handled above)
            if 'HTTP_'+k in env:
                env['HTTP_'+k] += ','+v # comma-separate repeated headers (RFC 2616)
            else:
                env['HTTP_'+k] = v
        return env

    def get_stderr(self):
        # Stream used as wsgi.errors by the handler.
        return sys.stderr

    def handle(self):
        """Handle a single HTTP request"""
        self.raw_requestline = self.rfile.readline()
        if not self.parse_request(): # An error code has been sent, just exit
            return
        handler = ServerHandler(
            self.rfile, self.wfile, self.get_stderr(), self.get_environ()
        )
        handler.request_handler = self      # backpointer for logging
        handler.run(self.server.get_app())
def demo_app(environ, start_response):
    """Trivial WSGI app: emit a greeting plus the sorted environ as text."""
    from StringIO import StringIO
    out = StringIO()
    out.write("Hello world!\n")
    out.write("\n")
    # One "KEY = repr(value)" line per environ entry, in sorted key order.
    for key, value in sorted(environ.items()):
        out.write("%s = %s\n" % (key, repr(value)))
    start_response("200 OK", [('Content-Type','text/plain')])
    return [out.getvalue()]
def make_server(
    host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
    """Construct a WSGI server bound to (`host`, `port`) serving `app`.

    The server and handler classes can be overridden for customization;
    the returned server is ready for ``serve_forever()`` / ``handle_request()``.
    """
    httpd = server_class((host, port), handler_class)
    httpd.set_app(app)
    return httpd
if __name__ == '__main__':
    # Demo: serve exactly one request with demo_app, opening it in a browser.
    httpd = make_server('', 8000, demo_app)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    import webbrowser
    webbrowser.open('http://localhost:8000/xyz?abc')
    httpd.handle_request()  # serve one request, then exit
    httpd.server_close()
| gpl-3.0 |
Marcusz97/CILP_Facilitatore_Audacity | lib-src/lv2/lv2/plugins/eg03-metro.lv2/waflib/Tools/kde4.py | 275 | 2007 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,re
from waflib import Options,TaskGen,Task,Utils
from waflib.TaskGen import feature,after_method
@feature('msgfmt')
def apply_msgfmt(self):
	"""Task generator method: compile each ``.po`` catalog named in
	``self.langs`` into a ``.mo`` file and schedule it for installation
	under the KDE4 locale directory."""
	for lang in self.to_list(self.langs):
		node=self.path.find_resource(lang+'.po')
		task=self.create_task('msgfmt',node,node.change_ext('.mo'))
		# Only the last path component names the language.
		langname=lang.split('/')
		langname=langname[-1]
		inst=getattr(self,'install_path','${KDE4_LOCALE_INSTALL_DIR}')
		self.bld.install_as(inst+os.sep+langname+os.sep+'LC_MESSAGES'+os.sep+getattr(self,'appname','set_your_appname')+'.mo',task.outputs[0],chmod=getattr(self,'chmod',Utils.O644))
class msgfmt(Task.Task):
	"""waf task that runs msgfmt to compile a .po catalog into a .mo file."""
	color='BLUE'
	run_str='${MSGFMT} ${SRC} -o ${TGT}'
def configure(self):
	"""Detect a KDE4 installation via ``kde4-config`` and populate the
	build environment with KDE install paths, library names and the
	msgfmt program."""
	kdeconfig=self.find_program('kde4-config')
	prefix=self.cmd_and_log('%s --prefix'%kdeconfig).strip()
	fname='%s/share/apps/cmake/modules/KDELibsDependencies.cmake'%prefix
	try:os.stat(fname)
	except OSError:
		# Alternate distro layout: the cmake module lives under share/kde4.
		fname='%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake'%prefix
		try:os.stat(fname)
		except OSError:self.fatal('could not open %s'%fname)
	try:
		txt=Utils.readf(fname)
	except(OSError,IOError):
		self.fatal('could not read %s'%fname)
	# Join backslash-continued lines and strip cmake comments before scanning.
	txt=txt.replace('\\\n','\n')
	fu=re.compile('#(.*)\n')
	txt=fu.sub('',txt)
	# Pick up every set(KEY "value") assignment from the cmake module.
	setregexp=re.compile('([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)')
	found=setregexp.findall(txt)
	for(_,key,val)in found:
		self.env[key]=val
	# Library and path defaults used by the KDE4 use-variables.
	self.env['LIB_KDECORE']=['kdecore']
	self.env['LIB_KDEUI']=['kdeui']
	self.env['LIB_KIO']=['kio']
	self.env['LIB_KHTML']=['khtml']
	self.env['LIB_KPARTS']=['kparts']
	self.env['LIBPATH_KDECORE']=[os.path.join(self.env.KDE4_LIB_INSTALL_DIR,'kde4','devel'),self.env.KDE4_LIB_INSTALL_DIR]
	self.env['INCLUDES_KDECORE']=[self.env['KDE4_INCLUDE_INSTALL_DIR']]
	self.env.append_value('INCLUDES_KDECORE',[self.env['KDE4_INCLUDE_INSTALL_DIR']+os.sep+'KDE'])
	self.find_program('msgfmt',var='MSGFMT')
| gpl-2.0 |
d-lee/airflow | airflow/macros/hive.py | 29 | 4204 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
def max_partition(
        table, schema="default", field=None, filter=None,
        metastore_conn_id='metastore_default'):
    '''
    Gets the max partition for a table.
    :param schema: The hive schema the table lives in
    :type schema: string
    :param table: The hive table you are interested in, supports the dot
        notation as in "my_database.my_table", if a dot is found,
        the schema param is disregarded
    :type table: string
    :param hive_conn_id: The hive connection you are interested in.
        If your default is set you don't need to use this parameter.
    :type hive_conn_id: string
    :param filter: filter on a subset of partition as in
        `sub_part='specific_value'`
    :type filter: string
    :param field: the field to get the max value from. If there's only
        one partition field, this will be inferred
    >>> max_partition('airflow.static_babynames_partitioned')
    '2015-01-01'
    '''
    # NOTE(review): 'filter' shadows the builtin; kept for API compatibility.
    # Imported lazily to avoid pulling in hive hooks at module import time.
    from airflow.hooks.hive_hooks import HiveMetastoreHook
    # Dot notation ("db.table") overrides the schema argument.
    if '.' in table:
        schema, table = table.split('.')
    hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)
    return hh.max_partition(
        schema=schema, table_name=table, field=field, filter=filter)
def _closest_date(target_dt, date_list, before_target=None):
'''
This function finds the date in a list closest to the target date.
An optional parameter can be given to get the closest before or after.
:param target_dt: The target date
:type target_dt: datetime.date
:param date_list: The list of dates to search
:type date_list: datetime.date list
:param before_target: closest before or after the target
:type before_target: bool or None
:returns: The closest date
:rtype: datetime.date or None
'''
fb = lambda d: d - target_dt if d >= target_dt else datetime.timedelta.max
fa = lambda d: d - target_dt if d <= target_dt else datetime.timedelta.min
fnone = lambda d: target_dt - d if d < target_dt else d - target_dt
if before_target is None:
return min(date_list, key=fnone).date()
if before_target:
return min(date_list, key=fb).date()
else:
return min(date_list, key=fa).date()
def closest_ds_partition(
        table, ds, before=True, schema="default",
        metastore_conn_id='metastore_default'):
    '''
    This function finds the date in a list closest to the target date.
    An optional parameter can be given to get the closest before or after.
    :param table: A hive table name
    :type table: str
    :param ds: A datestamp ``%Y-%m-%d`` e.g. ``yyyy-mm-dd``
    :type ds: datetime.date list
    :param before: closest before (True), after (False) or either side of ds
    :type before: bool or None
    :returns: The closest date
    :rtype: str or None
    >>> tbl = 'airflow.static_babynames_partitioned'
    >>> closest_ds_partition(tbl, '2015-01-02')
    '2015-01-01'
    '''
    # Imported lazily to avoid pulling in hive hooks at module import time.
    from airflow.hooks.hive_hooks import HiveMetastoreHook
    # Dot notation ("db.table") overrides the schema argument.
    if '.' in table:
        schema, table = table.split('.')
    hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)
    partitions = hh.get_partitions(schema=schema, table_name=table)
    if not partitions:
        return None
    # Only the first value of each partition dict is used — assumes
    # single-key partitioning with %Y-%m-%d values; TODO confirm.
    part_vals = [list(p.values())[0] for p in partitions]
    # An exact match short-circuits the closest-date search.
    if ds in part_vals:
        return ds
    else:
        parts = [datetime.datetime.strptime(pv, '%Y-%m-%d')
                 for pv in part_vals]
        target_dt = datetime.datetime.strptime(ds, '%Y-%m-%d')
        closest_ds = _closest_date(target_dt, parts, before_target=before)
        return closest_ds.isoformat()
| apache-2.0 |
amarian12/zetacoin_p2pool | nattraverso/pynupnp/__init__.py | 288 | 1088 | """
This package offers ways to retreive ip addresses of the machine, and map ports
through UPnP devices.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
from nattraverso.pynupnp.upnp import search_upnp_device, UPnPMapper
def get_external_ip():
    """
    Returns a deferred which will be called with the WAN ip address
    retreived through UPnP. The ip is a string of the form "x.x.x.x"

    @return: A deferred called with the external ip address of this host
    @rtype: L{twisted.internet.defer.Deferred}
    """
    def _query_ip(device):
        # Ask the discovered UPnP device for its WAN-side address.
        return device.get_external_ip()
    return search_upnp_device().addCallback(_query_ip)
def get_port_mapper():
    """
    Returns a deferred which will be called with a L{UPnPMapper} instance.
    This is a L{nattraverso.portmapper.NATMapper} implementation.

    @return: A deferred called with the L{UPnPMapper} instance.
    @rtype: L{twisted.internet.defer.Deferred}
    """
    # Wrap the discovered device in a UPnPMapper once discovery fires.
    return search_upnp_device().addCallback(UPnPMapper)
| gpl-3.0 |
perezg/infoxchange | BASE/lib/python2.7/site-packages/django/core/cache/backends/base.py | 99 | 7900 | "Base Cache class."
from __future__ import unicode_literals
import warnings
from django.core.exceptions import ImproperlyConfigured, DjangoRuntimeWarning
from django.utils.importlib import import_module
class InvalidCacheBackendError(ImproperlyConfigured):
    """Raised when the configured cache backend cannot be loaded."""
    pass
class CacheKeyWarning(DjangoRuntimeWarning):
    """Warning issued for cache keys that would break on memcached."""
    pass
# Memcached does not accept keys longer than this.
# validate_key() below warns when a key exceeds this limit.
MEMCACHE_MAX_KEY_LENGTH = 250
def default_key_func(key, key_prefix, version):
    """
    Default key-generation function.

    Joins the ``key_prefix``, the ``version`` and the raw ``key`` with
    colons. The KEY_FUNCTION setting can name an alternate callable with
    custom key making behavior.
    """
    template = '%s:%s:%s'
    return template % (key_prefix, version, key)
def get_key_func(key_func):
    """
    Decide which key function to use.

    ``key_func`` may be None (use ``default_key_func``), a callable
    (returned as-is), or a dotted import path naming a callable.
    """
    if key_func is None:
        return default_key_func
    if callable(key_func):
        return key_func
    # A dotted path string: import the module and pull out the attribute.
    module_path, attr_name = key_func.rsplit('.', 1)
    return getattr(import_module(module_path), attr_name)
class BaseCache(object):
    """Abstract base class for Django cache backends.

    Parses the common backend settings and provides default
    implementations built on get()/set()/delete(), which concrete
    backends must override.
    """

    def __init__(self, params):
        """Initialize from the CACHES settings dict ``params``.

        Lowercase keys ('timeout', 'max_entries', 'cull_frequency') are
        accepted alongside the uppercase ones — presumably for backwards
        compatibility with an older settings format; TODO confirm.
        Invalid values silently fall back to the documented defaults.
        """
        timeout = params.get('timeout', params.get('TIMEOUT', 300))
        try:
            timeout = int(timeout)
        except (ValueError, TypeError):
            timeout = 300
        self.default_timeout = timeout
        options = params.get('OPTIONS', {})
        max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
        self.key_prefix = params.get('KEY_PREFIX', '')
        self.version = params.get('VERSION', 1)
        # key_func turns (key, prefix, version) into the stored key string.
        self.key_func = get_key_func(params.get('KEY_FUNCTION', None))

    def make_key(self, key, version=None):
        """Constructs the key used by all other methods. By default it
        uses the key_func to generate a key (which, by default,
        prepends the `key_prefix' and 'version'). An different key
        function can be provided at the time of cache construction;
        alternatively, you can subclass the cache backend to provide
        custom key making behavior.
        """
        if version is None:
            version = self.version
        new_key = self.key_func(key, self.key_prefix, version)
        return new_key

    def add(self, key, value, timeout=None, version=None):
        """
        Set a value in the cache if the key does not already exist. If
        timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        Returns True if the value was stored, False otherwise.
        """
        raise NotImplementedError

    def get(self, key, default=None, version=None):
        """
        Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.
        """
        raise NotImplementedError

    def set(self, key, value, timeout=None, version=None):
        """
        Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.
        """
        raise NotImplementedError

    def delete(self, key, version=None):
        """
        Delete a key from the cache, failing silently.
        """
        raise NotImplementedError

    def get_many(self, keys, version=None):
        """
        Fetch a bunch of keys from the cache. For certain backends (memcached,
        pgsql) this can be *much* faster when fetching multiple values.
        Returns a dict mapping each key in keys to its value. If the given
        key is missing, it will be missing from the response dict.
        """
        # Naive default: one get() per key; backends may override.
        d = {}
        for k in keys:
            val = self.get(k, version=version)
            if val is not None:
                d[k] = val
        return d

    def has_key(self, key, version=None):
        """
        Returns True if the key is in the cache and has not expired.
        """
        return self.get(key, version=version) is not None

    def incr(self, key, delta=1, version=None):
        """
        Add delta to value in the cache. If the key does not exist, raise a
        ValueError exception.
        """
        # NOTE(review): get-then-set is not atomic; backends with native
        # increment support should override this.
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        new_value = value + delta
        self.set(key, new_value, version=version)
        return new_value

    def decr(self, key, delta=1, version=None):
        """
        Subtract delta from value in the cache. If the key does not exist, raise
        a ValueError exception.
        """
        return self.incr(key, -delta, version=version)

    def __contains__(self, key):
        """
        Returns True if the key is in the cache and has not expired.
        """
        # This is a separate method, rather than just a copy of has_key(),
        # so that it always has the same functionality as has_key(), even
        # if a subclass overrides it.
        return self.has_key(key)

    def set_many(self, data, timeout=None, version=None):
        """
        Set a bunch of values in the cache at once from a dict of key/value
        pairs. For certain backends (memcached), this is much more efficient
        than calling set() multiple times.
        If timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        """
        for key, value in data.items():
            self.set(key, value, timeout=timeout, version=version)

    def delete_many(self, keys, version=None):
        """
        Set a bunch of values in the cache at once. For certain backends
        (memcached), this is much more efficient than calling delete() multiple
        times.
        """
        for key in keys:
            self.delete(key, version=version)

    def clear(self):
        """Remove *all* values from the cache at once."""
        raise NotImplementedError

    def validate_key(self, key):
        """
        Warn about keys that would not be portable to the memcached
        backend. This encourages (but does not force) writing backend-portable
        cache code.
        """
        if len(key) > MEMCACHE_MAX_KEY_LENGTH:
            warnings.warn('Cache key will cause errors if used with memcached: '
                    '%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH),
                    CacheKeyWarning)
        # Memcached rejects control characters and whitespace in keys.
        for char in key:
            if ord(char) < 33 or ord(char) == 127:
                warnings.warn('Cache key contains characters that will cause '
                        'errors if used with memcached: %r' % key,
                        CacheKeyWarning)

    def incr_version(self, key, delta=1, version=None):
        """Adds delta to the cache version for the supplied key. Returns the
        new version.
        """
        # Copies the value to the new version's key, then removes the old one.
        if version is None:
            version = self.version
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        self.set(key, value, version=version+delta)
        self.delete(key, version=version)
        return version+delta

    def decr_version(self, key, delta=1, version=None):
        """Substracts delta from the cache version for the supplied key. Returns
        the new version.
        """
        return self.incr_version(key, -delta, version)
| apache-2.0 |
encukou/freeipa | ipatests/test_ipalib/test_crud.py | 8 | 7143 | # Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib.crud` module.
"""
from ipatests.util import raises, get_api, ClassChecker
from ipalib import crud, frontend
from ipalib.parameters import Str
import pytest
pytestmark = pytest.mark.tier0
class CrudChecker(ClassChecker):
    """
    Class for testing base classes in `ipalib.crud`.
    """
    def get_api(self, args=tuple(), options=tuple()):
        """
        Return a finalized `ipalib.plugable.API` instance.
        """
        api, _home = get_api()
        # A sample Object with one primary key ('uid') and per-verb flags
        # on 'sn' (no_update) and 'uidnumber' (no_create, no_search).
        class user(frontend.Object):
            takes_params = (
                'givenname',
                Str('sn', flags='no_update'),
                Str('uid', primary_key=True),
                'initials',
                Str('uidnumber', flags=['no_create', 'no_search'])
            )
        # self.cls is the CRUD base class under test (set via _cls in
        # subclasses and exposed by ClassChecker).
        class user_verb(self.cls):
            takes_args = args
            takes_options = options
        api.add_plugin(user)
        api.add_plugin(user_verb)
        api.finalize()
        return api
class test_Create(CrudChecker):
    """
    Test the `ipalib.crud.Create` class.
    """
    _cls = crud.Create
    def test_get_args(self):
        """
        Test the `ipalib.crud.Create.get_args` method.
        """
        # Create takes the primary key as its required argument.
        api = self.get_api()
        assert list(api.Method.user_verb.args) == ['uid']
        assert api.Method.user_verb.args.uid.required is True
    def test_get_options(self):
        """
        Test the `ipalib.crud.Create.get_options` method.
        """
        # 'uidnumber' is flagged no_create, so it must not appear here.
        api = self.get_api()
        assert list(api.Method.user_verb.options) == \
            ['givenname', 'sn', 'initials', 'all', 'raw', 'version']
        for param in api.Method.user_verb.options():
            if param.name != 'version':
                assert param.required is True
        # Extra takes_options are appended before the standard flags.
        api = self.get_api(options=(Str('extra?'),))
        assert list(api.Method.user_verb.options) == \
            ['givenname', 'sn', 'initials', 'extra', 'all', 'raw', 'version']
        assert api.Method.user_verb.options.extra.required is False
class test_Update(CrudChecker):
    """
    Test the `ipalib.crud.Update` class.
    """
    _cls = crud.Update
    def test_get_args(self):
        """
        Test the `ipalib.crud.Update.get_args` method.
        """
        api = self.get_api()
        assert list(api.Method.user_verb.args) == ['uid']
        assert api.Method.user_verb.args.uid.required is True
    def test_get_options(self):
        """
        Test the `ipalib.crud.Update.get_options` method.
        """
        # 'sn' is flagged no_update, so it must not appear here; all
        # attribute options are optional for an update.
        api = self.get_api()
        assert list(api.Method.user_verb.options) == \
            ['givenname', 'initials', 'uidnumber', 'all', 'raw', 'version']
        for param in api.Method.user_verb.options():
            if param.name in ['all', 'raw']:
                assert param.required is True
            else:
                assert param.required is False
class test_Retrieve(CrudChecker):
    """
    Test the `ipalib.crud.Retrieve` class.
    """
    _cls = crud.Retrieve
    def test_get_args(self):
        """
        Test the `ipalib.crud.Retrieve.get_args` method.
        """
        api = self.get_api()
        assert list(api.Method.user_verb.args) == ['uid']
        assert api.Method.user_verb.args.uid.required is True
    def test_get_options(self):
        """
        Test the `ipalib.crud.Retrieve.get_options` method.
        """
        # Retrieve exposes only the standard output-control options.
        api = self.get_api()
        assert list(api.Method.user_verb.options) == ['all', 'raw', 'version']
class test_Delete(CrudChecker):
    """
    Test the `ipalib.crud.Delete` class.
    """
    _cls = crud.Delete
    def test_get_args(self):
        """
        Test the `ipalib.crud.Delete.get_args` method.
        """
        api = self.get_api()
        assert list(api.Method.user_verb.args) == ['uid']
        assert api.Method.user_verb.args.uid.required is True
    def test_get_options(self):
        """
        Test the `ipalib.crud.Delete.get_options` method.
        """
        # Delete needs no attribute options, only the API version.
        api = self.get_api()
        assert list(api.Method.user_verb.options) == ['version']
        assert len(api.Method.user_verb.options) == 1
class test_Search(CrudChecker):
    """
    Test the `ipalib.crud.Search` class.
    """
    _cls = crud.Search
    def test_get_args(self):
        """
        Test the `ipalib.crud.Search.get_args` method.
        """
        # Search takes an optional free-form criteria argument.
        api = self.get_api()
        assert list(api.Method.user_verb.args) == ['criteria']
        assert api.Method.user_verb.args.criteria.required is False
    def test_get_options(self):
        """
        Test the `ipalib.crud.Search.get_options` method.
        """
        # 'uidnumber' is flagged no_search, so it must not appear here.
        api = self.get_api()
        assert list(api.Method.user_verb.options) == \
            ['givenname', 'sn', 'uid', 'initials', 'all', 'raw', 'version']
        for param in api.Method.user_verb.options():
            if param.name in ['all', 'raw']:
                assert param.required is True
            else:
                assert param.required is False
class test_CrudBackend(ClassChecker):
    """
    Test the `ipalib.crud.CrudBackend` class.
    """
    _cls = crud.CrudBackend
    def get_subcls(self):
        # Minimal concrete subclass used to check the NotImplementedError
        # messages include the subclass name.
        class ldap(self.cls):
            pass
        return ldap
    def check_method(self, name, *args):
        """Assert that method ``name`` raises NotImplementedError on both
        the base class and a subclass, with the class name in the message."""
        api = 'the api instance'
        o = self.cls(api)
        e = raises(NotImplementedError, getattr(o, name), *args)
        assert str(e) == 'CrudBackend.%s()' % name
        sub = self.subcls(api)
        e = raises(NotImplementedError, getattr(sub, name), *args)
        assert str(e) == 'ldap.%s()' % name
    def test_create(self):
        """
        Test the `ipalib.crud.CrudBackend.create` method.
        """
        self.check_method('create')
    def test_retrieve(self):
        """
        Test the `ipalib.crud.CrudBackend.retrieve` method.
        """
        self.check_method('retrieve', 'primary key', 'attribute')
    def test_update(self):
        """
        Test the `ipalib.crud.CrudBackend.update` method.
        """
        self.check_method('update', 'primary key')
    def test_delete(self):
        """
        Test the `ipalib.crud.CrudBackend.delete` method.
        """
        self.check_method('delete', 'primary key')
    def test_search(self):
        """
        Test the `ipalib.crud.CrudBackend.search` method.
        """
        self.check_method('search')
| gpl-3.0 |
liuzcgithub/chef-repo | cookbooks/CLM_E1_APP_COOKBOOK/files/default/WASCommon/BeforeConvertCert.py | 2 | 5432 |
def AddCustomProperties(appserver,appnode):
print "----- Add custom properties ----------------"
serverid = AdminConfig.getid("/Cell:" + cellName + "/Node:" + appnode + "/Server:" + appserver +"/")
pp = AdminConfig.list('PluginProperties',serverid)
for propname in properties.keys():
if propname.find("ppcp.") == 0:
pname = propname[5:]
pvalue = properties.getProperty(propname)
profcmd = '[[validationExpression ""]'
profcmd = profcmd + ' [name \"' + pname + '\"]'
profcmd = profcmd + ' [description ""]'
profcmd = profcmd + ' [value \"' + pvalue + '\"]'
profcmd = profcmd + ' [required "false"]]'
print "== creating property:",pname,":",pvalue
rc = AdminConfig.create('Property', pp, profcmd)
AdminConfig.save()
def SetAttributes(appserver,appnode):
print "----------------- Set SessionManager attributes -----------------"
serverid = AdminConfig.getid("/Cell:" + cellName + "/Node:" + appserverNodeName + "/Server:" + appserverName +"/")
sm = AdminConfig.list('SessionManager',serverid)
smprops = "[[enableCookies " + properties.getProperty("sm.enableCookies")
smprops = smprops + "]]"
rc = AdminConfig.modify(sm,smprops)
rc = AdminConfig.save()
############################
# Main
# 1. add custom properties in the webserver plugin properties
# 2. Delete CMS Keystore
# 3.
############################
import sys
import java.util as util
import java.io as javaio
import os
import shutil
import commands
# Directory holding was.properties; overridable via the first script argument.
propdir="/tmp/WASCommon"
if len(sys.argv) > 0:
  d=sys.argv[0]
  if os.path.exists(d):
    propdir=d
print "----- Load properties from directory: " + propdir + "\n"
#load JVM Properties from properties file
properties = util.Properties()
propertiesfis =javaio.FileInputStream(propdir + "/was.properties")
properties.load(propertiesfis)
# NOTE(review): 'sess' is computed but never used below.
sess = AdminConfig.list("JavaVirtualMachine").split("\n")
cell=AdminConfig.list('Cell')
cellName=AdminConfig.showAttribute(cell,'name')
print "== Cell name is",cellName
# Locate the (first) application server and its node.
serverList=AdminTask.listServers('[-serverType APPLICATION_SERVER ]')
servers=serverList.split("\n")
appserverName=AdminConfig.showAttribute(servers[0],'name')
appserverNodeName=servers[0].split("nodes/")[1].split("/servers/")[0]
print "== AppServerName is",appserverName
print "== AppServer node name is",appserverNodeName
# Locate the web server and its node.
WebserverList=AdminTask.listServers('[-serverType WEB_SERVER ]')
WebserverName=AdminConfig.showAttribute(WebserverList,'name')
AfterNodes=WebserverList.split("nodes/")[1]
WebserverNodeName=AfterNodes.split("/servers/")[0]
print "WebServer node name is " + WebserverNodeName + "\n"
print "WebServerName is " + WebserverName + "\n"
AddCustomProperties(WebserverName,WebserverNodeName)
# Delete CMS keystore
command="[-keyStoreName CMSKeyStore -keyStoreScope (cell):" + cellName+":(node):" + WebserverNodeName + ":(server):" + WebserverName + " -certificateAlias default ]"
print "command is " + command
AdminTask.deleteCertificate(command)
AdminConfig.save()
print "Modify SSL Configuration to use TLSv1.2"
# Build the modifySSLConfig command for the app-server node's default SSL
# settings, switching the protocol to TLSv1.2.
SSLConfigCmd="[-alias NodeDefaultSSLSettings -scopeName (cell):"
SSLConfigCmd= SSLConfigCmd + cellName + ":(node):" + appserverNodeName + " -keyStoreName NodeDefaultKeyStore -keyStoreScopeName (cell):"
SSLConfigCmd= SSLConfigCmd + cellName + ":(node):" + appserverNodeName + " -trustStoreName NodeDefaultTrustStore -trustStoreScopeName (cell):"
SSLConfigCmd= SSLConfigCmd + cellName + ":(node):" + appserverNodeName + " -jsseProvider IBMJSSE2 -sslProtocol TLSv1.2 -clientAuthentication false -clientAuthenticationSupported false -securityLevel HIGH -enabledCiphers ]"
print "SSLConfigCmd: " + SSLConfigCmd
AdminTask.modifySSLConfig(SSLConfigCmd)
AdminConfig.save()
print "Convert Security certificate"
# Re-issue certificates to meet the NIST SP800-131 standard.
AdminTask.convertCertForSecurityStandard('[-fipsLevel SP800-131 -signatureAlgorithm SHA256withRSA -keySize 2048 ]')
# Enable FIPS to Convert Certificate
#print "enable FIPS"
#AdminTask.enableFips('[-enableFips true -fipsLevel SP800-131 ]')
#print "Disable FIPS for retrieve signer from IHS host"
#AdminTask.enableFips('[-enableFips false ]')
#AdminConfig.save()
#commands.getoutput('/opt/IBM/WebSphere/Profiles/DefaultAppSrv01/bin/stopServer.sh server1')
#commands.getoutput('/opt/IBM/WebSphere/Profiles/DefaultAppSrv01/bin/startServer.sh server1')
#AdminTask.modifySSLConfig('[-alias NodeDefaultSSLSettings -scopeName (cell):CloudBurstCell_5:(node):CloudBurstNode_5 -keyStoreName NodeDefaultKeyStore -keyStoreScopeName (cell):CloudBurstCell_5:(node):CloudBurstNode_5 -trustStoreName NodeDefaultTrustStore -trustStoreScopeName (cell):CloudBurstCell_5:(node):CloudBurstNode_5 -jsseProvider IBMJSSE2 -sslProtocol TLSv1.2 -clientAuthentication false -clientAuthenticationSupported false -securityLevel HIGH -enabledCiphers ]')
#for appserver in servers:
#    appserverName=AdminConfig.showAttribute(appserver,'name')
#    appserverNodeName=appserver.split("nodes/")[1].split("/servers/")[0]
#    print "== AppServerName is",appserverName
#    print "== AppServer node name is",appserverNodeName
#    serverxml='(cells/' + cellName + '/nodes/' + appserverNodeName + '/servers/' + appserverName + '|server.xml)'
#    sessionMgr=AdminConfig.list('SessionManager',serverxml)
#    print "== SessionManager id is",sessionMgr
#    AddCustomProperties(appserverName,appserverNodeName)
#    SetAttributes(appserverName,appserverNodeName)
| apache-2.0 |
JohannesBuchner/regulargrid | regulargrid/test/test_regulargrid.py | 1 | 1252 | from regulargrid.regulargrid import RegularGrid
import numpy
def test_regular_grid():
	# Smoke test: interpolate on a 2x2 grid with no interior break points
	# and print the value at the corners and centre (Python 2 print syntax).
	rg = RegularGrid([(0,1), (0,1)], numpy.array([[], []]), numpy.array([[1, 2], [3, 4]]))
	for x in [0, 1, 0.5]:
		for y in [0, 1, 0.5]:
			print x, y, rg([x,y])
if __name__ == '__main__':
	# Build a random non-uniform 2-D grid, evaluate a Gaussian bump on it
	# and dump input/output samples to text files for external comparison.
	numpy.random.seed(0)
	limits = [(-10,10), (-3,3)]
	breaks = [numpy.array(sorted(numpy.random.uniform(lo, hi, size=50))) for lo,hi in limits]
	npoints = numpy.product([len(b)+2 for b in breaks])
	inputvalues = []
	values = []
	# Grid axes include the outer limits plus the random interior breaks.
	for x in [limits[0][0]] + list(breaks[0]) + [limits[0][1]]:
		vx = []
		for y in [limits[1][0]] + list(breaks[1]) + [limits[1][1]]:
			# NOTE(review): the random value is immediately overwritten by
			# the Gaussian on the next line.
			z = numpy.random.normal(0, 1)**2.
			z = numpy.exp(-0.5 * ((x - -3)**2 + (y - 1)**2.))
			inputvalues.append([x, y, z])
			vx.append(z)
		values.append(vx)
		#print ' y', len(vx), len(breaks[1]), limits[1]
	#print ' x', len(values), len(breaks[0]), limits[0]
	values = numpy.array(values)
	print values.shape
	rg = RegularGrid(limits, breaks, values)
	# Sample the interpolator at 1000 random points.
	outputvalues = []
	for i in range(1000):
		x = [numpy.random.uniform(lo, hi) for lo,hi in limits]
		outputvalues.append(x + [rg(x)])
	numpy.savetxt("regulargrid_test_input.txt", inputvalues)
	numpy.savetxt("regulargrid_test_output.txt", outputvalues)
| bsd-2-clause |
jmgc/myhdl-numeric | example/cookbook/sinecomp/test_SineComputer.py | 6 | 2266 | from math import pi, sin, cos, log
import random
from myhdl import *
from SineComputer import SineComputer, SineComputer_v
def bench(fractionSize, errorMargin, nrTests=100):
    """ Test bench for SineComputer.
    fractionSize: number of bits after the point
    errorMargin: margin for rounding errors on result
    nrTests: number of tests vectors
    """
    # scaling factor to represent floats as integers
    M = 2**fractionSize
    # maximum angle
    ZMAX = int(round(M*pi/2))
    # error margin shorthand
    D = errorMargin
    # signals
    cos_z0 = Signal(intbv(0, min=-D, max=M+D))
    sin_z0 = Signal(intbv(0, min=-M-D, max=M+D))
    z0 = Signal(intbv(0, min=-ZMAX, max=ZMAX+1))
    done = Signal(False)
    start = Signal(False)
    clock = Signal(bool(0))
    reset = Signal(True)
    # design under test
    # NOTE(review): the converted-Verilog co-simulation wrapper is used;
    # the pure-MyHDL instance is kept commented out above it.
    # dut = SineComputer(cos_z0, sin_z0, done, z0, start, clock, reset)
    dut = SineComputer_v(cos_z0, sin_z0, done, z0, start, clock, reset)
    # clock generator
    @always(delay(10))
    def clockgen():
        clock.next = not clock
    # test vector setup: corner-case angles plus random ones in [-pi/2, pi/2]
    testAngles = [-pi/2, -pi/4, 0.0, pi/4, pi/2]
    testAngles.extend([random.uniform(-pi/2, pi/2) for i in range(nrTests)])
    ## testAngles.extend([random.uniform(-0.01, 0.01) for i in range(nrTests)])
    ## testAngles.extend([random.uniform(pi/2-0.01, pi/2) for i in range(nrTests)])
    ## testAngles.extend([random.uniform(-pi/2, -pi/2+0.01) for i in range(nrTests)])
    # actual test: drive each angle through the start/done handshake and
    # compare the fixed-point outputs against math.cos/math.sin.
    @instance
    def check():
        yield clock.negedge
        reset.next = False
        for z in testAngles:
            yield clock.negedge
            z0.next = int(round(M*z))
            start.next = True
            yield clock.negedge
            start.next = False
            yield done.posedge
            exp_cos_z0 = int(round(cos(z)*M))
            exp_sin_z0 = int(round(sin(z)*M))
            assert abs(cos_z0 - exp_cos_z0) < D
            assert abs(sin_z0 - exp_sin_z0) < D
        raise StopSimulation
    return dut, clockgen, check
def test_bench():
    """Run the SineComputer bench with an 18-bit fraction and a matching
    error margin; the bench's internal asserts do the checking."""
    fraction_bits = 18
    error_margin = fraction_bits
    Simulation(bench(fraction_bits, error_margin)).run()
if __name__ == '__main__':
    # Allow running the bench directly, outside a test runner.
    test_bench()
| lgpl-2.1 |
minhphung171093/OpenERP_V8 | openerp/addons/hr_attendance/__init__.py | 434 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chys87/scripts | sys-init/user_init.py | 1 | 5845 | from __future__ import absolute_import, print_function
import os
import shutil
from . import utils
class Skel(utils.Task):
    """Copy hidden files from /etc/skel into the user's home directory.

    Only regular dot-files are copied, and files that already exist in
    the home directory are never overwritten.
    """

    def run(self):
        skel_dir = '/etc/skel'
        if not os.path.isdir(skel_dir):
            return
        for entry in os.listdir(skel_dir):
            source = os.path.join(skel_dir, entry)
            destination = os.path.join(self.env.home, entry)
            # Hidden regular files only; never clobber an existing file.
            if (entry.startswith('.')
                    and os.path.isfile(source)
                    and not os.path.exists(destination)):
                print('Copying {} to {}'.format(source, self.env.home))
                shutil.copy2(source, self.env.home)
class Shellrc(utils.Task):
    # Shell snippet appended to rc files; sources ~/.shellrc when readable.
    # The trailing ':' keeps the rc file's exit status successful.
    _script = r'''
if [ -r ~/.shellrc ]; then
    . ~/.shellrc
fi
:
'''
    def run(self):
        """Symlink ~/.shellrc to the repo copy and hook it into the
        bash/zsh rc files.

        NOTE(review): the snippet is appended on every run with no
        duplicate check — repeated runs grow .bashrc/.zshrc; confirm
        whether that is intended.
        """
        dst = os.path.join(self.env.home, '.shellrc')
        if not os.path.exists(dst):
            utils.auto_symlink(os.path.join(self.env.base, 'shellrc'), dst)
        for name in ['.bashrc', '.zshrc']:
            with open(os.path.join(self.env.home, name), 'a') as f:
                f.write(self._script)
class DotFiles(utils.Task):
    """Symlink assorted configuration files from the repo into $HOME."""

    # Repository file name -> path relative to the home directory.
    _files = {
        'npmrc': '.npmrc',
        'pip.conf': '.pip/pip.conf',
        'tmux.conf': '.tmux.conf',
        'vimrc': '.vimrc',
    }

    def run(self):
        for repo_name, home_rel in self._files.items():
            link_path = os.path.join(self.env.home, home_rel)
            source_path = os.path.join(self.env.base, repo_name)
            # Never clobber an existing file or link.
            if not os.path.exists(link_path):
                utils.auto_symlink(source_path, link_path)
class VimPlugin(utils.Task):
    """Clone vim plugin repositories and symlink them into ~/.vim.

    Three flavours: classic plugin/ scripts, pathogen-style bundle/
    directories, and single autoload/ files.
    """
    root = False
    # name -> git URL, linked into ~/.vim/plugin
    _plugins = {
        'vim-neatstatus': 'https://github.com/maciakl/vim-neatstatus.git',
        'vim-localvimrc': 'https://github.com/embear/vim-localvimrc.git',
    }
    # name -> git URL, linked into ~/.vim/bundle (pathogen)
    _bundles = {
        'typescript-vim': 'https://github.com/leafgarland/typescript-vim.git',
        'vim-less': 'https://github.com/groenewege/vim-less.git',
        # Another possibility is https://github.com/dcharbon/vim-flatbuffers.git
        # but this one seems more nicely colored
        'zchee-vim-flatbuffers': 'https://github.com/zchee/vim-flatbuffers.git',
    }
    # name -> {url, file}: a single file from the clone goes to ~/.vim/autoload
    _autoloads = {
        'vim-pathogen': {
            'url': 'https://github.com/tpope/vim-pathogen.git',
            'file': 'autoload/pathogen.vim',
        },
    }
    def __init__(self, env):
        super(VimPlugin, self).__init__(env)
        self.autoloaddir = os.path.join(env.home, '.vim', 'autoload')
        self.plugindir = os.path.join(env.home, '.vim', 'plugin')
        self.bundledir = os.path.join(env.home, '.vim', 'bundle')
    def run(self):
        """Create the ~/.vim directories and install every plugin."""
        utils.mkdirp(self.autoloaddir)
        utils.mkdirp(self.plugindir)
        utils.mkdirp(self.bundledir)
        for name, conf in self._autoloads.items():
            self.run_item(self.autoloaddir, conf['url'], name, conf['file'])
        for name, url in self._plugins.items():
            self.run_item(self.plugindir, url, name)
        for name, url in self._bundles.items():
            self.run_item(self.bundledir, url, name)
    def run_item(self, link_dir, url, clone_name, link_file='.'):
        """Clone (or update) one repo and symlink ``link_file`` from the
        clone into ``link_dir`` under its own basename."""
        clone_dir = self.env.external.clone(url, clone_name,
                                            update=self.env.git_pull)
        link_target = os.path.normpath(os.path.join(clone_dir, link_file))
        link = os.path.join(link_dir, os.path.basename(link_target))
        utils.auto_symlink(link_target, link)
class Gitconfig(utils.Task):
    """Hook the repo's gitconfig and gitignore into the user's git setup."""
    root = False

    def run(self):
        gitconfig = os.path.join(self.env.home, '.gitconfig')
        include_path = os.path.relpath(
            os.path.join(self.env.base, 'gitconfig'), self.env.home)
        try:
            with open(gitconfig, 'r') as f:
                existing = f.read()
        except utils.FileNotFoundError:
            existing = ''
        # Append an [include] section only if our path is not referenced yet,
        # so repeated runs do not duplicate it.
        if include_path not in existing:
            print('Modifying {}'.format(gitconfig))
            with open(gitconfig, 'a') as f:
                f.write('[include]\n')
                f.write('\tpath = {}\n'.format(include_path))
        utils.auto_symlink(
            os.path.join(self.env.base, 'gitignore'),
            os.path.join(self.env.home, '.config', 'git', 'ignore'))
class ExternalRepos(utils.Task):
    """Ensure external git repositories are cloned (and optionally pulled)."""
    root = False

    # clone directory name -> upstream URL
    _repos = {
        'oh-my-zsh': 'https://github.com/ohmyzsh/ohmyzsh.git',
    }

    def run(self):
        update = self.env.git_pull
        for name, url in self._repos.items():
            self.env.external.clone(url, name, update=update)
class InstallScripts(utils.Task):
    """Symlink helper scripts into ~/bin2.

    Scripts listed in _remote_scripts are installed only when running on a
    remote machine.
    """
    root = False

    _scripts = {
    }
    _remote_scripts = {
        'guess-ssh-agent': 'guess-ssh-agent.sh',
    }

    def run(self):
        bin_dir = os.path.join(self.env.home, 'bin2')
        utils.mkdirp(bin_dir)
        wanted = dict(self._scripts)
        if self.env.is_remote:
            wanted.update(self._remote_scripts)
        for link_name, rel_target in wanted.items():
            link_path = os.path.join(bin_dir, link_name)
            # Existing links/files are left alone.
            if os.path.exists(link_path):
                continue
            utils.auto_symlink(os.path.join(self.env.base, rel_target),
                               link_path)
class InstallBinDirs(utils.Task):
    """Symlink whole script directories from the repo into ~/bin.d."""
    root = False

    # link name under ~/bin.d -> directory inside the repo
    _dirs = {
        'node-tools': 'node-tools/bin',
        'git-tools': 'git-tools',
    }

    def run(self):
        bin_dir = os.path.join(self.env.home, 'bin.d')
        utils.mkdirp(bin_dir)
        for link_name, rel_target in self._dirs.items():
            link_path = os.path.join(bin_dir, link_name)
            if os.path.exists(link_path):
                continue
            utils.auto_symlink(os.path.join(self.env.base, rel_target),
                               link_path)
class Mkdir(utils.Task):
    """Create a fixed set of directories under the home directory."""

    _dirs = [
        'tmp',
    ]

    def run(self):
        for name in self._dirs:
            utils.mkdirp(os.path.join(self.env.home, name))
| bsd-3-clause |
wwj718/murp-edx | common/lib/xmodule/xmodule/capa_module.py | 18 | 7645 | """Implements basics of Capa, including class CapaModule."""
import json
import logging
import sys
from pkg_resources import resource_string
from .capa_base import CapaMixin, CapaFields, ComplexEncoder
from .progress import Progress
from xmodule.x_module import XModule, module_attr
from xmodule.raw_module import RawDescriptor
from xmodule.exceptions import NotFoundError, ProcessingError
log = logging.getLogger("edx.courseware")
class CapaModule(CapaMixin, XModule):
    """
    An XModule implementing LonCapa format problems, implemented by way of
    capa.capa_problem.LoncapaProblem

    CapaModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__

    Note: this file is Python 2 (it uses the three-expression ``raise``
    statement in handle_ajax).
    """
    icon_class = 'problem'

    # Client-side assets bundled with the module.
    js = {
        'coffee': [
            resource_string(__name__, 'js/src/capa/display.coffee'),
            resource_string(__name__, 'js/src/javascript_loader.coffee'),
        ],
        'js': [
            resource_string(__name__, 'js/src/collapsible.js'),
            resource_string(__name__, 'js/src/capa/imageinput.js'),
            resource_string(__name__, 'js/src/capa/schematic.js'),
        ]
    }

    js_module_name = "Problem"
    css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}

    def __init__(self, *args, **kwargs):
        """
        Accepts the same arguments as xmodule.x_module:XModule.__init__
        """
        super(CapaModule, self).__init__(*args, **kwargs)

    def handle_ajax(self, dispatch, data):
        """
        This is called by courseware.module_render, to handle an AJAX call.
        `data` is request.POST.

        Returns a json dictionary:
        { 'progress_changed' : True/False,
        'progress' : 'none'/'in_progress'/'done',
        <other request-specific values here > }
        """
        # Dispatch table mapping action names to bound handler methods; the
        # handlers themselves are provided by CapaMixin.
        handlers = {
            'problem_get': self.get_problem,
            'problem_check': self.check_problem,
            'problem_reset': self.reset_problem,
            'problem_save': self.save_problem,
            'problem_show': self.get_answer,
            'score_update': self.update_score,
            'input_ajax': self.handle_input_ajax,
            'ungraded_response': self.handle_ungraded_response
        }

        # Translation function from the runtime's i18n service.
        _ = self.runtime.service(self, "i18n").ugettext

        generic_error_message = _(
            "We're sorry, there was an error with processing your request. "
            "Please try reloading your page and trying again."
        )

        not_found_error_message = _(
            "The state of this problem has changed since you loaded this page. "
            "Please refresh your page."
        )

        if dispatch not in handlers:
            return 'Error: {} is not a known capa action'.format(dispatch)

        # Sample progress before and after the handler runs so the client
        # can be told whether it changed.
        before = self.get_progress()

        try:
            result = handlers[dispatch](data)

        except NotFoundError as err:
            _, _, traceback_obj = sys.exc_info()  # pylint: disable=redefined-outer-name
            # Python 2 three-expression raise: wrap in ProcessingError while
            # preserving the original traceback.
            raise ProcessingError, (not_found_error_message, err), traceback_obj

        except Exception as err:
            _, _, traceback_obj = sys.exc_info()  # pylint: disable=redefined-outer-name
            raise ProcessingError, (generic_error_message, err), traceback_obj

        after = self.get_progress()

        result.update({
            'progress_changed': after != before,
            'progress_status': Progress.to_js_status_str(after),
            'progress_detail': Progress.to_js_detail_str(after),
        })

        return json.dumps(result, cls=ComplexEncoder)
class CapaDescriptor(CapaFields, RawDescriptor):
    """
    Module implementing problems in the LON-CAPA format,
    as implemented by capa.capa_problem
    """
    module_class = CapaModule

    has_score = True
    template_dir_name = 'problem'
    mako_template = "widgets/problem-edit.html"

    # Studio editor assets.
    js = {'coffee': [resource_string(__name__, 'js/src/problem/edit.coffee')]}
    js_module_name = "MarkdownEditingDescriptor"
    css = {
        'scss': [
            resource_string(__name__, 'css/editor/edit.scss'),
            resource_string(__name__, 'css/problem/edit.scss')
        ]
    }

    # Capa modules have some additional metadata:
    # TODO (vshnayder): do problems have any other metadata? Do they
    # actually use type and points?
    metadata_attributes = RawDescriptor.metadata_attributes + ('type', 'points')

    # The capa format specifies that what we call max_attempts in the code
    # is the attribute `attempts`. This will do that conversion
    metadata_translations = dict(RawDescriptor.metadata_translations)
    metadata_translations['attempts'] = 'max_attempts'

    @classmethod
    def filter_templates(cls, template, course):
        """
        Filter template that contains 'latex' from templates.

        Show them only if use_latex_compiler is set to True in
        course settings.
        """
        return (not 'latex' in template['template_id'] or course.use_latex_compiler)

    def get_context(self):
        # Extend the base rendering context with markdown/LaTeX editor flags
        # consumed by the problem-edit template.
        _context = RawDescriptor.get_context(self)
        _context.update({
            'markdown': self.markdown,
            # Markdown editing is only enabled when markdown source exists.
            'enable_markdown': self.markdown is not None,
            'enable_latex_compiler': self.use_latex_compiler,
        })
        return _context

    # VS[compat]
    # TODO (cpennington): Delete this method once all fall 2012 course are being
    # edited in the cms
    @classmethod
    def backcompat_paths(cls, path):
        # Strips the first 8 characters of `path` — presumably a 'problem/'
        # prefix (8 chars); TODO confirm against callers.
        return [
            'problems/' + path[8:],
            path[8:],
        ]

    @property
    def non_editable_metadata_fields(self):
        # Fields hidden from Studio's generic metadata editor; they are
        # managed elsewhere or not meant to be hand-edited.
        non_editable_fields = super(CapaDescriptor, self).non_editable_metadata_fields
        non_editable_fields.extend([
            CapaDescriptor.due,
            CapaDescriptor.graceperiod,
            CapaDescriptor.force_save_button,
            CapaDescriptor.markdown,
            CapaDescriptor.text_customization,
            CapaDescriptor.use_latex_compiler,
        ])
        return non_editable_fields

    # Proxy to CapaModule for access to any of its attributes
    answer_available = module_attr('answer_available')
    check_button_name = module_attr('check_button_name')
    check_button_checking_name = module_attr('check_button_checking_name')
    check_problem = module_attr('check_problem')
    choose_new_seed = module_attr('choose_new_seed')
    closed = module_attr('closed')
    get_answer = module_attr('get_answer')
    get_problem = module_attr('get_problem')
    get_problem_html = module_attr('get_problem_html')
    get_state_for_lcp = module_attr('get_state_for_lcp')
    handle_input_ajax = module_attr('handle_input_ajax')
    handle_problem_html_error = module_attr('handle_problem_html_error')
    handle_ungraded_response = module_attr('handle_ungraded_response')
    is_attempted = module_attr('is_attempted')
    is_correct = module_attr('is_correct')
    is_past_due = module_attr('is_past_due')
    is_submitted = module_attr('is_submitted')
    lcp = module_attr('lcp')
    make_dict_of_responses = module_attr('make_dict_of_responses')
    new_lcp = module_attr('new_lcp')
    publish_grade = module_attr('publish_grade')
    rescore_problem = module_attr('rescore_problem')
    reset_problem = module_attr('reset_problem')
    save_problem = module_attr('save_problem')
    set_state_from_lcp = module_attr('set_state_from_lcp')
    should_show_check_button = module_attr('should_show_check_button')
    should_show_reset_button = module_attr('should_show_reset_button')
    should_show_save_button = module_attr('should_show_save_button')
    update_score = module_attr('update_score')
| agpl-3.0 |
breznak/nupic | src/nupic/swarming/HypersearchV2.py | 31 | 169327 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import os
import time
import logging
import json
import hashlib
import itertools
import StringIO
import shutil
import tempfile
import copy
import pprint
from operator import itemgetter
from nupic.frameworks.opf import opfhelpers
from nupic.swarming.hypersearch.utils import sortedJSONDumpS, rApply, rCopy
from nupic.support.configuration import Configuration
from nupic.swarming.hypersearch.utils import clippedObj
from nupic.swarming.hypersearch.errorcodes import ErrorCodes
from nupic.swarming.hypersearch.experimentutils import InferenceType
from nupic.database.ClientJobsDAO import (
ClientJobsDAO, InvalidConnectionException)
from nupic.swarming.hypersearch.utils import (runModelGivenBaseAndParams,
runDummyModel)
from nupic.swarming.permutationhelpers import *
from nupic.swarming.exp_generator.ExpGenerator import expGenerator
def _flattenKeys(keys):
return '|'.join(keys)
class SwarmTerminator(object):
    """Class that records the performance of swarms in a sprint and makes
    decisions about which swarms should stop running. This is a useful
    optimization that identifies field combinations that no longer need to
    be run.
    """
    # Number of generations over which a swarm's best score must be flat for
    # the swarm to be considered mature (loaded from configuration in
    # __init__).
    MATURITY_WINDOW = None
    # Hard cap on generations per swarm; None means no cap.
    MAX_GENERATIONS = None
    # Default per-generation tolerance band used when comparing a swarm's
    # score against the generation's best score.
    _DEFAULT_MILESTONES = [1.0 / (x + 1) for x in xrange(12)]

    def __init__(self, milestones=None, logLevel=None):
        """
        Parameters:
        ---------------------------------------------------------------------
        milestones: optional list of per-generation tolerances; defaults to a
                    copy of _DEFAULT_MILESTONES
        logLevel:   unused here; kept for interface compatibility
        """
        # Set class constants.
        self.MATURITY_WINDOW = int(Configuration.get(
            "nupic.hypersearch.swarmMaturityWindow"))
        self.MAX_GENERATIONS = int(Configuration.get(
            "nupic.hypersearch.swarmMaxGenerations"))
        if self.MAX_GENERATIONS < 0:
            self.MAX_GENERATIONS = None

        # Set up instance variables.
        self._isTerminationEnabled = bool(int(Configuration.get(
            'nupic.hypersearch.enableSwarmTermination')))

        # swarmId -> list of cumulative best scores, one per generation
        self.swarmBests = dict()
        # swarmId -> list of raw best scores, one per generation
        self.swarmScores = dict()
        # swarmIds that have already been told to stop
        self.terminatedSwarms = set([])

        self._logger = logging.getLogger(".".join(
            ['com.numenta', self.__class__.__module__, self.__class__.__name__]))

        if milestones is not None:
            self.milestones = milestones
        else:
            self.milestones = copy.deepcopy(self._DEFAULT_MILESTONES)

    def recordDataPoint(self, swarmId, generation, errScore):
        """Record the best score for a swarm's generation index (x)
        Returns list of swarmIds to terminate.
        """
        terminatedSwarms = []

        # Append score to existing swarm.
        if swarmId in self.swarmScores:
            entry = self.swarmScores[swarmId]
            # Scores must arrive in strict generation order.
            assert (len(entry) == generation)
            entry.append(errScore)

            entry = self.swarmBests[swarmId]
            entry.append(min(errScore, entry[-1]))

            assert (len(self.swarmBests[swarmId]) == len(self.swarmScores[swarmId]))
        else:
            # Create list of scores for a new swarm
            assert (generation == 0)
            self.swarmScores[swarmId] = [errScore]
            self.swarmBests[swarmId] = [errScore]

        # If the current swarm hasn't completed at least MATURITY_WINDOW
        # generations, it should not be a candidate for maturation or
        # termination. This prevents the initial allocation of particles in
        # PSO from killing off a field combination too early.
        if generation + 1 < self.MATURITY_WINDOW:
            return terminatedSwarms

        # If the swarm has completed more than MAX_GENERATIONS, it should be
        # marked as mature, regardless of how its value is changing.
        if self.MAX_GENERATIONS is not None and generation > self.MAX_GENERATIONS:
            self._logger.info(
                'Swarm %s has matured (more than %d generations). Stopping' %
                (swarmId, self.MAX_GENERATIONS))
            terminatedSwarms.append(swarmId)

        if self._isTerminationEnabled:
            terminatedSwarms.extend(self._getTerminatedSwarms(generation))

        # Return which swarms to kill when we've reached maturity
        # If there is no change in the swarm's best for some time,
        # Mark it dead
        cumulativeBestScores = self.swarmBests[swarmId]
        if cumulativeBestScores[-1] == cumulativeBestScores[-self.MATURITY_WINDOW]:
            self._logger.info('Swarm %s has matured (no change in %d generations).'
                              'Stopping...' % (swarmId, self.MATURITY_WINDOW))
            terminatedSwarms.append(swarmId)

        self.terminatedSwarms = self.terminatedSwarms.union(terminatedSwarms)
        return terminatedSwarms

    def numDataPoints(self, swarmId):
        """Return how many generations of scores have been recorded for the
        given swarm (0 if the swarm is unknown)."""
        if swarmId in self.swarmScores:
            return len(self.swarmScores[swarmId])
        else:
            return 0

    def _getTerminatedSwarms(self, generation):
        """Return the list of swarms whose score at `generation` falls outside
        the tolerance band around the best score for that generation."""
        terminatedSwarms = []
        generationScores = dict()
        for swarm, scores in self.swarmScores.iteritems():
            if len(scores) > generation and swarm not in self.terminatedSwarms:
                generationScores[swarm] = scores[generation]

        if len(generationScores) == 0:
            # Bug fix: this used to be a bare `return` (i.e. None), which made
            # the caller's terminatedSwarms.extend(...) raise TypeError.
            # Return the empty list instead.
            return terminatedSwarms

        bestScore = min(generationScores.values())
        # NOTE(review): generation >= len(self.milestones) would raise
        # IndexError here — presumably MAX_GENERATIONS keeps it in range;
        # confirm.
        tolerance = self.milestones[generation]
        for swarm, score in generationScores.iteritems():
            if score > (1 + tolerance) * bestScore:
                self._logger.info('Swarm %s is doing poorly at generation %d.\n'
                                  'Current Score:%s \n'
                                  'Best Score:%s \n'
                                  'Tolerance:%s. Stopping...',
                                  swarm, generation, score, bestScore, tolerance)
                terminatedSwarms.append(swarm)
        return terminatedSwarms
class ResultsDB(object):
"""This class holds all the information we have accumulated on completed
models, which particles were used, etc.
When we get updated results sent to us (via recordModelProgress), we
record it here for access later by various functions in this module.
"""
def __init__(self, hsObj):
    """ Instantiate our results database

    Parameters:
    --------------------------------------------------------------------
    hsObj: Reference to the HypersearchV2 instance
    """
    self._hsObj = hsObj

    # This list holds all the results we have so far on every model. In
    # addition, we maintain multiple other data structures which provide
    # faster access into portions of this list
    self._allResults = []

    # Models that completed with errors and all completed.
    # These are used to determine when we should abort because of too many
    # errors
    self._errModels = set()
    self._numErrModels = 0
    self._completedModels = set()
    self._numCompletedModels = 0

    # Map of the model ID to index of result in _allResults
    self._modelIDToIdx = dict()

    # The global best result on the optimize metric so far, and the model ID
    # (canonicalized so that lower is always better).
    self._bestResult = numpy.inf
    self._bestModelID = None

    # This is a dict of dicts. The top level dict has the swarmId as the key.
    # Each entry is a dict of genIdx: (modelId, errScore) entries.
    self._swarmBestOverall = dict()

    # For each swarm, we keep track of how many particles we have per
    # generation. The key is the swarmId, the value is a list of the number
    # of particles at each generation
    self._swarmNumParticlesPerGeneration = dict()

    # The following variables are used to support the
    # getMaturedSwarmGenerations() call.
    #
    # The _modifiedSwarmGens set contains the set of (swarmId, genIdx) tuples
    # that have had results reported to them since the last time
    # getMaturedSwarmGenerations() was called.
    #
    # The maturedSwarmGens contains (swarmId,genIdx) tuples, one for each
    # swarm generation index which we have already detected has matured. This
    # insures that if by chance we get a rogue report from a model in a swarm
    # generation index which we have already assumed was matured that we won't
    # report on it again.
    self._modifiedSwarmGens = set()
    self._maturedSwarmGens = set()

    # For each particle, we keep track of it's best score (across all
    # generations) and the position it was at when it got that score. The keys
    # in this dict are the particleId, the values are (bestResult, position),
    # where position is a dict with varName:position items in it.
    self._particleBest = dict()

    # For each particle, we keep track of it's latest generation index.
    self._particleLatestGenIdx = dict()

    # For each swarm, we keep track of which models are in it. The key
    # is the swarmId, the value is a list of indexes into self._allResults.
    self._swarmIdToIndexes = dict()

    # ParamsHash to index mapping
    self._paramsHashToIndexes = dict()
def update(self, modelID, modelParams, modelParamsHash, metricResult,
           completed, completionReason, matured, numRecords):
    """ Insert a new entry or update an existing one. If this is an update
    of an existing entry, then modelParams will be None

    Parameters:
    --------------------------------------------------------------------
    modelID: globally unique modelID of this model
    modelParams: params dict for this model, or None if this is just an update
                of a model that it already previously reported on.
                See the comments for the createModels() method for
                a description of this dict.
    modelParamsHash: hash of the modelParams dict, generated by the worker
                that put it into the model database.
    metricResult: value on the optimizeMetric for this model.
                May be None if we have no results yet.
    completed: True if the model has completed evaluation, False if it
                is still running (and these are online results)
    completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates
    matured: True if this model has matured
    numRecords: Number of records that have been processed so far by this
                model.

    retval: Canonicalized result on the optimize metric
    """
    # The modelParamsHash must always be provided - it can change after a
    # model is inserted into the models table if it got detected as an
    # orphan
    assert (modelParamsHash is not None)

    # We consider a model metricResult as "final" if it has completed or
    # matured. By default, assume anything that has completed has matured
    if completed:
        matured = True

    # Get the canonicalized optimize metric results. For this metric, lower
    # is always better
    if metricResult is not None and matured and \
            completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
                                 ClientJobsDAO.CMPL_REASON_STOPPED]:
        # Canonicalize the error score so that lower is better
        if self._hsObj._maximize:
            errScore = -1 * metricResult
        else:
            errScore = metricResult

        if errScore < self._bestResult:
            self._bestResult = errScore
            self._bestModelID = modelID
            self._hsObj.logger.info("New best model after %d evaluations: errScore "
                "%g on model %s" % (len(self._allResults), self._bestResult,
                                    self._bestModelID))

    else:
        # No usable result yet: treat as infinitely bad.
        errScore = numpy.inf

    # If this model completed with an unacceptable completion reason, set the
    # errScore to infinite and essentially make this model invisible to
    # further queries
    if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
        errScore = numpy.inf
        hidden = True
    else:
        hidden = False

    # Update our set of erred models and completed models. These are used
    # to determine if we should abort the search because of too many errors
    if completed:
        self._completedModels.add(modelID)
        self._numCompletedModels = len(self._completedModels)
        if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
            self._errModels.add(modelID)
            self._numErrModels = len(self._errModels)

    # Are we creating a new entry?
    wasHidden = False
    if modelID not in self._modelIDToIdx:
        assert (modelParams is not None)
        entry = dict(modelID=modelID, modelParams=modelParams,
                     modelParamsHash=modelParamsHash,
                     errScore=errScore, completed=completed,
                     matured=matured, numRecords=numRecords, hidden=hidden)
        self._allResults.append(entry)
        entryIdx = len(self._allResults) - 1
        self._modelIDToIdx[modelID] = entryIdx

        self._paramsHashToIndexes[modelParamsHash] = entryIdx

        swarmId = modelParams['particleState']['swarmId']
        if not hidden:
            # Update the list of particles in each swarm
            if swarmId in self._swarmIdToIndexes:
                self._swarmIdToIndexes[swarmId].append(entryIdx)
            else:
                self._swarmIdToIndexes[swarmId] = [entryIdx]

            # Update number of particles at each generation in this swarm
            genIdx = modelParams['particleState']['genIdx']
            numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
            while genIdx >= len(numPsEntry):
                numPsEntry.append(0)
            numPsEntry[genIdx] += 1
            self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry

    # Replacing an existing one
    else:
        entryIdx = self._modelIDToIdx.get(modelID, None)
        assert (entryIdx is not None)
        entry = self._allResults[entryIdx]
        wasHidden = entry['hidden']

        # If the paramsHash changed, note that. This can happen for orphaned
        # models
        if entry['modelParamsHash'] != modelParamsHash:
            self._paramsHashToIndexes.pop(entry['modelParamsHash'])
            self._paramsHashToIndexes[modelParamsHash] = entryIdx
            entry['modelParamsHash'] = modelParamsHash

        # Get the model params, swarmId, and genIdx
        modelParams = entry['modelParams']
        swarmId = modelParams['particleState']['swarmId']
        genIdx = modelParams['particleState']['genIdx']

        # If this particle just became hidden, remove it from our swarm counts
        if hidden and not wasHidden:
            assert (entryIdx in self._swarmIdToIndexes[swarmId])
            self._swarmIdToIndexes[swarmId].remove(entryIdx)
            self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1

        # Update the entry for the latest info
        entry['errScore'] = errScore
        entry['completed'] = completed
        entry['matured'] = matured
        entry['numRecords'] = numRecords
        entry['hidden'] = hidden

    # Update the particle best errScore
    particleId = modelParams['particleState']['id']
    genIdx = modelParams['particleState']['genIdx']
    if matured and not hidden:
        (oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
        if errScore < oldResult:
            pos = Particle.getPositionFromState(modelParams['particleState'])
            self._particleBest[particleId] = (errScore, pos)

    # Update the particle latest generation index
    prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
    if not hidden and genIdx > prevGenIdx:
        self._particleLatestGenIdx[particleId] = genIdx
    elif hidden and not wasHidden and genIdx == prevGenIdx:
        # The latest generation just became hidden; roll back to the previous
        # generation index.
        self._particleLatestGenIdx[particleId] = genIdx-1

    # Update the swarm best score
    if not hidden:
        swarmId = modelParams['particleState']['swarmId']
        if not swarmId in self._swarmBestOverall:
            self._swarmBestOverall[swarmId] = []

        bestScores = self._swarmBestOverall[swarmId]
        while genIdx >= len(bestScores):
            bestScores.append((None, numpy.inf))
        if errScore < bestScores[genIdx][1]:
            bestScores[genIdx] = (modelID, errScore)

    # Update the self._modifiedSwarmGens flags to support the
    # getMaturedSwarmGenerations() call.
    if not hidden:
        key = (swarmId, genIdx)
        if not key in self._maturedSwarmGens:
            self._modifiedSwarmGens.add(key)

    return errScore
def getNumErrModels(self):
    """Return the count of models that completed with errors.

    Parameters:
    ---------------------------------------------------------------------
    retval: number of errored models
    """
    return self._numErrModels
def getErrModelIds(self):
    """Return the IDs of all models that completed with errors.

    Parameters:
    ---------------------------------------------------------------------
    retval: list of model IDs (a fresh copy; order is unspecified)
    """
    errModelIds = list(self._errModels)
    return errModelIds
def getNumCompletedModels(self):
    """Return the total number of models that finished evaluating.

    Parameters:
    ---------------------------------------------------------------------
    retval: number of completed models
    """
    return self._numCompletedModels
def getModelIDFromParamsHash(self, paramsHash):
    """ Return the modelID of the model with the given paramsHash, or
    None if not found.

    Parameters:
    ---------------------------------------------------------------------
    paramsHash: paramsHash to look for
    retval:     matching modelId, or None if no model has that hash
    """
    try:
        entryIdx = self._paramsHashToIndexes[paramsHash]
    except KeyError:
        return None
    return self._allResults[entryIdx]['modelID']
def numModels(self, swarmId=None, includeHidden=False):
    """Count the models in our database, either globally (swarmId is None)
    or within one swarm.

    Parameters:
    ---------------------------------------------------------------------
    swarmId: A string representation of the sorted list of encoders
                in this swarm. For example '__address_encoder.__gym_encoder'
    includeHidden: If False, hidden (i.e. orphaned) models are excluded
                from the count.
    retval: numModels
    """
    # Select the entries of interest.
    if swarmId is None:
        entries = self._allResults
    else:
        entries = [self._allResults[idx]
                   for idx in self._swarmIdToIndexes.get(swarmId, [])]

    if includeHidden:
        return len(entries)
    return sum(1 for entry in entries if not entry['hidden'])
def bestModelIdAndErrScore(self, swarmId=None, genIdx=None):
    """Return the model ID of the model with the best result so far and
    its score on the optimize metric.

    With swarmId None, the global best is returned; otherwise the best
    within that swarm over generations 0..genIdx (all generations when
    genIdx is None).

    Parameters:
    ---------------------------------------------------------------------
    swarmId: A string representation of the sorted list of encoders in this
                swarm. For example '__address_encoder.__gym_encoder'
    genIdx: consider the best in all generations up to and including this
                generation if not None.
    retval: (modelID, result)
    """
    if swarmId is None:
        return (self._bestModelID, self._bestResult)

    genScores = self._swarmBestOverall.get(swarmId)
    if genScores is None:
        return (None, numpy.inf)

    # Scan per-generation (modelId, errScore) pairs, honoring the genIdx cap.
    bestModelId = None
    bestScore = numpy.inf
    for genIndex, (modelId, errScore) in enumerate(genScores):
        if genIdx is not None and genIndex > genIdx:
            break
        if errScore < bestScore:
            bestModelId = modelId
            bestScore = errScore
    return (bestModelId, bestScore)
def getParticleInfo(self, modelId):
    """Return particle info for a specific modelId.

    Parameters:
    ---------------------------------------------------------------------
    modelId: which model Id
    retval: (particleState, modelId, errScore, completed, matured)
    """
    entryIdx = self._modelIDToIdx[modelId]
    entry = self._allResults[entryIdx]
    particleState = entry['modelParams']['particleState']
    return (particleState, modelId, entry['errScore'],
            entry['completed'], entry['matured'])
def getParticleInfos(self, swarmId=None, genIdx=None, completed=None,
                     matured=None, lastDescendent=False):
    """Return a list of particleStates for all particles we know about in
    the given swarm, their model Ids, and metric results.

    Parameters:
    ---------------------------------------------------------------------
    swarmId: A string representation of the sorted list of encoders in this
                 swarm. For example '__address_encoder.__gym_encoder'

    genIdx: If not None, only return particles at this specific generation
                  index.

    completed:   If not None, only return particles of the given state (either
                completed if 'completed' is True, or running if 'completed'
                is false

    matured: If not None, only return particles of the given state (either
                matured if 'matured' is True, or not matured if 'matured'
                is false. Note that any model which has completed is also
                considered matured.

    lastDescendent: If True, only return particles that are the last descendent,
                that is, the highest generation index for a given particle Id

    retval: (particleStates, modelIds, errScores, completed, matured)
              particleStates: list of particleStates
              modelIds: list of modelIds
              errScores: list of errScores, numpy.inf is plugged in
                              if we don't have a result yet
              completed: list of completed booleans
              matured: list of matured booleans
    """
    # The indexes of all the models in this swarm. This list excludes hidden
    # (orphaned) models.
    if swarmId is not None:
        entryIdxs = self._swarmIdToIndexes.get(swarmId, [])
    else:
        entryIdxs = range(len(self._allResults))
    if len(entryIdxs) == 0:
        return ([], [], [], [], [])

    # Get the particles of interest; five parallel lists are built so that
    # index i in each refers to the same model.
    particleStates = []
    modelIds = []
    errScores = []
    completedFlags = []
    maturedFlags = []
    for idx in entryIdxs:
        entry = self._allResults[idx]

        # If this entry is hidden (i.e. it was an orphaned model), it should
        # not be in this list
        if swarmId is not None:
            assert (not entry['hidden'])

        # Get info on this model
        modelParams = entry['modelParams']
        isCompleted = entry['completed']
        isMatured = entry['matured']
        particleState = modelParams['particleState']
        particleGenIdx = particleState['genIdx']
        particleId = particleState['id']

        # Apply each optional filter in turn.
        if genIdx is not None and particleGenIdx != genIdx:
            continue

        if completed is not None and (completed != isCompleted):
            continue

        if matured is not None and (matured != isMatured):
            continue

        if lastDescendent \
                and (self._particleLatestGenIdx[particleId] != particleGenIdx):
            continue

        # Incorporate into return values
        particleStates.append(particleState)
        modelIds.append(entry['modelID'])
        errScores.append(entry['errScore'])
        completedFlags.append(isCompleted)
        maturedFlags.append(isMatured)

    return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
def getOrphanParticleInfos(self, swarmId, genIdx):
    """Return a list of particleStates for all particles in the given
    swarm generation that have been orphaned.

    Parameters:
    ---------------------------------------------------------------------
    swarmId: A string representation of the sorted list of encoders in this
                swarm. For example '__address_encoder.__gym_encoder'
    genIdx: If not None, only return particles at this specific generation
                index.

    retval: (particleStates, modelIds, errScores, completed, matured)
              particleStates: list of particleStates
              modelIds: list of modelIds
              errScores: list of errScores, numpy.inf is plugged in
                            if we don't have a result yet
              completed: list of completed booleans
              matured: list of matured booleans
    """
    # Five parallel lists; index i of each describes the same model.
    particleStates = []
    modelIds = []
    errScores = []
    completedFlags = []
    maturedFlags = []

    for entry in self._allResults:
        # Orphaned models are the hidden ones; everything else is skipped.
        if not entry['hidden']:
            continue

        particleState = entry['modelParams']['particleState']
        if particleState['swarmId'] != swarmId:
            continue
        if genIdx is not None and particleState['genIdx'] != genIdx:
            continue

        particleStates.append(particleState)
        modelIds.append(entry['modelID'])
        errScores.append(entry['errScore'])
        completedFlags.append(entry['completed'])
        maturedFlags.append(entry['matured'])

    return (particleStates, modelIds, errScores, completedFlags, maturedFlags)
  def getMaturedSwarmGenerations(self):
    """Return a list of swarm generations that have completed and the
    best (minimal) errScore seen for each of them.

    A generation is considered complete once ALL of its particles have
    matured AND it contains at least self._hsObj._minParticlesPerSwarm
    particles. Each completed generation is reported exactly once; the
    bookkeeping sets _modifiedSwarmGens and _maturedSwarmGens are updated
    as a side effect.

    Parameters:
    ---------------------------------------------------------------------
    retval:  list of tuples. Each tuple is of the form:
              (swarmId, genIdx, bestErrScore)
    """
    # Return results go in this list
    result = []

    # For each of the swarm generations which have had model result updates
    # since the last time we were called, see which have completed.
    # NOTE: we iterate a sorted snapshot so that removing keys from
    # _modifiedSwarmGens inside the loop is safe.
    modifiedSwarmGens = sorted(self._modifiedSwarmGens)

    # Walk through them in order from lowest to highest generation index
    for key in modifiedSwarmGens:
      (swarmId, genIdx) = key

      # Skip it if we've already reported on it. This should happen rarely, if
      # ever. It means that some worker has started and completed a model in
      # this generation after we've determined that the generation has ended.
      if key in self._maturedSwarmGens:
        self._modifiedSwarmGens.remove(key)
        continue

      # If the previous generation for this swarm is not complete yet, don't
      # bother evaluating this one.
      if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
        continue

      # We found a swarm generation that had some results reported since last
      # time, see if it's complete or not
      (_, _, errScores, completedFlags, maturedFlags) = \
                                self.getParticleInfos(swarmId, genIdx)
      maturedFlags = numpy.array(maturedFlags)
      numMatured = maturedFlags.sum()
      # Complete == enough particles AND every one of them has matured
      if numMatured >= self._hsObj._minParticlesPerSwarm \
            and numMatured == len(maturedFlags):
        errScores = numpy.array(errScores)
        bestScore = errScores.min()

        self._maturedSwarmGens.add(key)
        self._modifiedSwarmGens.remove(key)
        result.append((swarmId, genIdx, bestScore))

    # Return results
    return result
def firstNonFullGeneration(self, swarmId, minNumParticles):
""" Return the generation index of the first generation in the given
swarm that does not have numParticles particles in it, either still in the
running state or completed. This does not include orphaned particles.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
minNumParticles: minium number of partices required for a full
generation.
retval: generation index, or None if no particles at all.
"""
if not swarmId in self._swarmNumParticlesPerGeneration:
return None
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
numPsPerGen = numpy.array(numPsPerGen)
firstNonFull = numpy.where(numPsPerGen < minNumParticles)[0]
if len(firstNonFull) == 0:
return len(numPsPerGen)
else:
return firstNonFull[0]
def highestGeneration(self, swarmId):
""" Return the generation index of the highest generation in the given
swarm.
Parameters:
---------------------------------------------------------------------
swarmId: A string representation of the sorted list of encoders in this
swarm. For example '__address_encoder.__gym_encoder'
retval: generation index
"""
numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]
return len(numPsPerGen)-1
def getParticleBest(self, particleId):
""" Return the best score and position for a given particle. The position
is given as a dict, with varName:varPosition items in it.
Parameters:
---------------------------------------------------------------------
particleId: which particle
retval: (bestResult, bestPosition)
"""
return self._particleBest.get(particleId, (None, None))
def getResultsPerChoice(self, swarmId, maxGenIdx, varName):
""" Return a dict of the errors obtained on models that were run with
each value from a PermuteChoice variable.
For example, if a PermuteChoice variable has the following choices:
['a', 'b', 'c']
The dict will have 3 elements. The keys are the stringified choiceVars,
and each value is tuple containing (choiceVar, errors) where choiceVar is
the original form of the choiceVar (before stringification) and errors is
the list of errors received from models that used the specific choice:
retval:
['a':('a', [0.1, 0.2, 0.3]), 'b':('b', [0.5, 0.1, 0.6]), 'c':('c', [])]
Parameters:
---------------------------------------------------------------------
swarmId: swarm Id of the swarm to retrieve info from
maxGenIdx: max generation index to consider from other models, ignored
if None
varName: which variable to retrieve
retval: list of the errors obtained from each choice.
"""
results = dict()
# Get all the completed particles in this swarm
(allParticles, _, resultErrs, _, _) = self.getParticleInfos(swarmId,
genIdx=None, matured=True)
for particleState, resultErr in itertools.izip(allParticles, resultErrs):
# Consider this generation?
if maxGenIdx is not None:
if particleState['genIdx'] > maxGenIdx:
continue
# Ignore unless this model completed successfully
if resultErr == numpy.inf:
continue
position = Particle.getPositionFromState(particleState)
varPosition = position[varName]
varPositionStr = str(varPosition)
if varPositionStr in results:
results[varPositionStr][1].append(resultErr)
else:
results[varPositionStr] = (varPosition, [resultErr])
return results
class Particle(object):
  """Construct a particle. Each particle evaluates one or more models
  serially. Each model represents a position that the particle is evaluated
  at.

  Each position is a set of values chosen for each of the permutation variables.
  The particle's best position is the value of the permutation variables when it
  did best on the optimization metric.

  Some permutation variables are treated like traditional particle swarm
  variables - that is they have a position and velocity. Others are simply
  choice variables, for example a list of strings. We follow a different
  methodology for choosing each permutation variable value depending on its
  type.

  A particle belongs to 1 and only 1 swarm. A swarm is a collection of particles
  that all share the same global best position. A swarm is identified by its
  specific combination of fields. If we are evaluating multiple different field
  combinations, then there will be multiple swarms. A Hypersearch Worker (HSW)
  will only instantiate and run one particle at a time. When done running a
  particle, another worker can pick it up, pick a new position, for it and run
  it based on the particle state information which is stored in each model table
  entry.

  Each particle has a generationIdx. It starts out at generation #0. Every time
  a model evaluation completes and the particle is moved to a different position
  (to evaluate a different model), the generation index is incremented.

  Every particle that is created has a unique particleId. The particleId
  is a string formed as '<workerConnectionId>.<particleIdx>', where particleIdx
  starts at 0 for each worker and increments by 1 every time a new particle
  is created by that worker.
  """

  # Per-worker counter used to build the '<workerID>.<particleIdx>' unique
  # particleId for each newly created particle.
  _nextParticleID = 0

  def __init__(self, hsObj, resultsDB, flattenedPermuteVars,
               swarmId=None, newFarFrom=None, evolveFromState=None,
               newFromClone=None, newParticleId=False):
    """ Create a particle.

    There are 3 fundamentally different methods of instantiating a particle:
    1.) You can instantiate a new one from scratch, at generation index #0. This
          particle gets a new particleId.
            required: swarmId
            optional: newFarFrom
            must be None: evolveFromState, newFromClone

    2.) You can instantiate one from savedState, in which case it's generation
          index is incremented (from the value stored in the saved state) and
          its particleId remains the same.
            required: evolveFromState
            optional:
            must be None: flattenedPermuteVars, swarmId, newFromClone

    3.) You can clone another particle, creating a new particle at the same
          generationIdx but a different particleId. This new particle will end
          up at exactly the same position as the one it was cloned from. If
          you want to move it to the next position, or just jiggle it a bit, call
          newPosition() or agitate() after instantiation.
            required: newFromClone
            optional:
            must be None: flattenedPermuteVars, swarmId, evolveFromState

    Parameters:
    --------------------------------------------------------------------
    hsObj:    The HypersearchV2 instance

    resultsDB: the ResultsDB instance that holds all the model results

    flattenedPermuteVars: dict() containing the (key, PermuteVariable) pairs
          of the flattened permutation variables as read from the permutations
          file.

    swarmId: String that represents the encoder names of the encoders that are
          to be included in this particle's model. Of the form
          'encoder1.encoder2'.
          Required for creation method #1.

    newFarFrom: If not None, this is a list of other particleState dicts in the
          swarm that we want to be as far away from as possible. Optional
          argument for creation method #1.

    evolveFromState: If not None, evolve an existing particle. This is a
          dict containing the particle's state. Preserve the particleId, but
          increment the generation index. Required for creation method #2.

    newFromClone: If not None, clone this other particle's position and generation
          index, with small random perturbations. This is a dict containing the
          particle's state. Required for creation method #3.

    newParticleId: Only applicable when newFromClone is True. Give the clone
          a new particle ID.
    """
    # Save constructor arguments
    self._hsObj = hsObj
    self.logger = hsObj.logger
    self._resultsDB = resultsDB

    # See the random number generator used for all the variables in this
    # particle. We will seed it differently based on the construction method,
    # below.
    self._rng = random.Random()
    self._rng.seed(42)

    # Setup our variable set by taking what's in flattenedPermuteVars and
    # stripping out vars that belong to encoders we are not using.
    # NOTE: this closure reads self.swarmId and (for PermuteChoices vars)
    # self.genIdx, so each construction path below must set those before
    # calling it.
    def _setupVars(flattenedPermuteVars):
      allowedEncoderNames = self.swarmId.split('.')
      self.permuteVars = copy.deepcopy(flattenedPermuteVars)

      # Remove fields we don't want.
      # Snapshot the key list because we pop() entries inside the loop.
      varNames = self.permuteVars.keys()
      for varName in varNames:
        # Remove encoders we're not using
        if ':' in varName:    # if an encoder
          if varName.split(':')[0] not in allowedEncoderNames:
            self.permuteVars.pop(varName)
            continue

        # All PermuteChoice variables need to know all prior results obtained
        # with each choice.
        if isinstance(self.permuteVars[varName], PermuteChoices):
          if self._hsObj._speculativeParticles:
            maxGenIdx = None
          else:
            maxGenIdx = self.genIdx-1

          resultsPerChoice = self._resultsDB.getResultsPerChoice(
              swarmId=self.swarmId, maxGenIdx=maxGenIdx, varName=varName)
          self.permuteVars[varName].setResultsPerChoice(
              resultsPerChoice.values())

    # Method #1
    # Create from scratch, optionally pushing away from others that already
    # exist.
    if swarmId is not None:
      assert (evolveFromState is None)
      assert (newFromClone is None)

      # Save construction param
      self.swarmId = swarmId

      # Assign a new unique ID to this particle
      self.particleId = "%s.%s" % (str(self._hsObj._workerID),
                                   str(Particle._nextParticleID))
      Particle._nextParticleID += 1

      # Init the generation index
      self.genIdx = 0

      # Setup the variables to initial locations.
      _setupVars(flattenedPermuteVars)

      # Push away from other particles?
      if newFarFrom is not None:
        for varName in self.permuteVars.iterkeys():
          otherPositions = []
          for particleState in newFarFrom:
            otherPositions.append(particleState['varStates'][varName]['position'])
          self.permuteVars[varName].pushAwayFrom(otherPositions, self._rng)

          # Give this particle a unique seed.
          self._rng.seed(str(otherPositions))

    # Method #2
    # Instantiate from saved state, preserving particleId but incrementing
    # generation index.
    elif evolveFromState is not None:
      assert (swarmId is None)
      assert (newFarFrom is None)
      assert (newFromClone is None)

      # Setup other variables from saved state
      self.particleId = evolveFromState['id']
      self.genIdx = evolveFromState['genIdx'] + 1
      self.swarmId = evolveFromState['swarmId']

      # Setup the variables to initial locations.
      _setupVars(flattenedPermuteVars)

      # Override the position and velocity of each variable from
      # saved state
      self.initStateFrom(self.particleId, evolveFromState, newBest=True)

      # Move it to the next position. We need the swarm best for this.
      self.newPosition()

    # Method #3
    # Clone another particle, producing a new particle at the same genIdx with
    # the same particleID (or, when newParticleId is True, a brand new ID).
    # This is used to re-run an orphaned model.
    elif newFromClone is not None:
      assert (swarmId is None)
      assert (newFarFrom is None)
      assert (evolveFromState is None)

      # Setup other variables from clone particle
      self.particleId = newFromClone['id']
      if newParticleId:
        self.particleId = "%s.%s" % (str(self._hsObj._workerID),
                                     str(Particle._nextParticleID))
        Particle._nextParticleID += 1

      self.genIdx = newFromClone['genIdx']
      self.swarmId = newFromClone['swarmId']

      # Setup the variables to initial locations.
      _setupVars(flattenedPermuteVars)

      # Override the position and velocity of each variable from
      # the clone
      self.initStateFrom(self.particleId, newFromClone, newBest=False)

    else:
      assert False, "invalid creation parameters"

    # Log it
    self.logger.debug("Created particle: %s" % (str(self)))

  def __repr__(self):
    return "Particle(swarmId=%s) [particleId=%s, genIdx=%d, " \
        "permuteVars=\n%s]" % (self.swarmId, self.particleId,
        self.genIdx, pprint.pformat(self.permuteVars, indent=4))

  def getState(self):
    """Get the particle state as a dict. This is enough information to
    instantiate this particle on another worker."""
    varStates = dict()
    for varName, var in self.permuteVars.iteritems():
      varStates[varName] = var.getState()

    return dict(id = self.particleId,
                genIdx = self.genIdx,
                swarmId = self.swarmId,
                varStates = varStates)

  def initStateFrom(self, particleId, particleState, newBest):
    """Init all of our variable positions, velocities, and optionally the best
    result and best position from the given particle.

    If newBest is true, we get the best result and position for this new
    generation from the resultsDB. This is used when evolving a particle
    because the bestResult and position as stored in the saved state was the
    best AT THE TIME THAT PARTICLE STARTED TO RUN and does not include the
    best since that particle completed.
    """
    # Get the update best position and result?
    if newBest:
      (bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)
    else:
      bestResult = bestPosition = None

    # Replace with the position and velocity of each variable from
    # saved state
    varStates = particleState['varStates']
    for varName in varStates.keys():
      varState = copy.deepcopy(varStates[varName])
      if newBest:
        varState['bestResult'] = bestResult
      if bestPosition is not None:
        varState['bestPosition'] = bestPosition[varName]
      self.permuteVars[varName].setState(varState)

  def copyEncoderStatesFrom(self, particleState):
    """Copy all encoder variables from particleState into this particle.

    Parameters:
    --------------------------------------------------------------
    particleState:        dict produced by a particle's getState() method
    """
    # Set this to false if you don't want the variable to move anymore
    # after we set the state
    allowedToMove = True

    for varName in particleState['varStates']:
      if ':' in varName:    # if an encoder

        # If this particle doesn't include this field, don't copy it
        if varName not in self.permuteVars:
          continue

        # Set the best position to the copied position
        state = copy.deepcopy(particleState['varStates'][varName])
        state['_position'] = state['position']
        state['bestPosition'] = state['position']

        if not allowedToMove:
          state['velocity'] = 0

        # Set the state now
        self.permuteVars[varName].setState(state)

        if allowedToMove:
          # Let the particle move in both directions from the best position
          # it found previously and set it's initial velocity to a known
          # fraction of the total distance.
          self.permuteVars[varName].resetVelocity(self._rng)

  def copyVarStatesFrom(self, particleState, varNames):
    """Copy specific variables from particleState into this particle.

    Parameters:
    --------------------------------------------------------------
    particleState:        dict produced by a particle's getState() method
    varNames:             which variables to copy
    """
    # Set this to false if you don't want the variable to move anymore
    # after we set the state
    allowedToMove = True

    for varName in particleState['varStates']:
      if varName in varNames:

        # If this particle doesn't include this field, don't copy it
        if varName not in self.permuteVars:
          continue

        # Set the best position to the copied position
        state = copy.deepcopy(particleState['varStates'][varName])
        state['_position'] = state['position']
        state['bestPosition'] = state['position']

        if not allowedToMove:
          state['velocity'] = 0

        # Set the state now
        self.permuteVars[varName].setState(state)

        if allowedToMove:
          # Let the particle move in both directions from the best position
          # it found previously and set it's initial velocity to a known
          # fraction of the total distance.
          self.permuteVars[varName].resetVelocity(self._rng)

  def getPosition(self):
    """Return the position of this particle. This returns a dict() of key
    value pairs where each key is the name of the flattened permutation
    variable and the value is its chosen value.

    Parameters:
    --------------------------------------------------------------
    retval:     dict() of flattened permutation choices
    """
    result = dict()
    for (varName, value) in self.permuteVars.iteritems():
      result[varName] = value.getPosition()

    return result

  @staticmethod
  def getPositionFromState(pState):
    """Return the position of a particle given its state dict.

    Parameters:
    --------------------------------------------------------------
    retval:     dict() of particle position, keys are the variable names,
                  values are their positions
    """
    result = dict()
    for (varName, value) in pState['varStates'].iteritems():
      result[varName] = value['position']

    return result

  def agitate(self):
    """Agitate this particle so that it is likely to go to a new position.
    Every time agitate is called, the particle is jiggled an even greater
    amount.

    Parameters:
    --------------------------------------------------------------
    retval:               None
    """
    for (varName, var) in self.permuteVars.iteritems():
      var.agitate()

    self.newPosition()

  def newPosition(self, whichVars=None):
    # TODO: incorporate data from choice variables....
    # TODO: make sure we're calling this when appropriate.
    """Choose a new position based on results obtained so far from all other
    particles.

    Parameters:
    --------------------------------------------------------------
    whichVars:       If not None, only move these variables
    retval:               new position
    """
    # Get the global best position for this swarm generation
    globalBestPosition = None
    # If speculative particles are enabled, use the global best considering
    # even particles in the current generation. This gives better results
    # but does not provide repeatable results because it depends on
    # worker timing
    if self._hsObj._speculativeParticles:
      genIdx = self.genIdx
    else:
      genIdx = self.genIdx - 1

    if genIdx >= 0:
      (bestModelId, _) = self._resultsDB.bestModelIdAndErrScore(self.swarmId, genIdx)
      if bestModelId is not None:
        (particleState, _, _, _, _) = self._resultsDB.getParticleInfo(bestModelId)
        globalBestPosition = Particle.getPositionFromState(particleState)

    # Update each variable
    for (varName, var) in self.permuteVars.iteritems():
      if whichVars is not None and varName not in whichVars:
        continue
      if globalBestPosition is None:
        var.newPosition(None, self._rng)
      else:
        var.newPosition(globalBestPosition[varName], self._rng)

    # get the new position
    position = self.getPosition()

    # Log the new position
    if self.logger.getEffectiveLevel() <= logging.DEBUG:
      msg = StringIO.StringIO()
      print >> msg, "New particle position: \n%s" % (pprint.pformat(position,
                                                      indent=4))
      print >> msg, "Particle variables:"
      for (varName, var) in self.permuteVars.iteritems():
        print >> msg, "  %s: %s" % (varName, str(var))
      self.logger.debug(msg.getvalue())
      msg.close()

    return position
class HsState(object):
"""This class encapsulates the Hypersearch state which we share with all
other workers. This state gets serialized into a JSON dict and written to
the engWorkerState field of the job record.
Whenever a worker changes this state, it does an atomic setFieldIfEqual to
insure it has the latest state as updated by any other worker as a base.
Here is an example snapshot of this state information:
swarms = {'a': {'status': 'completed', # 'active','completing','completed',
# or 'killed'
'bestModelId': <modelID>, # Only set for 'completed' swarms
'bestErrScore': <errScore>, # Only set for 'completed' swarms
'sprintIdx': 0,
},
'a.b': {'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 1,
}
}
sprints = [{'status': 'completed', # 'active','completing','completed'
'bestModelId': <modelID>, # Only set for 'completed' sprints
'bestErrScore': <errScore>, # Only set for 'completed' sprints
},
{'status': 'completing',
'bestModelId': <None>,
'bestErrScore': <None>
}
{'status': 'active',
'bestModelId': None
'bestErrScore': None
}
]
"""
def __init__(self, hsObj):
""" Create our state object.
Parameters:
---------------------------------------------------------------------
hsObj: Reference to the HypersesarchV2 instance
cjDAO: ClientJobsDAO instance
logger: logger to use
jobID: our JobID
"""
# Save constructor parameters
self._hsObj = hsObj
# Convenient access to the logger
self.logger = self._hsObj.logger
# This contains our current state, and local working changes
self._state = None
# This contains the state we last read from the database
self._priorStateJSON = None
# Set when we make a change to our state locally
self._dirty = False
# Read in the initial state
self.readStateFromDB()
def isDirty(self):
"""Return true if our local copy of the state has changed since the
last time we read from the DB.
"""
return self._dirty
def isSearchOver(self):
"""Return true if the search should be considered over."""
return self._state['searchOver']
def readStateFromDB(self):
"""Set our state to that obtained from the engWorkerState field of the
job record.
Parameters:
---------------------------------------------------------------------
stateJSON: JSON encoded state from job record
"""
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
['engWorkerState'])[0]
# Init if no prior state yet
if self._priorStateJSON is None:
swarms = dict()
# Fast Swarm, first and only sprint has one swarm for each field
# in fixedFields
if self._hsObj._fixedFields is not None:
print self._hsObj._fixedFields
encoderSet = []
for field in self._hsObj._fixedFields:
if field =='_classifierInput':
continue
encoderName = self.getEncoderKeyFromName(field)
assert encoderName in self._hsObj._encoderNames, "The field '%s' " \
" specified in the fixedFields list is not present in this " \
" model." % (field)
encoderSet.append(encoderName)
encoderSet.sort()
swarms['.'.join(encoderSet)] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Temporal prediction search, first sprint has N swarms of 1 field each,
# the predicted field may or may not be that one field.
elif self._hsObj._searchType == HsSearchType.temporal:
for encoderName in self._hsObj._encoderNames:
swarms[encoderName] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Classification prediction search, first sprint has N swarms of 1 field
# each where this field can NOT be the predicted field.
elif self._hsObj._searchType == HsSearchType.classification:
for encoderName in self._hsObj._encoderNames:
if encoderName == self._hsObj._predictedFieldEncoder:
continue
swarms[encoderName] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Legacy temporal. This is either a model that uses reconstruction or
# an older multi-step model that doesn't have a separate
# 'classifierOnly' encoder for the predicted field. Here, the predicted
# field must ALWAYS be present and the first sprint tries the predicted
# field only
elif self._hsObj._searchType == HsSearchType.legacyTemporal:
swarms[self._hsObj._predictedFieldEncoder] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
else:
raise RuntimeError("Unsupported search type: %s" % \
(self._hsObj._searchType))
# Initialize the state.
self._state = dict(
# The last time the state was updated by a worker.
lastUpdateTime = time.time(),
# Set from within setSwarmState() if we detect that the sprint we just
# completed did worse than a prior sprint. This stores the index of
# the last good sprint.
lastGoodSprint = None,
# Set from within setSwarmState() if lastGoodSprint is True and all
# sprints have completed.
searchOver = False,
# This is a summary of the active swarms - this information can also
# be obtained from the swarms entry that follows, but is summarized here
# for easier reference when viewing the state as presented by
# log messages and prints of the hsState data structure (by
# permutations_runner).
activeSwarms = swarms.keys(),
# All the swarms that have been created so far.
swarms = swarms,
# All the sprints that have completed or are in progress.
sprints = [{'status': 'active',
'bestModelId': None,
'bestErrScore': None}],
# The list of encoders we have "blacklisted" because they
# performed so poorly.
blackListedEncoders = [],
)
# This will do nothing if the value of engWorkerState is not still None.
self._hsObj._cjDAO.jobSetFieldIfEqual(
self._hsObj._jobID, 'engWorkerState', json.dumps(self._state), None)
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(
self._hsObj._jobID, ['engWorkerState'])[0]
assert (self._priorStateJSON is not None)
# Read state from the database
self._state = json.loads(self._priorStateJSON)
self._dirty = False
def writeStateToDB(self):
"""Update the state in the job record with our local changes (if any).
If we don't have the latest state in our priorStateJSON, then re-load
in the latest state and return False. If we were successful writing out
our changes, return True
Parameters:
---------------------------------------------------------------------
retval: True if we were successful writing out our changes
False if our priorState is not the latest that was in the DB.
In this case, we will re-load our state from the DB
"""
# If no changes, do nothing
if not self._dirty:
return True
# Set the update time
self._state['lastUpdateTime'] = time.time()
newStateJSON = json.dumps(self._state)
success = self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID,
'engWorkerState', str(newStateJSON), str(self._priorStateJSON))
if success:
self.logger.debug("Success changing hsState to: \n%s " % \
(pprint.pformat(self._state, indent=4)))
self._priorStateJSON = newStateJSON
# If no success, read in the current state from the DB
else:
self.logger.debug("Failed to change hsState to: \n%s " % \
(pprint.pformat(self._state, indent=4)))
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
['engWorkerState'])[0]
self._state = json.loads(self._priorStateJSON)
self.logger.info("New hsState has been set by some other worker to: "
" \n%s" % (pprint.pformat(self._state, indent=4)))
return success
def getEncoderNameFromKey(self, key):
""" Given an encoder dictionary key, get the encoder name.
Encoders are a sub-dict within model params, and in HSv2, their key
is structured like this for example:
'modelParams|sensorParams|encoders|home_winloss'
The encoderName is the last word in the | separated key name
"""
return key.split('|')[-1]
def getEncoderKeyFromName(self, name):
""" Given an encoder name, get the key.
Encoders are a sub-dict within model params, and in HSv2, their key
is structured like this for example:
'modelParams|sensorParams|encoders|home_winloss'
The encoderName is the last word in the | separated key name
"""
return 'modelParams|sensorParams|encoders|%s' % (name)
def getFieldContributions(self):
"""Return the field contributions statistics.
Parameters:
---------------------------------------------------------------------
retval: Dictionary where the keys are the field names and the values
are how much each field contributed to the best score.
"""
#in the fast swarm, there is only 1 sprint and field contributions are
#not defined
if self._hsObj._fixedFields is not None:
return dict(), dict()
# Get the predicted field encoder name
predictedEncoderName = self._hsObj._predictedFieldEncoder
# -----------------------------------------------------------------------
# Collect all the single field scores
fieldScores = []
for swarmId, info in self._state['swarms'].iteritems():
encodersUsed = swarmId.split('.')
if len(encodersUsed) != 1:
continue
field = self.getEncoderNameFromKey(encodersUsed[0])
bestScore = info['bestErrScore']
# If the bestScore is None, this swarm hasn't completed yet (this could
# happen if we're exiting because of maxModels), so look up the best
# score so far
if bestScore is None:
(_modelId, bestScore) = \
self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
fieldScores.append((bestScore, field))
# -----------------------------------------------------------------------
# If we only have 1 field that was tried in the first sprint, then use that
# as the base and get the contributions from the fields in the next sprint.
if self._hsObj._searchType == HsSearchType.legacyTemporal:
assert(len(fieldScores)==1)
(baseErrScore, baseField) = fieldScores[0]
for swarmId, info in self._state['swarms'].iteritems():
encodersUsed = swarmId.split('.')
if len(encodersUsed) != 2:
continue
fields = [self.getEncoderNameFromKey(name) for name in encodersUsed]
fields.remove(baseField)
fieldScores.append((info['bestErrScore'], fields[0]))
# The first sprint tried a bunch of fields, pick the worst performing one
# (within the top self._hsObj._maxBranching ones) as the base
else:
fieldScores.sort(reverse=True)
# If maxBranching was specified, pick the worst performing field within
# the top maxBranching+1 fields as our base, which will give that field
# a contribution of 0.
if self._hsObj._maxBranching > 0 \
and len(fieldScores) > self._hsObj._maxBranching:
baseErrScore = fieldScores[-self._hsObj._maxBranching-1][0]
else:
baseErrScore = fieldScores[0][0]
# -----------------------------------------------------------------------
# Prepare and return the fieldContributions dict
pctFieldContributionsDict = dict()
absFieldContributionsDict = dict()
# If we have no base score, can't compute field contributions. This can
# happen when we exit early due to maxModels or being cancelled
if baseErrScore is not None:
# If the base error score is 0, we can't compute a percent difference
# off of it, so move it to a very small float
if abs(baseErrScore) < 0.00001:
baseErrScore = 0.00001
for (errScore, field) in fieldScores:
if errScore is not None:
pctBetter = (baseErrScore - errScore) * 100.0 / baseErrScore
else:
pctBetter = 0.0
errScore = baseErrScore # for absFieldContribution
pctFieldContributionsDict[field] = pctBetter
absFieldContributionsDict[field] = baseErrScore - errScore
self.logger.debug("FieldContributions: %s" % (pctFieldContributionsDict))
return pctFieldContributionsDict, absFieldContributionsDict
def getAllSwarms(self, sprintIdx):
"""Return the list of all swarms in the given sprint.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx:
swarmIds.append(swarmId)
return swarmIds
def getActiveSwarms(self, sprintIdx=None):
"""Return the list of active swarms in the given sprint. These are swarms
which still need new particles created in them.
Parameters:
---------------------------------------------------------------------
sprintIdx: which sprint to query. If None, get active swarms from all
sprints
retval: list of active swarm Ids in the given sprint
"""
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if sprintIdx is not None and info['sprintIdx'] != sprintIdx:
continue
if info['status'] == 'active':
swarmIds.append(swarmId)
return swarmIds
def getNonKilledSwarms(self, sprintIdx):
  """Return the list of swarms in the given sprint that were not killed.
  This is called when we are trying to figure out which encoders to carry
  forward to the next sprint. We don't want to carry forward encoder
  combintations which were obviously bad (in killed swarms).

  Parameters:
  ---------------------------------------------------------------------
  sprintIdx:    which sprint to query
  retval:       list of non-killed swarm Ids in the given sprint
  """
  swarmIds = []
  # NOTE: items() instead of the Python-2-only iteritems(); equivalent for
  # iteration in Python 2 and also valid under Python 3.
  for swarmId, info in self._state['swarms'].items():
    if info['sprintIdx'] == sprintIdx and info['status'] != 'killed':
      swarmIds.append(swarmId)

  return swarmIds
def getCompletedSwarms(self):
  """Return the list of all completed swarms.

  Parameters:
  ---------------------------------------------------------------------
  retval:       list of completed swarm Ids
  """
  swarmIds = []
  # NOTE: items() instead of the Python-2-only iteritems(); equivalent for
  # iteration in Python 2 and also valid under Python 3.
  for swarmId, info in self._state['swarms'].items():
    if info['status'] == 'completed':
      swarmIds.append(swarmId)

  return swarmIds
def getCompletingSwarms(self):
  """Return the list of all completing swarms.

  Parameters:
  ---------------------------------------------------------------------
  retval:       list of completing swarm Ids
  """
  swarmIds = []
  # NOTE: items() instead of the Python-2-only iteritems(); equivalent for
  # iteration in Python 2 and also valid under Python 3.
  for swarmId, info in self._state['swarms'].items():
    if info['status'] == 'completing':
      swarmIds.append(swarmId)

  return swarmIds
def bestModelInCompletedSwarm(self, swarmId):
  """Return the best model ID and it's errScore from the given swarm.
  If the swarm has not completed yet, the bestModelID will be None.

  Parameters:
  ---------------------------------------------------------------------
  swarmId:      which swarm to query
  retval:       (modelId, errScore)
  """
  # Look the swarm's record up once and pull both fields out of it.
  info = self._state['swarms'][swarmId]
  return (info['bestModelId'], info['bestErrScore'])
def bestModelInCompletedSprint(self, sprintIdx):
  """Return the best model ID and it's errScore from the given sprint.
  If the sprint has not completed yet, the bestModelID will be None.

  Parameters:
  ---------------------------------------------------------------------
  sprintIdx:    which sprint to query
  retval:       (modelId, errScore)
  """
  # Look the sprint's record up once and pull both fields out of it.
  info = self._state['sprints'][sprintIdx]
  return (info['bestModelId'], info['bestErrScore'])
def bestModelInSprint(self, sprintIdx):
  """Return the best model ID and it's errScore from the given sprint,
  which may still be in progress. This returns the best score from all models
  in the sprint which have matured so far.

  Parameters:
  ---------------------------------------------------------------------
  sprintIdx:    which sprint to query
  retval:       (modelId, errScore)
  """
  # Track the winner across every swarm in this sprint. Start with an
  # infinitely bad score so any real score beats it.
  winnerId = None
  winnerScore = numpy.inf

  for candidateSwarm in self.getAllSwarms(sprintIdx):
    (candidateId, candidateScore) = \
        self._hsObj._resultsDB.bestModelIdAndErrScore(candidateSwarm)
    if candidateScore < winnerScore:
      winnerId = candidateId
      winnerScore = candidateScore

  return (winnerId, winnerScore)
def setSwarmState(self, swarmId, newStatus):
  """Change the given swarm's state to 'newStatus'. When 'newStatus' is
  'completed', the swarm's bestModelId and bestErrScore are looked up from
  the results DB and recorded into the swarm's state.

  As a side effect, this also re-computes the status of the sprint that the
  swarm belongs to, and when that sprint completes, may set 'lastGoodSprint'
  and 'searchOver' in our state.

  Parameters:
  ---------------------------------------------------------------------
  swarmId:      swarm Id
  newStatus:    new status, either 'active', 'completing', 'completed', or
                  'killed'
  """
  assert (newStatus in ['active', 'completing', 'completed', 'killed'])

  # Set the swarm status
  swarmInfo = self._state['swarms'][swarmId]
  # Nothing to do if the status isn't actually changing.
  if swarmInfo['status'] == newStatus:
    return

  # If some other worker noticed it as completed, setting it to completing
  # is obviously old information....
  if swarmInfo['status'] == 'completed' and newStatus == 'completing':
    return

  self._dirty = True
  swarmInfo['status'] = newStatus
  if newStatus == 'completed':
    # Record this swarm's winner from the results DB.
    (modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
    swarmInfo['bestModelId'] = modelId
    swarmInfo['bestErrScore'] = errScore

  # If no longer active, remove it from the activeSwarms entry
  if newStatus != 'active' and swarmId in self._state['activeSwarms']:
    self._state['activeSwarms'].remove(swarmId)

  # If new status is 'killed', kill off any running particles in that swarm
  if newStatus=='killed':
    self._hsObj.killSwarmParticles(swarmId)

  # In case speculative particles are enabled, make sure we generate a new
  # swarm at this time if all of the swarms in the current sprint have
  # completed. This will insure that we don't mark the sprint as completed
  # before we've created all the possible swarms.
  sprintIdx = swarmInfo['sprintIdx']
  # Called for its side effects only (it may add swarms to the sprint);
  # the (active, noMoreSprints) return value is deliberately ignored here.
  self.isSprintActive(sprintIdx)

  # Update the sprint status. Check all the swarms that belong to this sprint.
  # If they are all completed, the sprint is completed.
  sprintInfo = self._state['sprints'][sprintIdx]

  # Tally swarm statuses for this sprint and collect each completed swarm's
  # best model/score so we can pick the sprint winner below.
  statusCounts = dict(active=0, completing=0, completed=0, killed=0)
  bestModelIds = []
  bestErrScores = []
  for info in self._state['swarms'].itervalues():
    if info['sprintIdx'] != sprintIdx:
      continue
    statusCounts[info['status']] += 1
    if info['status'] == 'completed':
      bestModelIds.append(info['bestModelId'])
      bestErrScores.append(info['bestErrScore'])

  # Sprint is 'active' while any of its swarms is active, else 'completing'
  # while any is completing, else 'completed' (killed swarms count as done).
  if statusCounts['active'] > 0:
    sprintStatus = 'active'
  elif statusCounts['completing'] > 0:
    sprintStatus = 'completing'
  else:
    sprintStatus = 'completed'
  sprintInfo['status'] = sprintStatus

  # If the sprint is complete, get the best model from all of its swarms and
  # store that as the sprint best
  if sprintStatus == 'completed':
    if len(bestErrScores) > 0:
      whichIdx = numpy.array(bestErrScores).argmin()
      sprintInfo['bestModelId'] = bestModelIds[whichIdx]
      sprintInfo['bestErrScore'] = bestErrScores[whichIdx]
    else:
      # This sprint was empty, most likely because all particles were
      # killed. Give it a huge error score
      sprintInfo['bestModelId'] = 0
      sprintInfo['bestErrScore'] = numpy.inf

    # See if our best err score got NO BETTER as compared to a previous
    # sprint. If so, stop exploring subsequent sprints (lastGoodSprint
    # is no longer None).
    bestPrior = numpy.inf
    for idx in range(sprintIdx):
      if self._state['sprints'][idx]['status'] == 'completed':
        (_, errScore) = self.bestModelInCompletedSprint(idx)
        if errScore is None:
          errScore = numpy.inf
      else:
        # Prior sprints that haven't completed don't contribute a score.
        errScore = numpy.inf

      if errScore < bestPrior:
        bestPrior = errScore

    if sprintInfo['bestErrScore'] >= bestPrior:
      self._state['lastGoodSprint'] = sprintIdx-1

    # If ALL sprints up to the last good one are done, the search is now over
    if self._state['lastGoodSprint'] is not None \
          and not self.anyGoodSprintsActive():
      self._state['searchOver'] = True
def anyGoodSprintsActive(self):
  """Return True if there are any more good sprints still being explored.
  A 'good' sprint is one that is earlier than where we detected an increase
  in error from sprint to subsequent sprint.
  """
  lastGood = self._state['lastGoodSprint']
  # If a last-good sprint was recorded, only sprints up to and including it
  # count; otherwise every sprint is still a candidate.
  if lastGood is not None:
    candidates = self._state['sprints'][0:lastGood + 1]
  else:
    candidates = self._state['sprints']

  return any(sprint['status'] == 'active' for sprint in candidates)
def isSprintCompleted(self, sprintIdx):
  """Return True if the given sprint has completed."""
  sprints = self._state['sprints']
  # A sprint that doesn't exist yet can't be completed.
  if sprintIdx >= len(sprints):
    return False
  return sprints[sprintIdx]['status'] == 'completed'
def killUselessSwarms(self):
  """See if we can kill off some speculative swarms. If an earlier sprint
  has finally completed, we can now tell which fields should *really* be present
  in the sprints we've already started due to speculation, and kill off the
  swarms that should not have been included.
  """
  # Get number of existing sprints
  numExistingSprints = len(self._state['sprints'])

  # Should we bother killing useless swarms?
  # There is nothing to prune until we are past the first speculative sprint;
  # legacyTemporal searches start one sprint later than the other types.
  if self._hsObj._searchType == HsSearchType.legacyTemporal:
    if numExistingSprints <= 2:
      return
  else:
    if numExistingSprints <= 1:
      return

  # Form completedSwarms as a list of tuples, each tuple contains:
  #  (swarmName, swarmState, swarmBestErrScore)
  # ex. completedSwarms:
  #    [('a', {...}, 1.4),
  #     ('b', {...}, 2.0),
  #     ('c', {...}, 3.0)]
  completedSwarms = self.getCompletedSwarms()
  completedSwarms = [(swarm, self._state["swarms"][swarm],
                      self._state["swarms"][swarm]["bestErrScore"]) \
                                                for swarm in completedSwarms]

  # Form the completedMatrix. Each row corresponds to a sprint. Each row
  #  contains the list of swarm tuples that belong to that sprint, sorted
  #  by best score. Each swarm tuple contains (swarmName, swarmState,
  #  swarmBestErrScore).
  # ex. completedMatrix:
  #    [(('a', {...}, 1.4), ('b', {...}, 2.0), ('c', {...}, 3.0)),
  #     (('a.b', {...}, 3.0), ('b.c', {...}, 4.0))]
  completedMatrix = [[] for i in range(numExistingSprints)]
  for swarm in completedSwarms:
    completedMatrix[swarm[1]["sprintIdx"]].append(swarm)
  # Sort each sprint's swarms by best error score (index 2 of the tuple).
  for sprint in completedMatrix:
    sprint.sort(key=itemgetter(2))

  # Form activeSwarms as a list of tuples, each tuple contains:
  #  (swarmName, swarmState, swarmBestErrScore)
  # Include all activeSwarms and completingSwarms
  # ex. activeSwarms:
  #    [('d', {...}, 1.4),
  #     ('e', {...}, 2.0),
  #     ('f', {...}, 3.0)]
  activeSwarms = self.getActiveSwarms()
  # Append the completing swarms
  activeSwarms.extend(self.getCompletingSwarms())
  activeSwarms = [(swarm, self._state["swarms"][swarm],
                   self._state["swarms"][swarm]["bestErrScore"]) \
                                                for swarm in activeSwarms]

  # Form the activeMatrix. Each row corresponds to a sprint. Each row
  #  contains the list of swarm tuples that belong to that sprint, sorted
  #  by best score. Each swarm tuple contains (swarmName, swarmState,
  #  swarmBestErrScore)
  # ex. activeMatrix:
  #    [(('d', {...}, 1.4), ('e', {...}, 2.0), ('f', {...}, 3.0)),
  #     (('d.e', {...}, 3.0), ('e.f', {...}, 4.0))]
  activeMatrix = [[] for i in range(numExistingSprints)]
  for swarm in activeSwarms:
    activeMatrix[swarm[1]["sprintIdx"]].append(swarm)
  for sprint in activeMatrix:
    sprint.sort(key=itemgetter(2))


  # Figure out which active swarms to kill
  toKill = []
  for i in range(1, numExistingSprints):
    for swarm in activeMatrix[i]:
      curSwarmEncoders = swarm[0].split(".")

      # If previous sprint is complete, get the best swarm and kill all active
      # sprints that are not supersets
      # NOTE(review): the guard tests that the previous sprint has no
      # active/completing swarms left; completedMatrix[i-1][0] would raise
      # IndexError if that sprint also had no completed swarms -- presumably
      # that combination cannot occur once swarms exist in sprint i; confirm.
      if(len(activeMatrix[i-1])==0):
        # If we are trying all possible 3 field combinations, don't kill any
        # off in sprint 2
        if i==2 and (self._hsObj._tryAll3FieldCombinations or \
              self._hsObj._tryAll3FieldCombinationsWTimestamps):
          pass
        else:
          # A surviving swarm must contain every encoder of the previous
          # sprint's best swarm; otherwise it is marked for killing.
          bestInPrevious = completedMatrix[i-1][0]
          bestEncoders = bestInPrevious[0].split('.')
          for encoder in bestEncoders:
            if not encoder in curSwarmEncoders:
              toKill.append(swarm)

      # if there are more than two completed encoders sets that are complete and
      # are worse than at least one active swarm in the previous sprint. Remove
      # any combinations that have any pair of them since they cannot have the best encoder.
      #elif(len(completedMatrix[i-1])>1):
      #  for completedSwarm in completedMatrix[i-1]:
      #    activeMatrix[i-1][0][2]<completed

  # Mark the bad swarms as killed
  if len(toKill) > 0:
    print "ParseMe: Killing encoders:" + str(toKill)

  for swarm in toKill:
    self.setSwarmState(swarm[0], "killed")

  return
def isSprintActive(self, sprintIdx):
  """If the given sprint exists and is active, return active=True.

  If the sprint does not exist yet, this call will create it (and return
  active=True). If it already exists, but is completing or complete, return
  active=False.

  If sprintIdx is past the end of the possible sprints, return
    active=False, noMoreSprints=True

  IMPORTANT: When speculative particles are enabled, this call has some
  special processing to handle speculative sprints:

    * When creating a new speculative sprint (creating sprint N before
    sprint N-1 has completed), it initially only puts in only ONE swarm into
    the sprint.

    * Every time it is asked if sprint N is active, it also checks to see if
    it is time to add another swarm to the sprint, and adds a new swarm if
    appropriate before returning active=True

    * We decide it is time to add a new swarm to a speculative sprint when ALL
    of the currently active swarms in the sprint have all the workers they
    need (number of running (not mature) particles is _minParticlesPerSwarm).
    This means that we have capacity to run additional particles in a new
    swarm.

  It is expected that the sprints will be checked IN ORDER from 0 on up. (It
  is an error not to) The caller should always try to allocate from the first
  active sprint it finds. If it can't, then it can call this again to
  find/create the next active sprint.

  Parameters:
  ---------------------------------------------------------------------
  retval:               (active, noMoreSprints)
                            active: True if the given sprint is active
                            noMoreSprints: True if there are no more sprints
                              possible
  """
  # Loop until we either have an answer or we successfully write our updated
  # state to the DB (writeStateToDB() failing means another worker changed
  # the state underneath us, so we retry with the refreshed state).
  while True:
    numExistingSprints = len(self._state['sprints'])

    # If this sprint already exists, see if it is active
    if sprintIdx <= numExistingSprints-1:

      # With speculation off, it's simple, just return whether or not the
      # asked for sprint has active status
      if not self._hsObj._speculativeParticles:
        active = (self._state['sprints'][sprintIdx]['status'] == 'active')
        return (active, False)

      # With speculation on, if the sprint is still marked active, we also
      # need to see if it's time to add a new swarm to it.
      else:
        active = (self._state['sprints'][sprintIdx]['status'] == 'active')
        if not active:
          return (active, False)

        # See if all of the existing swarms are at capacity (have all the
        # workers they need):
        activeSwarmIds = self.getActiveSwarms(sprintIdx)
        swarmSizes = [self._hsObj._resultsDB.getParticleInfos(swarmId,
                              matured=False)[0] for swarmId in activeSwarmIds]
        notFullSwarms = [len(swarm) for swarm in swarmSizes \
                         if len(swarm) < self._hsObj._minParticlesPerSwarm]

        # If some swarms have room return that the swarm is active.
        if len(notFullSwarms) > 0:
          return (True, False)

        # If the existing swarms are at capacity, we will fall through to the
        # logic below which tries to add a new swarm to the sprint.

    # Stop creating new sprints?
    if self._state['lastGoodSprint'] is not None:
      return (False, True)

    # if fixedFields is set, we are running a fast swarm and only run sprint0
    if self._hsObj._fixedFields is not None:
      return (False, True)

    # ----------------------------------------------------------------------
    # Get the best model (if there is one) from the prior sprint. That gives
    # us the base encoder set for the next sprint. For sprint zero make sure
    # it does not take the last sprintidx because of wrapping.
    if sprintIdx > 0 \
          and self._state['sprints'][sprintIdx-1]['status'] == 'completed':
      (bestModelId, _) = self.bestModelInCompletedSprint(sprintIdx-1)
      (particleState, _, _, _, _) = self._hsObj._resultsDB.getParticleInfo(
                                                                  bestModelId)
      bestSwarmId = particleState['swarmId']
      baseEncoderSets = [bestSwarmId.split('.')]

    # If there is no best model yet, then use all encoder sets from the prior
    # sprint that were not killed
    else:
      bestSwarmId = None
      particleState = None
      # Build up more combinations, using ALL of the sets in the current
      # sprint.
      # NOTE(review): when sprintIdx == 0 this queries sprint -1 (Python
      # negative indexing wraps to the last sprint) -- presumably sprint 0
      # never reaches this path with swarms present (see the "wrapping"
      # comment above); confirm.
      baseEncoderSets = []
      for swarmId in self.getNonKilledSwarms(sprintIdx-1):
        baseEncoderSets.append(swarmId.split('.'))

    # ----------------------------------------------------------------------
    # Which encoders should we add to the current base set?
    encoderAddSet = []

    # If we have constraints on how many fields we carry forward into
    # subsequent sprints (either nupic.hypersearch.max.field.branching or
    # nupic.hypersearch.min.field.contribution was set), then be more
    # picky about which fields we add in.
    limitFields = False
    if self._hsObj._maxBranching > 0 \
          or self._hsObj._minFieldContribution >= 0:
      if self._hsObj._searchType == HsSearchType.temporal or \
          self._hsObj._searchType == HsSearchType.classification:
        if sprintIdx >= 1:
          limitFields = True
          baseSprintIdx = 0
      elif self._hsObj._searchType == HsSearchType.legacyTemporal:
        if sprintIdx >= 2:
          limitFields = True
          baseSprintIdx = 1
      else:
        raise RuntimeError("Unimplemented search type %s" % \
                                (self._hsObj._searchType))


    # Only add top _maxBranching encoders to the swarms?
    if limitFields:

      # Get field contributions to filter added fields
      pctFieldContributions, absFieldContributions = \
                                      self.getFieldContributions()
      toRemove = []
      self.logger.debug("FieldContributions min: %s" % \
                        (self._hsObj._minFieldContribution))
      # Drop any field whose percent contribution is below the configured
      # minimum; the rest are kept as candidates.
      for fieldname in pctFieldContributions:
        if pctFieldContributions[fieldname] < self._hsObj._minFieldContribution:
          self.logger.debug("FieldContributions removing: %s" % (fieldname))
          toRemove.append(self.getEncoderKeyFromName(fieldname))
        else:
          self.logger.debug("FieldContributions keeping: %s" % (fieldname))


      # Grab the top maxBranching base sprint swarms.
      swarms = self._state["swarms"]
      sprintSwarms = [(swarm, swarms[swarm]["bestErrScore"]) \
          for swarm in swarms if swarms[swarm]["sprintIdx"] == baseSprintIdx]
      sprintSwarms = sorted(sprintSwarms, key=itemgetter(1))
      if self._hsObj._maxBranching > 0:
        sprintSwarms = sprintSwarms[0:self._hsObj._maxBranching]

      # Create encoder set to generate further swarms.
      for swarm in sprintSwarms:
        swarmEncoders = swarm[0].split(".")
        for encoder in swarmEncoders:
          if not encoder in encoderAddSet:
            encoderAddSet.append(encoder)
      encoderAddSet = [encoder for encoder in encoderAddSet \
                       if not str(encoder) in toRemove]

    # If no limit on the branching or min contribution, simply use all of the
    # encoders.
    else:
      encoderAddSet = self._hsObj._encoderNames


    # -----------------------------------------------------------------------
    # Build up the new encoder combinations for the next sprint.
    newSwarmIds = set()

    # See if the caller wants to try more extensive field combinations with
    # 3 fields.
    if (self._hsObj._searchType == HsSearchType.temporal \
         or self._hsObj._searchType == HsSearchType.legacyTemporal) \
        and sprintIdx == 2 \
        and (self._hsObj._tryAll3FieldCombinations or \
             self._hsObj._tryAll3FieldCombinationsWTimestamps):

      if self._hsObj._tryAll3FieldCombinations:
        newEncoders = set(self._hsObj._encoderNames)
        if self._hsObj._predictedFieldEncoder in newEncoders:
          newEncoders.remove(self._hsObj._predictedFieldEncoder)
      else:
        # Just make sure the timestamp encoders are part of the mix
        newEncoders = set(encoderAddSet)
        if self._hsObj._predictedFieldEncoder in newEncoders:
          newEncoders.remove(self._hsObj._predictedFieldEncoder)
        for encoder in self._hsObj._encoderNames:
          if encoder.endswith('_timeOfDay') or encoder.endswith('_weekend') \
              or encoder.endswith('_dayOfWeek'):
            newEncoders.add(encoder)

      # Each new swarm is (2 chosen encoders) + the predicted field encoder,
      # sorted and joined with '.' to form the canonical swarm Id.
      allCombos = list(itertools.combinations(newEncoders, 2))
      for combo in allCombos:
        newSet = list(combo)
        newSet.append(self._hsObj._predictedFieldEncoder)
        newSet.sort()
        newSwarmId = '.'.join(newSet)
        if newSwarmId not in self._state['swarms']:
          newSwarmIds.add(newSwarmId)

          # If a speculative sprint, only add the first encoder, if not add
          # all of them.
          if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
            break

    # Else, we only build up by adding 1 new encoder to the best combination(s)
    # we've seen from the prior sprint
    else:
      for baseEncoderSet in baseEncoderSets:
        for encoder in encoderAddSet:
          if encoder not in self._state['blackListedEncoders'] \
              and encoder not in baseEncoderSet:
            newSet = list(baseEncoderSet)
            newSet.append(encoder)
            newSet.sort()
            newSwarmId = '.'.join(newSet)
            if newSwarmId not in self._state['swarms']:
              newSwarmIds.add(newSwarmId)

              # If a speculative sprint, only add the first encoder, if not add
              # all of them.
              if (len(self.getActiveSwarms(sprintIdx-1)) > 0):
                break

    # ----------------------------------------------------------------------
    # Sort the new swarm Ids
    newSwarmIds = sorted(newSwarmIds)

    # If no more swarms can be found for this sprint...
    if len(newSwarmIds) == 0:
      # if sprint is not an empty sprint return that it is active but do not
      #  add anything to it.
      if len(self.getAllSwarms(sprintIdx)) > 0:
        return (True, False)

      # If this is an empty sprint and we couldn't find any new swarms to
      #  add (only bad fields are remaining), the search is over
      else:
        return (False, True)

    # Add this sprint and the swarms that are in it to our state
    self._dirty = True

    # Add in the new sprint if necessary
    if len(self._state["sprints"]) == sprintIdx:
      self._state['sprints'].append({'status': 'active',
                                     'bestModelId': None,
                                     'bestErrScore': None})

    # Add in the new swarm(s) to the sprint
    for swarmId in newSwarmIds:
      self._state['swarms'][swarmId] = {'status': 'active',
                                        'bestModelId': None,
                                        'bestErrScore': None,
                                        'sprintIdx': sprintIdx}

    # Update the list of active swarms
    self._state['activeSwarms'] = self.getActiveSwarms()

    # Try to set new state
    success = self.writeStateToDB()

    # Return result if successful
    if success:
      return (True, False)

    # No success, loop back with the updated state and try again
class HsSearchType(object):
  """This class enumerates the types of search we can perform.

  The values are compared against self._hsObj._searchType when deciding how
  sprints are built up and pruned (see isSprintActive() and
  killUselessSwarms()).
  """
  # Temporal (prediction) search.
  temporal = 'temporal'
  # Older temporal search behavior; treated specially when choosing the base
  # sprint for field-contribution limiting and when pruning swarms.
  legacyTemporal = 'legacyTemporal'
  # Classification search.
  classification = 'classification'
class HypersearchV2(object):
"""The v2 Hypersearch implementation. This is one example of a Hypersearch
implementation that can be used by the HypersearchWorker. Other implementations
just have to implement the following methods:
createModels()
recordModelProgress()
getPermutationVariables()
getComplexVariableLabelLookupDict()
This implementation uses a hybrid of Particle Swarm Optimization (PSO) and
the old "ronamatic" logic from Hypersearch V1. Variables which are lists of
choices (i.e. string values, integer values that represent different
categories) are searched using the ronamatic logic whereas floats and
integers that represent a range of values are searched using PSO.
For prediction experiments, this implementation starts out evaluating only
single encoder models that encode the predicted field. This is the first
"sprint". Once it finds the optimum set of variables for that, it starts to
build up by adding in combinations of 2 fields (the second "sprint"), where
one of them is the predicted field. Once the top 2-field combination(s) are
discovered, it starts to build up on those by adding in a 3rd field, etc.
Each new set of field combinations is called a sprint.
For classification experiments, this implementation starts out evaluating two
encoder models, where one of the encoders is the classified field. This is the
first "sprint". Once it finds the optimum set of variables for that, it starts
to build up by evauating combinations of 3 fields (the second "sprint"), where
two of them are the best 2 fields found in the first sprint (one of those of
course being the classified field). Once the top 3-field combination(s) are
discovered, it starts to build up on those by adding in a 4th field, etc.
In classification models, the classified field, although it has an encoder, is
not sent "into" the network. Rather, the encoded value just goes directly to
the classifier as the classifier input.
At any one time, there are 1 or more swarms being evaluated at the same time -
each swarm representing a certain field combination within the sprint. We try
to load balance the swarms and have the same number of models evaluated for
each swarm at any one time. Each swarm contains N particles, and we also try
to keep N >= some mininum number. Each position of a particle corresponds to a
model.
When a worker is ready to evaluate a new model, it first picks the swarm with
the least number of models so far (least number of evaluated particle
positions). If that swarm does not have the min number of particles in it yet,
or does not yet have a particle created by this worker, the worker will create
a new particle, else it will choose another particle from that swarm that it
had created in the past which has the least number of evaluated positions so
far.
"""
def __init__(self, searchParams, workerID=None, cjDAO=None, jobID=None,
logLevel=None):
"""Instantiate the HyperseachV2 instance.
Parameters:
----------------------------------------------------------------------
searchParams: a dict of the job's search parameters. The format is:
persistentJobGUID: REQUIRED.
Persistent, globally-unique identifier for this job
for use in constructing persistent model checkpoint
keys. MUST be compatible with S3 key-naming rules, but
MUST NOT contain forward slashes. This GUID is
expected to retain its global uniqueness across
clusters and cluster software updates (unlike the
record IDs in the Engine's jobs table, which recycle
upon table schema change and software update). In the
future, this may also be instrumental for checkpoint
garbage collection.
permutationsPyFilename:
OPTIONAL - path to permutations.py file
permutationsPyContents:
OPTIONAL - JSON encoded string with
contents of permutations.py file
descriptionPyContents:
OPTIONAL - JSON encoded string with
contents of base description.py file
description: OPTIONAL - JSON description of the search
createCheckpoints: OPTIONAL - Whether to create checkpoints
useTerminators OPTIONAL - True of False (default config.xml). When set
to False, the model and swarm terminators
are disabled
maxModels: OPTIONAL - max # of models to generate
NOTE: This is a deprecated location for this
setting. Now, it should be specified through
the maxModels variable within the permutations
file, or maxModels in the JSON description
dummyModel: OPTIONAL - Either (True/False) or a dict of parameters
for a dummy model. If this key is absent,
a real model is trained.
See utils.py/OPFDummyModel runner for the
schema of the dummy parameters
speculativeParticles OPTIONAL - True or False (default obtained from
nupic.hypersearch.speculative.particles.default
configuration property). See note below.
NOTE: The caller must provide just ONE of the following to describe the
hypersearch:
1.) permutationsPyFilename
OR 2.) permutationsPyContents & permutationsPyContents
OR 3.) description
The schema for the description element can be found at:
"py/nupic/frameworks/opf/expGenerator/experimentDescriptionSchema.json"
NOTE about speculativeParticles: If true (not 0), hypersearch workers will
go ahead and create and run particles in subsequent sprints and
generations before the current generation or sprint has been completed. If
false, a worker will wait in a sleep loop until the current generation or
sprint has finished before choosing the next particle position or going
into the next sprint. When true, the best model can be found faster, but
results are less repeatable due to the randomness of when each worker
completes each particle. This property can be overridden via the
speculativeParticles element of the Hypersearch job params.
workerID: our unique Hypersearch worker ID
cjDAO: ClientJobsDB Data Access Object
jobID: job ID for this hypersearch job
logLevel: override logging level to this value, if not None
"""
# Instantiate our logger
self.logger = logging.getLogger(".".join( ['com.numenta',
self.__class__.__module__, self.__class__.__name__]))
# Override log level?
if logLevel is not None:
self.logger.setLevel(logLevel)
# This is how to check the logging level
#if self.logger.getEffectiveLevel() <= logging.DEBUG:
# print "at debug level"
# Init random seed
random.seed(42)
# Save the search info
self._searchParams = searchParams
self._workerID = workerID
self._cjDAO = cjDAO
self._jobID = jobID
# Log search params
self.logger.info("searchParams: \n%s" % (pprint.pformat(
clippedObj(searchParams))))
self._createCheckpoints = self._searchParams.get('createCheckpoints',
False)
self._maxModels = self._searchParams.get('maxModels', None)
if self._maxModels == -1:
self._maxModels = None
self._predictionCacheMaxRecords = self._searchParams.get('predictionCacheMaxRecords', None)
# Speculative particles?
self._speculativeParticles = self._searchParams.get('speculativeParticles',
bool(int(Configuration.get(
'nupic.hypersearch.speculative.particles.default'))))
self._speculativeWaitSecondsMax = float(Configuration.get(
'nupic.hypersearch.speculative.particles.sleepSecondsMax'))
# Maximum Field Branching
self._maxBranching= int(Configuration.get(
'nupic.hypersearch.max.field.branching'))
# Minimum Field Contribution
self._minFieldContribution= float(Configuration.get(
'nupic.hypersearch.min.field.contribution'))
# This gets set if we detect that the job got cancelled
self._jobCancelled = False
# Use terminators (typically set by permutations_runner.py)
if 'useTerminators' in self._searchParams:
useTerminators = self._searchParams['useTerminators']
useTerminators = str(int(useTerminators))
Configuration.set('nupic.hypersearch.enableModelTermination', useTerminators)
Configuration.set('nupic.hypersearch.enableModelMaturity', useTerminators)
Configuration.set('nupic.hypersearch.enableSwarmTermination', useTerminators)
# Special test mode?
if 'NTA_TEST_exitAfterNModels' in os.environ:
self._maxModels = int(os.environ['NTA_TEST_exitAfterNModels'])
self._dummyModel = self._searchParams.get('dummyModel', None)
# Holder for temporary directory, if any, that needs to be cleaned up
# in our close() method.
self._tempDir = None
try:
# Get the permutations info. This can be either:
# 1.) JSON encoded search description (this will be used to generate a
# permutations.py and description.py files using ExpGenerator)
# 2.) path to a pre-generated permutations.py file. The description.py is
# assumed to be in the same directory
# 3.) contents of the permutations.py and descrption.py files.
if 'description' in self._searchParams:
if ('permutationsPyFilename' in self._searchParams or
'permutationsPyContents' in self._searchParams or
'descriptionPyContents' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or"
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
# Calculate training period for anomaly models
searchParamObj = self._searchParams
anomalyParams = searchParamObj['description'].get('anomalyParams',
dict())
# This is used in case searchParamObj['description']['anomalyParams']
# is set to None.
if anomalyParams is None:
anomalyParams = dict()
if (('autoDetectWaitRecords' not in anomalyParams) or
(anomalyParams['autoDetectWaitRecords'] is None)):
streamDef = self._getStreamDef(searchParamObj['description'])
from nupic.data.stream_reader import StreamReader
try:
streamReader = StreamReader(streamDef, isBlocking=False,
maxTimeout=0, eofOnTimeout=True)
anomalyParams['autoDetectWaitRecords'] = \
streamReader.getDataRowCount()
except Exception:
anomalyParams['autoDetectWaitRecords'] = None
self._searchParams['description']['anomalyParams'] = anomalyParams
# Call the experiment generator to generate the permutations and base
# description file.
outDir = self._tempDir = tempfile.mkdtemp()
expGenerator([
'--description=%s' % (
json.dumps(self._searchParams['description'])),
'--version=v2',
'--outDir=%s' % (outDir)])
# Get the name of the permutations script.
permutationsScript = os.path.join(outDir, 'permutations.py')
elif 'permutationsPyFilename' in self._searchParams:
if ('description' in self._searchParams or
'permutationsPyContents' in self._searchParams or
'descriptionPyContents' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or "
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
permutationsScript = self._searchParams['permutationsPyFilename']
elif 'permutationsPyContents' in self._searchParams:
if ('description' in self._searchParams or
'permutationsPyFilename' in self._searchParams):
raise RuntimeError(
"Either 'description', 'permutationsPyFilename' or"
"'permutationsPyContents' & 'permutationsPyContents' should be "
"specified, but not two or more of these at once.")
assert ('descriptionPyContents' in self._searchParams)
# Generate the permutations.py and description.py files
outDir = self._tempDir = tempfile.mkdtemp()
permutationsScript = os.path.join(outDir, 'permutations.py')
fd = open(permutationsScript, 'w')
fd.write(self._searchParams['permutationsPyContents'])
fd.close()
fd = open(os.path.join(outDir, 'description.py'), 'w')
fd.write(self._searchParams['descriptionPyContents'])
fd.close()
else:
raise RuntimeError ("Either 'description' or 'permutationsScript' must be"
"specified")
# Get the base path of the experiment and read in the base description
self._basePath = os.path.dirname(permutationsScript)
self._baseDescription = open(os.path.join(self._basePath,
'description.py')).read()
self._baseDescriptionHash = hashlib.md5(self._baseDescription).digest()
# Read the model config to figure out the inference type
modelDescription, _ = opfhelpers.loadExperiment(self._basePath)
# Read info from permutations file. This sets up the following member
# variables:
# _predictedField
# _permutations
# _flattenedPermutations
# _encoderNames
# _reportKeys
# _filterFunc
# _optimizeKey
# _maximize
# _dummyModelParamsFunc
self._readPermutationsFile(permutationsScript, modelDescription)
# Fill in and save the base description and permutations file contents
# if they haven't already been filled in by another worker
if self._cjDAO is not None:
updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='genBaseDescription',
curValue=None,
newValue = self._baseDescription)
if updated:
permContents = open(permutationsScript).read()
self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
fieldName='genPermutations',
curValue=None,
newValue = permContents)
# if user provided an artificialMetric, force use of the dummy model
if self._dummyModelParamsFunc is not None:
if self._dummyModel is None:
self._dummyModel = dict()
# If at DEBUG log level, print out permutations info to the log
if self.logger.getEffectiveLevel() <= logging.DEBUG:
msg = StringIO.StringIO()
print >> msg, "Permutations file specifications: "
info = dict()
for key in ['_predictedField', '_permutations',
'_flattenedPermutations', '_encoderNames',
'_reportKeys', '_optimizeKey', '_maximize']:
info[key] = getattr(self, key)
print >> msg, pprint.pformat(info)
self.logger.debug(msg.getvalue())
msg.close()
# Instantiate our database to hold the results we received so far
self._resultsDB = ResultsDB(self)
# Instantiate the Swarm Terminator
self._swarmTerminator = SwarmTerminator()
# Initial hypersearch state
self._hsState = None
# The Max # of attempts we will make to create a unique model before
# giving up.
self._maxUniqueModelAttempts = int(Configuration.get(
'nupic.hypersearch.maxUniqueModelAttempts'))
# The max amount of time allowed before a model is considered orphaned.
self._modelOrphanIntervalSecs = float(Configuration.get(
'nupic.hypersearch.modelOrphanIntervalSecs'))
# The max percent of models that can complete with errors
self._maxPctErrModels = float(Configuration.get(
'nupic.hypersearch.maxPctErrModels'))
except:
# Clean up our temporary directory, if any
if self._tempDir is not None:
shutil.rmtree(self._tempDir)
self._tempDir = None
raise
return
def _getStreamDef(self, modelDescription):
"""
Generate stream definition based on
"""
#--------------------------------------------------------------------------
# Generate the string containing the aggregation settings.
aggregationPeriod = {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
}
# Honor any overrides provided in the stream definition
aggFunctionsDict = {}
if 'aggregation' in modelDescription['streamDef']:
for key in aggregationPeriod.keys():
if key in modelDescription['streamDef']['aggregation']:
aggregationPeriod[key] = modelDescription['streamDef']['aggregation'][key]
if 'fields' in modelDescription['streamDef']['aggregation']:
for (fieldName, func) in modelDescription['streamDef']['aggregation']['fields']:
aggFunctionsDict[fieldName] = str(func)
# Do we have any aggregation at all?
hasAggregation = False
for v in aggregationPeriod.values():
if v != 0:
hasAggregation = True
break
# Convert the aggFunctionsDict to a list
aggFunctionList = aggFunctionsDict.items()
aggregationInfo = dict(aggregationPeriod)
aggregationInfo['fields'] = aggFunctionList
streamDef = copy.deepcopy(modelDescription['streamDef'])
streamDef['aggregation'] = copy.deepcopy(aggregationInfo)
return streamDef
def __del__(self):
"""Destructor; NOTE: this is not guaranteed to be called (bugs like
circular references could prevent it from being called).
"""
self.close()
return
def close(self):
"""Deletes temporary system objects/files. """
if self._tempDir is not None and os.path.isdir(self._tempDir):
self.logger.debug("Removing temporary directory %r", self._tempDir)
shutil.rmtree(self._tempDir)
self._tempDir = None
return
def _readPermutationsFile(self, filename, modelDescription):
"""
Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The 'report' list from the permutations file.
This is a list of the items from each experiment's pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, 'key1:key2'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate CLA model results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
---------------------------------------------------------
filename: Name of permutations file
retval: None
"""
# Open and execute the permutations file
vars = {}
permFile = execfile(filename, globals(), vars)
# Read in misc info.
self._reportKeys = vars.get('report', [])
self._filterFunc = vars.get('permutationFilter', None)
self._dummyModelParamsFunc = vars.get('dummyModelParams', None)
self._predictedField = None # default
self._predictedFieldEncoder = None # default
self._fixedFields = None # default
# The fastSwarm variable, if present, contains the params from a best
# model from a previous swarm. If present, use info from that to seed
# a fast swarm
self._fastSwarmModelParams = vars.get('fastSwarmModelParams', None)
if self._fastSwarmModelParams is not None:
encoders = self._fastSwarmModelParams['structuredParams']['modelParams']\
['sensorParams']['encoders']
self._fixedFields = []
for fieldName in encoders:
if encoders[fieldName] is not None:
self._fixedFields.append(fieldName)
if 'fixedFields' in vars:
self._fixedFields = vars['fixedFields']
# Get min number of particles per swarm from either permutations file or
# config.
self._minParticlesPerSwarm = vars.get('minParticlesPerSwarm')
if self._minParticlesPerSwarm == None:
self._minParticlesPerSwarm = Configuration.get(
'nupic.hypersearch.minParticlesPerSwarm')
self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)
# Enable logic to kill off speculative swarms when an earlier sprint
# has found that it contains poorly performing field combination?
self._killUselessSwarms = vars.get('killUselessSwarms', True)
# The caller can request that the predicted field ALWAYS be included ("yes")
# or optionally include ("auto"). The setting of "no" is N/A and ignored
# because in that case the encoder for the predicted field will not even
# be present in the permutations file.
# When set to "yes", this will force the first sprint to try the predicted
# field only (the legacy mode of swarming).
# When set to "auto", the first sprint tries all possible fields (one at a
# time) in the first sprint.
self._inputPredictedField = vars.get("inputPredictedField", "yes")
# Try all possible 3-field combinations? Normally, we start with the best
# 2-field combination as a base. When this flag is set though, we try
# all possible 3-field combinations which takes longer but can find a
# better model.
self._tryAll3FieldCombinations = vars.get('tryAll3FieldCombinations', False)
# Always include timestamp fields in the 3-field swarms?
# This is a less compute intensive version of tryAll3FieldCombinations.
# Instead of trying ALL possible 3 field combinations, it just insures
# that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left
# out when generating the 3-field swarms.
self._tryAll3FieldCombinationsWTimestamps = vars.get(
'tryAll3FieldCombinationsWTimestamps', False)
# Allow the permutations file to override minFieldContribution. This would
# be set to a negative number for large swarms so that you don't disqualify
# a field in an early sprint just because it did poorly there. Sometimes,
# a field that did poorly in an early sprint could help accuracy when
# added in a later sprint
minFieldContribution = vars.get('minFieldContribution', None)
if minFieldContribution is not None:
self._minFieldContribution = minFieldContribution
# Allow the permutations file to override maxBranching.
maxBranching = vars.get('maxFieldBranching', None)
if maxBranching is not None:
self._maxBranching = maxBranching
# Read in the optimization info.
if 'maximize' in vars:
self._optimizeKey = vars['maximize']
self._maximize = True
elif 'minimize' in vars:
self._optimizeKey = vars['minimize']
self._maximize = False
else:
raise RuntimeError("Permutations file '%s' does not include a maximize"
" or minimize metric.")
# The permutations file is the new location for maxModels. The old location,
# in the jobParams is deprecated.
maxModels = vars.get('maxModels')
if maxModels is not None:
if self._maxModels is None:
self._maxModels = maxModels
else:
raise RuntimeError('It is an error to specify maxModels both in the job'
' params AND in the permutations file.')
# Figure out if what kind of search this is:
#
# If it's a temporal prediction search:
# the first sprint has 1 swarm, with just the predicted field
# elif it's a spatial prediction search:
# the first sprint has N swarms, each with predicted field + one
# other field.
# elif it's a classification search:
# the first sprint has N swarms, each with 1 field
inferenceType = modelDescription['modelParams']['inferenceType']
if not InferenceType.validate(inferenceType):
raise ValueError("Invalid inference type %s" %inferenceType)
if inferenceType in [InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep]:
# If it does not have a separate encoder for the predicted field that
# goes to the classifier, it is a legacy multi-step network
classifierOnlyEncoder = None
for encoder in modelDescription["modelParams"]["sensorParams"]\
["encoders"].values():
if encoder.get("classifierOnly", False) \
and encoder["fieldname"] == vars.get('predictedField', None):
classifierOnlyEncoder = encoder
break
if classifierOnlyEncoder is None or self._inputPredictedField=="yes":
# If we don't have a separate encoder for the classifier (legacy
# MultiStep) or the caller explicitly wants to include the predicted
# field, then use the legacy temporal search methodology.
self._searchType = HsSearchType.legacyTemporal
else:
self._searchType = HsSearchType.temporal
elif inferenceType in [InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly]:
self._searchType = HsSearchType.legacyTemporal
elif inferenceType in (InferenceType.TemporalClassification,
InferenceType.NontemporalClassification):
self._searchType = HsSearchType.classification
else:
raise RuntimeError("Unsupported inference type: %s" % inferenceType)
# Get the predicted field. Note that even classification experiments
# have a "predicted" field - which is the field that contains the
# classification value.
self._predictedField = vars.get('predictedField', None)
if self._predictedField is None:
raise RuntimeError("Permutations file '%s' does not have the required"
" 'predictedField' variable" % filename)
# Read in and validate the permutations dict
if 'permutations' not in vars:
raise RuntimeError("Permutations file '%s' does not define permutations" % filename)
if not isinstance(vars['permutations'], dict):
raise RuntimeError("Permutations file '%s' defines a permutations variable "
"but it is not a dict")
self._encoderNames = []
self._permutations = vars['permutations']
self._flattenedPermutations = dict()
def _flattenPermutations(value, keys):
if ':' in keys[-1]:
raise RuntimeError("The permutation variable '%s' contains a ':' "
"character, which is not allowed.")
flatKey = _flattenKeys(keys)
if isinstance(value, PermuteEncoder):
self._encoderNames.append(flatKey)
# If this is the encoder for the predicted field, save its name.
if value.fieldName == self._predictedField:
self._predictedFieldEncoder = flatKey
# Store the flattened representations of the variables within the
# encoder.
for encKey, encValue in value.kwArgs.iteritems():
if isinstance(encValue, PermuteVariable):
self._flattenedPermutations['%s:%s' % (flatKey, encKey)] = encValue
elif isinstance(value, PermuteVariable):
self._flattenedPermutations[flatKey] = value
else:
if isinstance(value, PermuteVariable):
self._flattenedPermutations[key] = value
rApply(self._permutations, _flattenPermutations)
def getExpectedNumModels(self):
"""Computes the number of models that are expected to complete as part of
this instances's HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
benefit of perutations_runner.py for use in progress
reporting.
Parameters:
---------------------------------------------------------
retval: The total number of expected models, if known; -1 if unknown
"""
return -1
def getModelNames(self):
"""Generates a list of model names that are expected to complete as part of
this instances's HyperSearch.
NOTE: This is compute-intensive for HyperSearches with a huge number of
combinations.
NOTE/TODO: THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the
benefit of perutations_runner.py.
Parameters:
---------------------------------------------------------
retval: List of model names for this HypersearchV2 instance, or
None of not applicable
"""
return None
def getPermutationVariables(self):
"""Returns a dictionary of permutation variables.
Parameters:
---------------------------------------------------------
retval: A dictionary of permutation variables; keys are
flat permutation variable names and each value is
a sub-class of PermuteVariable.
"""
return self._flattenedPermutations
def getComplexVariableLabelLookupDict(self):
"""Generates a lookup dictionary of permutation variables whose values
are too complex for labels, so that artificial labels have to be generated
for them.
Parameters:
---------------------------------------------------------
retval: A look-up dictionary of permutation
variables whose values are too complex for labels, so
artificial labels were generated instead (e.g., "Choice0",
"Choice1", etc.); the key is the name of the complex variable
and the value is:
dict(labels=<list_of_labels>, values=<list_of_values>).
"""
raise NotImplementedError
def getOptimizationMetricInfo(self):
"""Retrives the optimization key name and optimization function.
Parameters:
---------------------------------------------------------
retval: (optimizationMetricKey, maximize)
optimizationMetricKey: which report key to optimize for
maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
"""
return (self._optimizeKey, self._maximize)
  def _checkForOrphanedModels (self):
    """If there are any models that haven't been updated in a while, consider
    them dead, and mark them as hidden in our resultsDB. We also change the
    paramsHash and particleHash of orphaned models so that we can
    re-generate that particle and/or model again if we desire.

    Adopts orphans one at a time from the ClientJobsDAO until none remain.

    Parameters:
    ----------------------------------------------------------------------
    retval:        None
    """
    self.logger.debug("Checking for orphaned models older than %s" % \
                     (self._modelOrphanIntervalSecs))

    # Loop until the DAO reports no more orphans for this job.
    while True:
      orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,
                                                self._modelOrphanIntervalSecs)
      if orphanedModelId is None:
        return

      self.logger.info("Removing orphaned model: %d" % (orphanedModelId))

      # Change the model hash and params hash as stored in the models table so
      # that we can insert a new model with the same paramsHash.
      # The hashes must be unique, so retry with a new 'attempt' suffix if the
      # DAO rejects a collision.
      for attempt in range(100):
        paramsHash = hashlib.md5("OrphanParams.%d.%d" % (orphanedModelId,
                                                         attempt)).digest()
        particleHash = hashlib.md5("OrphanParticle.%d.%d" % (orphanedModelId,
                                                         attempt)).digest()
        try:
          self._cjDAO.modelSetFields(orphanedModelId,
                                   dict(engParamsHash=paramsHash,
                                        engParticleHash=particleHash))
          success = True
        except:
          # NOTE(review): deliberately broad -- any DAO failure (presumably a
          # uniqueness violation) triggers a retry with the next attempt index.
          success = False

        if success:
          break

      if not success:
        raise RuntimeError("Unexpected failure to change paramsHash and "
                           "particleHash of orphaned model")

      # Mark this model as complete, with reason "orphaned"
      self._cjDAO.modelSetCompleted(modelID=orphanedModelId,
                    completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,
                    completionMsg="Orphaned")

      # Update our results DB immediately, rather than wait for the worker
      # to inform us. This insures that the getParticleInfos() calls we make
      # below don't include this particle. Setting the metricResult to None
      # sets it to worst case
      self._resultsDB.update(modelID=orphanedModelId,
                             modelParams=None,
                             modelParamsHash=paramsHash,
                             metricResult=None,
                             completed = True,
                             completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,
                             matured = True,
                             numRecords = 0)
  def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):
    """
    Periodically, check to see if we should remove a certain field combination
    from evaluation (because it is doing so poorly) or move on to the next
    sprint (add in more fields).

    This method is called from _getCandidateParticleAndSwarm(), which is called
    right before we try and create a new model to run.

    Parameters:
    -----------------------------------------------------------------------
    exhaustedSwarmId:  If not None, force a change to the current set of active
                       swarms by removing this swarm. This is used in situations
                       where we can't find any new unique models to create in
                       this swarm. In these situations, we update the hypersearch
                       state regardless of the timestamp of the last time another
                       worker updated it.
    """
    if self._hsState is None:
      self._hsState = HsState(self)

    # Read in current state from the DB
    self._hsState.readStateFromDB()

    # This will hold the list of completed swarms that we find
    completedSwarms = set()

    # Mark the exhausted swarm as completing/completed, if any
    if exhaustedSwarmId is not None:
      self.logger.info("Removing swarm %s from the active set "
                       "because we can't find any new unique particle "
                       "positions" % (exhaustedSwarmId))
      # Is it completing or completed? 'completing' if any particles in it
      # have not yet matured.
      (particles, _, _, _, _) = self._resultsDB.getParticleInfos(
                                      swarmId=exhaustedSwarmId, matured=False)
      if len(particles) > 0:
        exhaustedSwarmStatus = 'completing'
      else:
        exhaustedSwarmStatus = 'completed'

    # Kill all swarms that don't need to be explored based on the most recent
    # information.
    if self._killUselessSwarms:
      self._hsState.killUselessSwarms()

    # For all swarms that were in the 'completing' state, see if they have
    # completed yet.
    #
    # Note that we are not quite sure why this doesn't automatically get handled
    # when we receive notification that a model finally completed in a swarm.
    # But, we ARE running into a situation, when speculativeParticles is off,
    # where we have one or more swarms in the 'completing' state even though all
    # models have since finished. This logic will serve as a failsafe against
    # this situation.
    completingSwarms = self._hsState.getCompletingSwarms()
    for swarmId in completingSwarms:
      # Is it completed?
      (particles, _, _, _, _) = self._resultsDB.getParticleInfos(
                                      swarmId=swarmId, matured=False)
      if len(particles) == 0:
        completedSwarms.add(swarmId)

    # Are there any swarms we can remove (because they have matured)?
    completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()
    priorCompletedSwarms = self._hsState.getCompletedSwarms()
    for (swarmId, genIdx, errScore) in completedSwarmGens:
      # Don't need to report it if the swarm already completed
      if swarmId in priorCompletedSwarms:
        continue

      completedList = self._swarmTerminator.recordDataPoint(
          swarmId=swarmId, generation=genIdx, errScore=errScore)

      # Update status message
      statusMsg = "Completed generation #%d of swarm '%s' with a best" \
                  " errScore of %g" % (genIdx, swarmId, errScore)
      if len(completedList) > 0:
        statusMsg = "%s. Matured swarm(s): %s" % (statusMsg, completedList)
      self.logger.info(statusMsg)
      self._cjDAO.jobSetFields (jobID=self._jobID,
                                fields=dict(engStatus=statusMsg),
                                useConnectionID=False,
                                ignoreUnchanged=True)

      # Special test mode to check which swarms have terminated.
      # Loop until our compare-and-swap update of the job's 'results' field
      # succeeds (another worker may be updating it concurrently).
      if 'NTA_TEST_recordSwarmTerminations' in os.environ:
        while True:
          resultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
          if resultsStr is None:
            results = {}
          else:
            results = json.loads(resultsStr)
          if not 'terminatedSwarms' in results:
            results['terminatedSwarms'] = {}
          for swarm in completedList:
            if swarm not in results['terminatedSwarms']:
              results['terminatedSwarms'][swarm] = (genIdx,
                            self._swarmTerminator.swarmScores[swarm])

          newResultsStr = json.dumps(results)
          if newResultsStr == resultsStr:
            break
          updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,
                                                   fieldName='results',
                                                   curValue=resultsStr,
                                                   newValue = json.dumps(results))
          if updated:
            break

      if len(completedList) > 0:
        for name in completedList:
          self.logger.info("Swarm matured: %s. Score at generation %d: "
                           "%s" % (name, genIdx, errScore))
        completedSwarms = completedSwarms.union(completedList)

    # Nothing to change? Then we are done.
    if len(completedSwarms)==0 and (exhaustedSwarmId is None):
      return

    # We need to mark one or more swarms as completed, keep trying until
    # successful, or until some other worker does it for us.
    while True:

      if exhaustedSwarmId is not None:
        self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)

      # Mark the completed swarms as completed
      for swarmId in completedSwarms:
        self._hsState.setSwarmState(swarmId, 'completed')

      # If nothing changed, we're done
      if not self._hsState.isDirty():
        return

      # Update the shared Hypersearch state now
      # This will do nothing and return False if some other worker beat us to it
      success = self._hsState.writeStateToDB()

      if success:
        # Go through and cancel all models that are still running, except for
        # the best model. Once the best model changes, the one that used to be
        # best (and has  matured) will notice that and stop itself at that point.
        jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
        if jobResultsStr is not None:
          jobResults = json.loads(jobResultsStr)
          bestModelId = jobResults.get('bestModel', None)
        else:
          bestModelId = None

        for swarmId in list(completedSwarms):
          (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
                                          swarmId=swarmId, completed=False)
          if bestModelId in modelIds:
            modelIds.remove(bestModelId)
          if len(modelIds) == 0:
            continue
          self.logger.info("Killing the following models in swarm '%s' because"
                           "the swarm is being terminated: %s" % (swarmId,
                           str(modelIds)))

          for modelId in modelIds:
            self._cjDAO.modelSetFields(modelId,
                    dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
                    ignoreUnchanged = True)
        return

      # We were not able to change the state because some other worker beat us
      # to it.
      # Get the new state, and try again to apply our changes.
      self._hsState.readStateFromDB()
      self.logger.debug("New hsState has been set by some other worker to: "
                       " \n%s" % (pprint.pformat(self._hsState._state, indent=4)))
def _getCandidateParticleAndSwarm(self, exhaustedSwarmId=None):
  """Find or create a candidate particle to produce a new model.

  At any one time, there is an active set of swarms in the current sprint, where
  each swarm in the sprint represents a particular combination of fields.
  Ideally, we should try to balance the number of models we have evaluated for
  each swarm at any time.

  This method will see how many models have been evaluated for each active
  swarm in the current active sprint(s) and then try and choose a particle
  from the least represented swarm in the first possible active sprint, with
  the following constraints/rules:

  for each active sprint:
    for each active swarm (preference to those with least # of models so far):
      1.) The particle will be created from new (generation #0) if there are not
      already self._minParticlesPerSwarm particles in the swarm.

      2.) Find the first gen that has a completed particle and evolve that
      particle to the next generation.

      3.) If we got to here, we know that we have satisfied the min # of
      particles for the swarm, and they are all currently running (probably at
      various generation indexes). Go onto the next swarm.

    If we couldn't find a swarm to allocate a particle in, go onto the next
    sprint and start allocating particles there....

  Parameters:
  ----------------------------------------------------------------
  exhaustedSwarmId: If not None, force a change to the current set of active
                    swarms by marking this swarm as either 'completing' or
                    'completed'. If there are still models being evaluated in
                    it, mark it as 'completing', else 'completed'. This is
                    used in situations where we can't find any new unique
                    models to create in this swarm. In these situations, we
                    force an update to the hypersearch state so no other
                    worker wastes time trying to use this swarm.

  retval: (exit, particle, swarm)
            exit: If true, this worker is ready to exit (particle and
                  swarm will be None)
            particle: Which particle to run
            swarm: which swarm the particle is in

            NOTE: When particle and swarm are None and exit is False, it
            means that we need to wait for one or more other worker(s) to
            finish their respective models before we can pick a particle
            to run. This will generally only happen when speculativeParticles
            is set to False.
  """
  # Cancel search?
  jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
  if jobCancel:
    self._jobCancelled = True
    # Did a worker cancel the job because of an error?
    (workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,
        ['workerCompletionReason', 'workerCompletionMsg'])
    if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
      # No error recorded yet: this is a plain user/job cancellation.
      self.logger.info("Exiting due to job being cancelled")
      self._cjDAO.jobSetFields(self._jobID,
          dict(workerCompletionMsg="Job was cancelled"),
          useConnectionID=False, ignoreUnchanged=True)
    else:
      self.logger.error("Exiting because some worker set the "
          "workerCompletionReason to %s. WorkerCompletionMsg: %s" %
          (workerCmpReason, workerCmpMsg))
    return (True, None, None)

  # Perform periodic updates on the Hypersearch state.
  if self._hsState is not None:
    priorActiveSwarms = self._hsState.getActiveSwarms()
  else:
    priorActiveSwarms = None

  # Update the HypersearchState, checking for matured swarms, and marking
  # the passed in swarm as exhausted, if any.
  self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)

  # The above call may have modified self._hsState['activeSwarmIds'].
  # Log the current set of active swarms.
  activeSwarms = self._hsState.getActiveSwarms()
  if activeSwarms != priorActiveSwarms:
    self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms,
                                                        priorActiveSwarms))
  self.logger.debug("Active swarms: %s" % (activeSwarms))

  # If too many model errors were detected, exit.
  # NOTE(review): the error-rate check only kicks in after more than 5
  # completed models, presumably to avoid acting on a tiny sample — confirm.
  totalCmpModels = self._resultsDB.getNumCompletedModels()
  if totalCmpModels > 5:
    numErrs = self._resultsDB.getNumErrModels()
    if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:
      # Get one of the errors to include in the completion message.
      errModelIds = self._resultsDB.getErrModelIds()
      resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
      modelErrMsg = resInfo.completionMsg
      cmpMsg = "%s: Exiting due to receiving too many models failing" \
               " from exceptions (%d out of %d). \nModel Exception: %s" % \
                (ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,
                 modelErrMsg)
      self.logger.error(cmpMsg)

      # Cancel the entire job now, if it has not already been cancelled.
      workerCmpReason = self._cjDAO.jobGetFields(self._jobID,
          ['workerCompletionReason'])[0]
      if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
        self._cjDAO.jobSetFields(
            self._jobID,
            fields=dict(
                cancel=True,
                workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
                workerCompletionMsg = cmpMsg),
            useConnectionID=False,
            ignoreUnchanged=True)
      return (True, None, None)

  # If HsState thinks the search is over, exit. It is seeing if the results
  # on the sprint we just completed are worse than a prior sprint.
  if self._hsState.isSearchOver():
    cmpMsg = "Exiting because results did not improve in most recently" \
             " completed sprint."
    self.logger.info(cmpMsg)
    self._cjDAO.jobSetFields(self._jobID,
        dict(workerCompletionMsg=cmpMsg),
        useConnectionID=False, ignoreUnchanged=True)
    return (True, None, None)

  # Search successive active sprints, until we can find a candidate particle
  # to work with.
  sprintIdx = -1
  while True:
    # Is this sprint active?
    sprintIdx += 1
    (active, eos) = self._hsState.isSprintActive(sprintIdx)

    # If no more sprints to explore:
    if eos:
      # If any prior ones are still being explored, finish up exploring them.
      if self._hsState.anyGoodSprintsActive():
        self.logger.info("No more sprints to explore, waiting for prior"
                         " sprints to complete")
        return (False, None, None)
      # Else, we're done.
      else:
        cmpMsg = "Exiting because we've evaluated all possible field " \
                 "combinations"
        self._cjDAO.jobSetFields(self._jobID,
                                 dict(workerCompletionMsg=cmpMsg),
                                 useConnectionID=False, ignoreUnchanged=True)
        self.logger.info(cmpMsg)
        return (True, None, None)

    if not active:
      if not self._speculativeParticles:
        if not self._hsState.isSprintCompleted(sprintIdx):
          self.logger.info("Waiting for all particles in sprint %d to complete"
                           "before evolving any more particles" % (sprintIdx))
          return (False, None, None)
      # Sprint not active (and either speculative, or completed): try the
      # next sprint index.
      continue

    # ====================================================================
    # Look for swarms that have particle "holes" in their generations. That is,
    # an earlier generation with less than minParticlesPerSwarm. This can
    # happen if a model that was started earlier got orphaned. If we detect
    # this, start a new particle in that generation.
    swarmIds = self._hsState.getActiveSwarms(sprintIdx)
    for swarmId in swarmIds:
      firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(
                              swarmId=swarmId,
                              minNumParticles=self._minParticlesPerSwarm)
      if firstNonFullGenIdx is None:
        continue
      if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):
        self.logger.info("Cloning an earlier model in generation %d of swarm "
            "%s (sprintIdx=%s) to replace an orphaned model" % (
              firstNonFullGenIdx, swarmId, sprintIdx))

        # Clone a random orphaned particle from the incomplete generation.
        (allParticles, allModelIds, errScores, completed, matured) = \
            self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)

        if len(allModelIds) > 0:
          # We have seen instances where we get stuck in a loop incessantly
          # trying to clone earlier models (NUP-1511). My best guess is that
          # we've already successfully cloned each of the orphaned models at
          # least once, but still need at least one more. If we don't create
          # a new particleID, we will never be able to instantiate another
          # model (since particleID hash is a unique key in the models table).
          # So, on 1/8/2013 this logic was changed to create a new particleID
          # whenever we clone an orphan.
          newParticleId = True
          self.logger.info("Cloning an orphaned model")

        # If there is no orphan, clone one of the other particles. We can
        # have no orphan if this was a speculative generation that only
        # continued particles completed in the prior generation.
        else:
          newParticleId = True
          self.logger.info("No orphans found, so cloning a non-orphan")
          (allParticles, allModelIds, errScores, completed, matured) = \
              self._resultsDB.getParticleInfos(swarmId=swarmId,
                                               genIdx=firstNonFullGenIdx)

        # Clone that model.
        modelId = random.choice(allModelIds)
        self.logger.info("Cloning model %r" % (modelId))
        (particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
        particle = Particle(hsObj = self,
                            resultsDB = self._resultsDB,
                            flattenedPermuteVars=self._flattenedPermutations,
                            newFromClone=particleState,
                            newParticleId=newParticleId)
        return (False, particle, swarmId)

    # ====================================================================
    # Sort the swarms in priority order, trying the ones with the least
    # number of models first.
    swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
    # NOTE: Python 2 idiom — zip() returns a list here, which is sorted
    # in place (smallest swarm first).
    swarmSizeAndIdList = zip(swarmSizes, swarmIds)
    swarmSizeAndIdList.sort()
    for (_, swarmId) in swarmSizeAndIdList:

      # -------------------------------------------------------------------
      # 1.) The particle will be created from new (at generation #0) if there
      # are not already self._minParticlesPerSwarm particles in the swarm.
      (allParticles, allModelIds, errScores, completed, matured) = (
            self._resultsDB.getParticleInfos(swarmId))
      if len(allParticles) < self._minParticlesPerSwarm:
        particle = Particle(hsObj=self,
                            resultsDB=self._resultsDB,
                            flattenedPermuteVars=self._flattenedPermutations,
                            swarmId=swarmId,
                            newFarFrom=allParticles)

        # Jam in the best encoder state found from the first sprint.
        bestPriorModel = None
        if sprintIdx >= 1:
          (bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)

        if bestPriorModel is not None:
          self.logger.info("Best model and errScore from previous sprint(%d):"
                           " %s, %g" % (0, str(bestPriorModel), errScore))
          (baseState, modelId, errScore, completed, matured) \
               = self._resultsDB.getParticleInfo(bestPriorModel)
          particle.copyEncoderStatesFrom(baseState)

          # Copy the best inference type from the earlier sprint.
          particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])

          # It's best to jiggle the best settings from the prior sprint, so
          # compute a new position starting from that previous best.
          # Only jiggle the vars we copied from the prior model.
          # NOTE(review): encoder-level variables appear to be the ones whose
          # names contain ':' — confirm against the permutation var naming.
          whichVars = []
          for varName in baseState['varStates']:
            if ':' in varName:
              whichVars.append(varName)
          particle.newPosition(whichVars)

          self.logger.debug("Particle after incorporating encoder vars from best "
                            "model in previous sprint: \n%s" % (str(particle)))

        return (False, particle, swarmId)

      # -------------------------------------------------------------------
      # 2.) Look for a completed particle to evolve.
      # Note that we use lastDescendent. We only want to evolve particles that
      # are at their most recent generation index.
      (readyParticles, readyModelIds, readyErrScores, _, _) = (
          self._resultsDB.getParticleInfos(swarmId, genIdx=None,
                                           matured=True, lastDescendent=True))

      # If we have at least 1 ready particle to evolve...
      if len(readyParticles) > 0:
        readyGenIdxs = [x['genIdx'] for x in readyParticles]
        sortedGenIdxs = sorted(set(readyGenIdxs))
        genIdx = sortedGenIdxs[0]

        # Now, genIdx has the generation of the particle we want to run.
        # Get a particle from that generation and evolve it.
        useParticle = None
        for particle in readyParticles:
          if particle['genIdx'] == genIdx:
            useParticle = particle
            break

        # If speculativeParticles is off, we don't want to evolve a particle
        # into the next generation until all particles in the current
        # generation have completed.
        if not self._speculativeParticles:
          (particles, _, _, _, _) = self._resultsDB.getParticleInfos(
              swarmId, genIdx=genIdx, matured=False)
          if len(particles) > 0:
            continue

        particle = Particle(hsObj=self,
                            resultsDB=self._resultsDB,
                            flattenedPermuteVars=self._flattenedPermutations,
                            evolveFromState=useParticle)
        return (False, particle, swarmId)

      # END: for (swarmSize, swarmId) in swarmSizeAndIdList:
      # No success in this swarm, onto next swarm.

    # ====================================================================
    # We couldn't find a particle in this sprint ready to evolve. If
    # speculative particles is OFF, we have to wait for one or more other
    # workers to finish up their particles before we can do anything.
    if not self._speculativeParticles:
      self.logger.info("Waiting for one or more of the %s swarms "
          "to complete a generation before evolving any more particles" \
          % (str(swarmIds)))
      return (False, None, None)

    # END: while True:
    # No success in this sprint, onto next sprint.
def _okToExit(self):
  """Test if it's OK to exit this worker. This is only called when we run
  out of prospective new models to evaluate. This method sees if all models
  have matured yet. If not, it will sleep for a bit and return False. This
  will indicate to the hypersearch worker that we should keep running, and
  check again later. This gives this worker a chance to pick up and adopt any
  model which may become orphaned by another worker before it matures.

  If all models have matured, this method will send a STOP message to all
  matured, running models (presumably, there will be just one - the model
  which thinks it's the best) before returning True.

  retval: True if the worker may exit now, False if it should keep polling.
  """
  # Send an update status periodically to the JobTracker so that it doesn't
  # think this worker is dead.
  print >> sys.stderr, "reporter:status:In hypersearchV2: _okToExit"

  # Any immature models still running?
  if not self._jobCancelled:
    (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(matured=False)
    if len(modelIds) > 0:
      self.logger.info("Ready to end hyperseach, but not all models have " \
                       "matured yet. Sleeping a bit to wait for all models " \
                       "to mature.")
      # Sleep for a bit, no need to check for orphaned models very often.
      # Randomized so that multiple workers don't poll in lockstep.
      time.sleep(5.0 * random.random())
      return False

  # All particles have matured, send a STOP signal to any that are still
  # running.
  (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(completed=False)
  for modelId in modelIds:
    self.logger.info("Stopping model %d because the search has ended" \
                        % (modelId))
    self._cjDAO.modelSetFields(modelId,
                    dict(engStop=ClientJobsDAO.STOP_REASON_STOPPED),
                    ignoreUnchanged = True)

  # Update the HsState to get the accurate field contributions.
  self._hsStatePeriodicUpdate()
  pctFieldContributions, absFieldContributions = \
                                        self._hsState.getFieldContributions()

  # Update the results field with the new field contributions.
  jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['results'])[0]
  if jobResultsStr is not None:
    jobResults = json.loads(jobResultsStr)
  else:
    jobResults = {}

  # Update the fieldContributions field, but only if it changed; the
  # compare-and-swap below (jobSetFieldIfEqual) guards against a concurrent
  # update by another worker.
  if pctFieldContributions != jobResults.get('fieldContributions', None):
    jobResults['fieldContributions'] = pctFieldContributions
    jobResults['absoluteFieldContributions'] = absFieldContributions

    isUpdated = self._cjDAO.jobSetFieldIfEqual(self._jobID,
                                               fieldName='results',
                                               curValue=jobResultsStr,
                                               newValue=json.dumps(jobResults))
    if isUpdated:
      self.logger.info('Successfully updated the field contributions:%s',
                       pctFieldContributions)
    else:
      self.logger.info('Failed updating the field contributions, ' \
                       'another hypersearch worker must have updated it')

  return True
def killSwarmParticles(self, swarmID):
  """Kill all models in the given swarm that have not yet completed.

  Each still-running model in the swarm is marked with
  engStop=STOP_REASON_KILLED so its worker stops it.

  Parameters:
  ----------------------------------------------------------------
  swarmID:  ID of the swarm whose incomplete models should be killed.
  """
  (_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(
      swarmId=swarmID, completed=False)

  # Log the batch once, up front. (Previously this log line sat inside the
  # loop, repeating the entire model-ID list once per model; the two string
  # halves were also missing a separating space: "becausethe".)
  if modelIds:
    self.logger.info("Killing the following models in swarm '%s' because "
                     "the swarm is being terminated: %s" % (swarmID,
                                                            str(modelIds)))
  for modelId in modelIds:
    self._cjDAO.modelSetFields(
        modelId, dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),
        ignoreUnchanged=True)
def createModels(self, numModels=1):
  """Create one or more new models for evaluation. These should NOT be models
  that we already know are in progress (i.e. those that have been sent to us
  via recordModelProgress). We return a list of models to the caller
  (HypersearchWorker) and if one can be successfully inserted into
  the models table (i.e. it is not a duplicate) then HypersearchWorker will
  turn around and call our runModel() method, passing in this model. If it
  is a duplicate, HypersearchWorker will call this method again. A model
  is a duplicate if either the modelParamsHash or particleHash is
  identical to another entry in the model table.

  The numModels is provided by HypersearchWorker as a suggestion as to how
  many models to generate. This particular implementation only ever returns 1
  model.

  Before choosing some new models, we first do a sweep for any models that
  may have been abandoned by failed workers. If/when we detect an abandoned
  model, we mark it as complete and orphaned and hide it from any subsequent
  queries to our ResultsDB. This effectively considers it as if it never
  existed. We also change the paramsHash and particleHash in the model record
  of the models table so that we can create another model with the same
  params and particle status and run it (which we then do immediately).

  The modelParamsHash returned for each model should be a hash (max allowed
  size of ClientJobsDAO.hashMaxSize) that uniquely identifies this model by
  its params and the optional particleHash should be a hash of the particleId
  and generation index. Every model that gets placed into the models database,
  either by this worker or another worker, will have these hashes computed for
  it. The recordModelProgress gets called for every model in the database and
  the hash is used to tell which, if any, are the same as the ones this worker
  generated.

  NOTE: We check first ourselves for possible duplicates using the paramsHash
  before we return a model. If HypersearchWorker failed to insert it (because
  some other worker beat us to it), it will turn around and call our
  recordModelProgress with that other model so that we now know about it. It
  will then call createModels() again.

  This method returns an exit boolean and the model to evaluate. If there is
  no model to evaluate, we may return False for exit because we want to stay
  alive for a while, waiting for all other models to finish. This gives us
  a chance to detect and pick up any possibly orphaned model by another
  worker.

  Parameters:
  ----------------------------------------------------------------------
  numModels:   number of models to generate
  retval:      (exit, models)
                  exit: true if this worker should exit.
                  models: list of tuples, one for each model. Each tuple contains:
                    (modelParams, modelParamsHash, particleHash)

               modelParams is a dictionary containing the following elements:

               structuredParams: dictionary containing all variables for
                 this model, with encoders represented as a dict within
                 this dict (or None if they are not included.

               particleState: dictionary containing the state of this
                 particle. This includes the position and velocity of
                 each of its variables, the particleId, and the particle
                 generation index. It contains the following keys:

                 id: The particle Id of the particle we are using to
                       generate/track this model. This is a string of the
                       form <hypersearchWorkerId>.<particleIdx>
                 genIdx: the particle's generation index. This starts at 0
                       and increments every time we move the particle to a
                       new position.
                 swarmId: The swarmId, which is a string of the form
                   <encoder>.<encoder>... that describes this swarm
                 varStates: dict of the variable states. The key is the
                     variable name, the value is a dict of the variable's
                     position, velocity, bestPosition, bestResult, etc.
  """
  # Check for and mark orphaned models.
  self._checkForOrphanedModels()

  modelResults = []
  for _ in xrange(numModels):
    candidateParticle = None

    # If we've reached the max # of model to evaluate, we're done.
    if (self._maxModels is not None and
        (self._resultsDB.numModels() - self._resultsDB.getNumErrModels()) >=
        self._maxModels):
      return (self._okToExit(), [])

    # If we don't already have a particle to work on, get a candidate swarm and
    # particle to work with. If None is returned for the particle it means
    # either that the search is over (if exitNow is also True) or that we need
    # to wait for other workers to finish up their models before we can pick
    # another particle to run (if exitNow is False).
    if candidateParticle is None:
      (exitNow, candidateParticle, candidateSwarm) = (
          self._getCandidateParticleAndSwarm())
    if candidateParticle is None:
      if exitNow:
        return (self._okToExit(), [])
      else:
        # Send an update status periodically to the JobTracker so that it
        # doesn't think this worker is dead.
        print >> sys.stderr, "reporter:status:In hypersearchV2: speculativeWait"
        # Randomized wait so workers don't retry in lockstep.
        time.sleep(self._speculativeWaitSecondsMax * random.random())
        return (False, [])
    useEncoders = candidateSwarm.split('.')
    numAttempts = 0

    # Loop until we can create a unique model that we haven't seen yet.
    while True:

      # If this is the Nth attempt with the same candidate, agitate it a bit
      # to find a new unique position for it.
      if numAttempts >= 1:
        self.logger.debug("Agitating particle to get unique position after %d "
                "failed attempts in a row" % (numAttempts))
        candidateParticle.agitate()

      # Create the hierarchical params expected by the base description. Note
      # that this is where we incorporate encoders that have no permuted
      # values in them.
      position = candidateParticle.getPosition()
      structuredParams = dict()
      def _buildStructuredParams(value, keys):
        # Callback for rCopy: maps each permutation entry to its concrete
        # value for this model.
        flatKey = _flattenKeys(keys)
        # If it's an encoder, either put in None if it's not used, or replace
        # all permuted constructor params with the actual position.
        if flatKey in self._encoderNames:
          if flatKey in useEncoders:
            # Form encoder dict, substituting in chosen permutation values.
            return value.getDict(flatKey, position)
          # Encoder not used.
          else:
            return None
        # Regular top-level variable.
        elif flatKey in position:
          return position[flatKey]
        # Fixed override of a parameter in the base description.
        else:
          return value

      structuredParams = rCopy(self._permutations,
                               _buildStructuredParams,
                               discardNoneKeys=False)

      # Create the modelParams.
      modelParams = dict(
                 structuredParams=structuredParams,
                 particleState = candidateParticle.getState()
                 )

      # And the hashes. Hashing a *sorted* JSON dump makes paramsHash
      # independent of dict iteration order.
      m = hashlib.md5()
      m.update(sortedJSONDumpS(structuredParams))
      m.update(self._baseDescriptionHash)
      paramsHash = m.digest()

      # particleHash identifies (particleId, generation) pairs uniquely.
      particleInst = "%s.%s" % (modelParams['particleState']['id'],
                                modelParams['particleState']['genIdx'])
      particleHash = hashlib.md5(particleInst).digest()

      # Increase attempt counter.
      numAttempts += 1

      # If this is a new one, and passes the filter test, exit with it.
      # TODO: There is currently a problem with this filters implementation as
      # it relates to self._maxUniqueModelAttempts. When there is a filter in
      # effect, we should try a lot more times before we decide we have
      # exhausted the parameter space for this swarm. The question is, how many
      # more times?
      if self._filterFunc and not self._filterFunc(structuredParams):
        valid = False
      else:
        valid = True
      if valid and self._resultsDB.getModelIDFromParamsHash(paramsHash) is None:
        break

      # If we've exceeded the max allowed number of attempts, mark this swarm
      # as completing or completed, so we don't try and allocate any more new
      # particles to it, and pick another.
      if numAttempts >= self._maxUniqueModelAttempts:
        (exitNow, candidateParticle, candidateSwarm) \
              = self._getCandidateParticleAndSwarm(
                                            exhaustedSwarmId=candidateSwarm)
        if candidateParticle is None:
          if exitNow:
            return (self._okToExit(), [])
          else:
            time.sleep(self._speculativeWaitSecondsMax * random.random())
            return (False, [])
        numAttempts = 0
        useEncoders = candidateSwarm.split('.')

    # Log message.
    if self.logger.getEffectiveLevel() <= logging.DEBUG:
      self.logger.debug("Submitting new potential model to HypersearchWorker: \n%s"
                        % (pprint.pformat(modelParams, indent=4)))
    modelResults.append((modelParams, paramsHash, particleHash))

  return (False, modelResults)
def recordModelProgress(self, modelID, modelParams, modelParamsHash, results,
                       completed, completionReason, matured, numRecords):
  """Record or update the results for a model. Called by the
  HypersearchWorker whenever it gets results info for another model, or
  updated results on a model that is still running.

  The first time this is called for a given modelID, modelParams contains
  the params dict for that model and modelParamsHash its hash. Subsequent
  updates of the same modelID pass None for both (to save overhead). The
  results are folded into our in-memory ResultsDB, which createModels()
  later consults when deciding what to generate next.

  Parameters:
  ----------------------------------------------------------------------
  modelID:          ID of this model in the models table
  modelParams:      params dict for this model, or None for an update of a
                    previously-reported model (see createModels() for the
                    dict's layout)
  modelParamsHash:  hash of modelParams, computed by the worker that
                    inserted the model
  results:          (allMetrics, optimizeMetric) tuple of
                    metricName:result dicts, or None if no results yet
  completed:        True once the model has finished evaluation
  completionReason: one of the ClientJobsDAO.CMPL_REASON_XXX equates
  matured:          True if the model has matured (a matured model may keep
                    running when it is the current best)
  numRecords:       number of records processed so far by this model
  """
  # Pull out the single optimize-metric value, when any results exist yet.
  # (Python 2: .values() is a list; the optimize dict holds one entry.)
  metricResult = None
  if results is not None:
    metricResult = results[1].values()[0]

  # Fold this report into our results database; it hands back the error
  # score it derived for the model.
  errorScore = self._resultsDB.update(modelID=modelID,
              modelParams=modelParams,modelParamsHash=modelParamsHash,
              metricResult=metricResult, completed=completed,
              completionReason=completionReason, matured=matured,
              numRecords=numRecords)

  self.logger.debug('Received progress on model %d: completed: %s, '
                    'cmpReason: %s, numRecords: %d, errScore: %s' ,
                     modelID, completed, completionReason, numRecords,
                     errorScore)

  # Report the best error score seen across all models so far.
  (bestModelID, bestResult) = self._resultsDB.bestModelIdAndErrScore()
  self.logger.debug('Best err score seen so far: %s on model %s' % \
                   (bestResult, bestModelID))
def runModel(self, modelID, jobID, modelParams, modelParamsHash,
jobsDAO, modelCheckpointGUID):
"""Run the given model.
This runs the model described by 'modelParams'. Periodically, it updates
the results seen on the model to the model database using the databaseAO
(database Access Object) methods.
Parameters:
-------------------------------------------------------------------------
modelID: ID of this model in models table
jobID: ID for this hypersearch job in the jobs table
modelParams: parameters of this specific model
modelParams is a dictionary containing the name/value
pairs of each variable we are permuting over. Note that
variables within an encoder spec have their name
structure as:
<encoderName>.<encodrVarName>
modelParamsHash: hash of modelParamValues
jobsDAO jobs data access object - the interface to the jobs
database where model information is stored
modelCheckpointGUID: A persistent, globally-unique identifier for
constructing the model checkpoint key
"""
# We're going to make an assumption that if we're not using streams, that
# we also don't need checkpoints saved. For now, this assumption is OK
# (if there are no streams, we're typically running on a single machine
# and just save models to files) but we may want to break this out as
# a separate controllable parameter in the future
if not self._createCheckpoints:
modelCheckpointGUID = None
# Register this model in our database
self._resultsDB.update(modelID=modelID,
modelParams=modelParams,
modelParamsHash=modelParamsHash,
metricResult = None,
completed = False,
completionReason = None,
matured = False,
numRecords = 0)
# Get the structured params, which we pass to the base description
structuredParams = modelParams['structuredParams']
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.debug("Running Model. \nmodelParams: %s, \nmodelID=%s, " % \
(pprint.pformat(modelParams, indent=4), modelID))
# Record time.clock() so that we can report on cpu time
cpuTimeStart = time.clock()
# Run the experiment. This will report the results back to the models
# database for us as well.
logLevel = self.logger.getEffectiveLevel()
try:
if self._dummyModel is None or self._dummyModel is False:
(cmpReason, cmpMsg) = runModelGivenBaseAndParams(
modelID=modelID,
jobID=jobID,
baseDescription=self._baseDescription,
params=structuredParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
else:
dummyParams = dict(self._dummyModel)
dummyParams['permutationParams'] = structuredParams
if self._dummyModelParamsFunc is not None:
permInfo = dict(structuredParams)
permInfo ['generation'] = modelParams['particleState']['genIdx']
dummyParams.update(self._dummyModelParamsFunc(permInfo))
(cmpReason, cmpMsg) = runDummyModel(
modelID=modelID,
jobID=jobID,
params=dummyParams,
predictedField=self._predictedField,
reportKeys=self._reportKeys,
optimizeKey=self._optimizeKey,
jobsDAO=jobsDAO,
modelCheckpointGUID=modelCheckpointGUID,
logLevel=logLevel,
predictionCacheMaxRecords=self._predictionCacheMaxRecords)
# Write out the completion reason and message
jobsDAO.modelSetCompleted(modelID,
completionReason = cmpReason,
completionMsg = cmpMsg,
cpuTime = time.clock() - cpuTimeStart)
except InvalidConnectionException, e:
self.logger.warn("%s", e)
| agpl-3.0 |
Alwnikrotikz/visvis | functions/polarplot.py | 3 | 7019 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Keith Smith
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
#
# Thanks to Keith Smith for implementing the polar plot functionality.
import numpy as np
import visvis as vv
from visvis.utils.pypoints import Pointset, is_Point, is_Pointset
from visvis import PolarLine
def makeArray(data):
    """ Return *data* as a numpy array.

    If *data* already is a numpy array, it is returned unchanged. Otherwise
    it must be a sequence of scalars, which is copied element-by-element
    into a new float array of shape (len(data), 1).

    Raises an Exception (same contract as before) when *data* has no
    length or its elements cannot be stored as array values.
    """
    if isinstance(data, np.ndarray):
        return data
    # Create a numpy column array from any sequence-like object.
    # (Fix: len(data) was previously evaluated twice — once for the
    # allocation and once more inside range(len(data)).)
    try:
        n = len(data)  # raises TypeError for non-sequences
        a = np.empty((n, 1))
        for i in range(n):
            a[i] = data[i]
        return a
    except TypeError:
        raise Exception("Cannot plot %s" % data.__class__.__name__)
def _SetLimitsAfterDraw(event):
    """ One-shot eventAfterDraw handler: re-applies the limits of every
    PolarAxis2D in the figure after its first draw, then unbinds itself
    and triggers a fresh draw.
    """
    figure = event.owner
    # Re-apply each polar axis' current limits now that a draw has happened.
    for polarAxis in figure.FindObjects(vv.axises.PolarAxis2D):
        currentLimits = polarAxis.GetLimits()
        polarAxis.SetLimits(rangeTheta=currentLimits[0],
                            rangeR=currentLimits[1])
    # Detach this handler so it only runs once, then redraw.
    figure.eventAfterDraw.Unbind(_SetLimitsAfterDraw)
    figure.Draw()
def polarplot(data1, data2=None, inRadians=False,
            lw=1, lc='b', ls="-", mw=7, mc='b', ms='', mew=1, mec='k',
            alpha=1, axesAdjust=True, axes=None, **kwargs):
    """ polarplot(*args, inRadians=False,
            lw=1, lc='b', ls="-", mw=7, mc='b', ms='', mew=1, mec='k',
            alpha=1, axesAdjust=True, axes=None):
    Plot 2D polar data, using a polar axis to draw a polar grid.
    Usage
    -----
    * plot(Y, ...) plots a 1D polar signal.
    * plot(X, Y, ...) also supplies angular coordinates
    * plot(P, ...) plots using a Point or Pointset instance
    Keyword arguments
    -----------------
    (The longer names for the line properties can also be used)
    lw : scalar
        lineWidth. The width of the line. If zero, no line is drawn.
    mw : scalar
        markerWidth. The width of the marker. If zero, no marker is drawn.
    mew : scalar
        markerEdgeWidth. The width of the edge of the marker.
    lc : 3-element tuple or char
        lineColor. The color of the line. A tuple should represent the RGB
        values between 0 and 1. If a char is given it must be
        one of 'rgbmcywk', for reg, green, blue, magenta, cyan, yellow,
        white, black, respectively.
    mc : 3-element tuple or char
        markerColor. The color of the marker. See lineColor.
    mec : 3-element tuple or char
        markerEdgeColor. The color of the edge of the marker.
    ls : string
        lineStyle. The style of the line. (See below)
    ms : string
        markerStyle. The style of the marker. (See below)
    axesAdjust : bool
        If axesAdjust==True, this function will call axes.SetLimits(), and set
        the camera type to 2D.
    axes : Axes instance
        Display the image in this axes, or the current axes if not given.
    Line styles
    -----------
    * Solid line: '-'
    * Dotted line: ':'
    * Dashed line: '--'
    * Dash-dot line: '-.' or '.-'
    * A line that is drawn between each pair of points: '+'
    * No line: '' or None.
    Marker styles
    -------------
    * Plus: '+'
    * Cross: 'x'
    * Square: 's'
    * Diamond: 'd'
    * Triangle (pointing up, down, left, right): '^', 'v', '<', '>'
    * Pentagram star: 'p' or '*'
    * Hexgram: 'h'
    * Point/cirle: 'o' or '.'
    * No marker: '' or None
    Polar axis
    ----------
    This polar axis has a few specialized methods for adjusting the polar
    plot. Access these via vv.gca().axis.
    * SetLimits(thetaRange, radialRange)
    * thetaRange, radialRange = GetLimits()
    * angularRefPos: Get and Set methods for the relative screen
      angle of the 0 degree polar reference. Default is 0 degs
      which corresponds to the positive x-axis (y =0)
    * isCW: Get and Set methods for the sense of rotation CCW or
      CW. This method takes/returns a bool (True if the default CW).
    Interaction
    -----------
    * Drag mouse up/down to translate radial axis.
    * Drag mouse left/right to rotate angular ref position.
    * Drag mouse + shift key up/down to rescale radial axis (min R fixed).
    """
    # create a dict from the properties and combine with kwargs
    # (short-name arguments act as defaults; explicit long-name kwargs win)
    tmp = {'lineWidth': lw, 'lineColor': lc, 'lineStyle': ls,
           'markerWidth': mw, 'markerColor': mc, 'markerStyle': ms,
           'markerEdgeWidth': mew, 'markerEdgeColor': mec}
    for i in tmp:
        if not i in kwargs:
            kwargs[i] = tmp[i]
    ## create the data
    # NOTE(review): in the Point/Pointset branches `pp` is assigned but the
    # PolarLine below is built from data1/data2 — looks unfinished; confirm.
    if is_Pointset(data1):
        pp = data1
    elif is_Point(data1):
        pp = Pointset(data1.ndim)
        pp.append(data1)
    else:
        if data1 is None:
            raise ValueError("The first argument cannot be None!")
        data1 = makeArray(data1)
        if data2 is None:
            # R data is given, thetadata must be
            # a range starting from 0 degrees
            data2 = data1
            data1 = np.arange(0, data2.shape[0])
        else:
            data2 = makeArray(data2)
        # check dimensions
        L = data1.size
        if L != data2.size:
            raise ValueError("Array dimensions do not match! %i vs %i " %
                    (data1.size, data2.size))
        # build points: force both arrays into (n, 1) column shape
        data1 = data1.reshape((data1.size, 1))
        data2 = data2.reshape((data2.size, 1))
    # Angular data is assumed to be in degrees unless inRadians is set.
    if not inRadians:
        data1 = np.pi * data1 / 180.0
    ## create the line
    if axes is None:
        axes = vv.gca()
    # Switching the axis type rebuilds the axis object as a PolarAxis2D.
    axes.axisType = 'polar'
    fig = axes.GetFigure()
    l = PolarLine(axes, data1, data2)
    l.lw = kwargs['lineWidth']
    l.lc = kwargs['lineColor']
    l.ls = kwargs['lineStyle']
    l.mw = kwargs['markerWidth']
    l.mc = kwargs['markerColor']
    l.ms = kwargs['markerStyle']
    l.mew = kwargs['markerEdgeWidth']
    l.mec = kwargs['markerEdgeColor']
    l.alpha = alpha
    ## almost done...
    # Init axis
    # axes.axis.SetLimits()
    if axesAdjust:
        if axes.daspectAuto is None:
            axes.daspectAuto = True
        axes.cameraType = '2d'
        axes.SetLimits()
    # Subsribe after-draw event handler
    # (unsubscribe first in case we do multiple plots)
    fig.eventAfterDraw.Unbind(_SetLimitsAfterDraw)
    fig.eventAfterDraw.Bind(_SetLimitsAfterDraw)
    # Return
    axes.Draw()
    return l
if __name__ == '__main__':
    # Demo: plot a sinc-like antenna-pattern curve twice, the second time
    # rotated by 20 degrees, on the same polar axes.
    # Make data
    import numpy as np
    angs = 0.1 + np.linspace(-90, 90, 181)  # 0.1+ get rid of singularity
    angsRads = np.pi * angs / 180.0
    # Magnitude in dB, normalized so the peak sits at 0 dB.
    mag = 10 * np.log10(np.abs(np.sin(10 * angsRads) / angsRads)) + angsRads
    mag = mag - np.max(mag)
    # Show data
    vv.polarplot( angs, mag, lc='b')
    vv.polarplot(angs+20, mag, lc='r', lw=2)
    a = vv.gca()  # Triggers an update required for polar plots
| bsd-3-clause |
garrettcap/Bulletproof-Backup | Python2.7/lib/python2.7/site-packages/pip/vendor/html5lib/treewalkers/pulldom.py | 1729 | 2302 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
    """Tree walker over an xml.dom.pulldom event stream.

    Translates pulldom events (START_ELEMENT, END_ELEMENT, COMMENT,
    CHARACTERS, ...) into html5lib treewalker tokens.
    """
    def __iter__(self):
        # Node of a void element whose END_ELEMENT event must be swallowed
        # (its EmptyTag token already represents both start and end).
        ignore_until = None
        # Emission lags one event behind so each event can peek at its
        # successor (needed to tell whether a void element has children).
        previous = None
        for event in self.tree:
            if previous is not None and \
                (ignore_until is None or previous[1] is ignore_until):
                if previous[1] is ignore_until:
                    ignore_until = None
                for token in self.tokens(previous, event):
                    yield token
                    if token["type"] == "EmptyTag":
                        ignore_until = previous[1]
            previous = event
        # Flush the final buffered event (next=None signals end of stream).
        if ignore_until is None or previous[1] is ignore_until:
            for token in self.tokens(previous, None):
                yield token
        elif ignore_until is not None:
            raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
    def tokens(self, event, next):
        """Yield treewalker tokens for a single pulldom *event*.

        *next* is the following event (or None); it is only consulted to
        decide whether a void element is immediately closed.
        """
        type, node = event
        if type == START_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            attrs = {}
            # Re-fetch each attribute node to obtain namespace/localName.
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                attrs[(attr.namespaceURI, attr.localName)] = attr.value
            if name in voidElements:
                for token in self.emptyTag(namespace,
                                           name,
                                           attrs,
                                           not next or next[1] is not node):
                    yield token
            else:
                yield self.startTag(namespace, name, attrs)
        elif type == END_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            if name not in voidElements:
                yield self.endTag(namespace, name)
        elif type == COMMENT:
            yield self.comment(node.nodeValue)
        elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
            for token in self.text(node.nodeValue):
                yield token
        else:
            # Unrecognized pulldom event types are surfaced as-is.
            yield self.unknown(type)
| gpl-2.0 |
cypresskr/linux | tools/perf/util/setup.py | 2079 | 1438 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """build_ext that redirects output into Makefile-chosen directories."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Override distutils' defaults with the directories supplied by the
        # perf build system via PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP.
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """install_lib that installs from the env-configured build directory."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        # Install from the same directory build_ext wrote into.
        self.build_dir = build_lib
# Compiler flags: base flags plus whatever the surrounding build exports.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
# Build locations and prebuilt libraries are passed in by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
# Read the extension source list, skipping blanks and '#' comment lines.
# (file() is the Python 2 builtin; this script declares #!/usr/bin/python2.)
ext_sources = [f.strip() for f in file('util/python-ext-sources')
                if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
		  extra_objects = [libtraceevent, liblk],
                 )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
ramanajee/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py | 121 | 11443 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Unit tests for watchlistparser.py.'''
import logging
import sys
from webkitpy.common import webkitunittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.watchlist.watchlistparser import WatchListParser
class WatchListParserTest(webkitunittest.TestCase):
    """Validates WatchListParser error reporting for malformed watch lists.

    Each test feeds a watch-list string to parse() and checks the logged
    diagnostics via OutputCapture.
    """
    def setUp(self):
        """Create a fresh parser for each test."""
        webkitunittest.TestCase.setUp(self)
        self._watch_list_parser = WatchListParser()
    def test_bad_section(self):
        """An unknown top-level section name is reported."""
        watch_list = ('{"FOO": {}}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='Unknown section "FOO" in watch list.\n')
    def test_section_typo(self):
        """A near-miss section name also gets a spelling suggestion."""
        watch_list = ('{"DEFINTIONS": {}}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='Unknown section "DEFINTIONS" in watch list.'
            + '\n\nPerhaps it should be DEFINITIONS.\n')
    def test_bad_definition(self):
        """Definition names may not contain '|' (reserved character)."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1|A": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '     },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='Invalid character "|" in definition "WatchList1|A".\n')
    def test_bad_filename_regex(self):
        """An invalid filename regex is reported with the re error text."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r"*",'
            '            "more": r"RefCounted",'
            '        },'
            '     },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='The regex "*" is invalid due to "nothing to repeat".\n')
    def test_bad_more_regex(self):
        """An invalid "more" regex is reported with the re error text."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r"aFileName\\.cpp",'
            '            "more": r"*",'
            '        },'
            '     },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='The regex "*" is invalid due to "nothing to repeat".\n')
    def test_bad_match_type(self):
        """An unknown pattern key inside a definition is reported."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "nothing_matches_this": r".*\\MyFileName\\.cpp",'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '     },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='Unknown pattern type "nothing_matches_this" in definition "WatchList1".\n')
    def test_match_type_typo(self):
        """A near-miss pattern key also gets a spelling suggestion."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "iflename": r".*\\MyFileName\\.cpp",'
            '            "more": r"RefCounted",'
            '        },'
            '     },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='Unknown pattern type "iflename" in definition "WatchList1".'
            + '\n\nPerhaps it should be filename.\n')
    def test_empty_definition(self):
        """A definition with no patterns is flagged for deletion."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '        },'
            '     },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='The definition "WatchList1" has no patterns, so it should be deleted.\n')
    def test_empty_cc_rule(self):
        """An empty CC rule is flagged, which also orphans its definition."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '     },'
            '    "CC_RULES": {'
            '        "WatchList1": [],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='A rule for definition "WatchList1" is empty, so it should be deleted.\n'
            + 'The following definitions are not used and should be removed: WatchList1\n')
    def test_cc_rule_with_invalid_email(self):
        """CC emails must correspond to contributors in committers.py."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '     },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin+bad+email@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='The email alias levin+bad+email@chromium.org which is'
            + ' in the watchlist is not listed as a contributor in committers.py\n')
    def test_cc_rule_with_secondary_email(self):
        """A contributor's secondary email address is accepted silently."""
        # FIXME: We should provide a mock of CommitterList so that we can test this on fake data.
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '     },'
            '    "CC_RULES": {'
            '        "WatchList1": ["ojan.autocc@gmail.com"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='')
    def test_empty_message_rule(self):
        """An empty MESSAGE rule is flagged like an empty CC rule."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '     },'
            '    "MESSAGE_RULES": {'
            '        "WatchList1": ['
            '        ],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='A rule for definition "WatchList1" is empty, so it should be deleted.\n'
            + 'The following definitions are not used and should be removed: WatchList1\n')
    def test_unused_defintion(self):
        """A definition referenced by no rule at all is flagged."""
        # NOTE: "defintion" typo in the method name is preserved (test names
        # are discovered by prefix; renaming is out of scope here).
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '     },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='The following definitions are not used and should be removed: WatchList1\n')
    def test_cc_rule_with_undefined_defintion(self):
        """A CC rule naming a nonexistent definition is flagged."""
        watch_list = (
            '{'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"]'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='In section "CC_RULES", the following definitions are not used and should be removed: WatchList1\n')
    def test_message_rule_with_undefined_defintion(self):
        """A MESSAGE rule naming a nonexistent definition is flagged."""
        watch_list = (
            '{'
            '    "MESSAGE_RULES": {'
            '        "WatchList1": ["The message."]'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='In section "MESSAGE_RULES", the following definitions are not used and should be removed: WatchList1\n')
    def test_cc_rule_with_undefined_defintion_with_suggestion(self):
        """A near-miss definition name in a rule gets a spelling suggestion."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '     },'
            '    "CC_RULES": {'
            '        "WatchList": ["levin@chromium.org"]'
            '    },'
            '    "MESSAGE_RULES": {'
            '        "WatchList1": ["levin@chromium.org"]'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
            expected_logs='In section "CC_RULES", the following definitions are not used and should be removed: WatchList'
            + '\n\nPerhaps it should be WatchList1.\n')
| bsd-3-clause |
pchaigno/grreat | lib/checks/filters_test.py | 4 | 3125 | #!/usr/bin/env python
"""Tests for grr.lib.rdfvalues.checks."""
import collections
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.checks import filters
from grr.lib.rdfvalues import checks
# Just a named tuple that can be used to test objectfilter expressions.
Sample = collections.namedtuple("Sample", ["x", "y"])
class FilterTests(test_lib.GRRBaseTest):
    """Test filter methods and operations."""
    def testNonexistentFilterIsError(self):
        """Constructing a Filter with an unregistered type raises."""
        self.assertRaises(filters.DefinitionError, checks.Filter, type="NoFilter")
    def testFilter(self):
        """A Filter is built from its type and wires up the filter hook."""
        kwargs = {"type": "ObjectFilter"}
        filt = checks.Filter(**kwargs)
        self.assertEqual(filt.type, kwargs["type"])
        self.assertIsInstance(filt, checks.Filter)
        # Ensure the filter hook is initialized as well.
        self.assertIsInstance(filt._filter, filters.ObjectFilter)
    def testFilterWithExpression(self):
        """A Filter retains the expression it was given."""
        kwargs = {"type": "ObjectFilter", "expression": "do stuff"}
        filt = checks.Filter(**kwargs)
        self.assertIsInstance(filt, checks.Filter)
        self.assertEqual(filt.type, kwargs["type"])
        self.assertEqual(filt.expression, kwargs["expression"])
    def testFilterRegistry(self):
        """GetFilter resolves registered names and rejects unknown ones."""
        self.assertIsInstance(filters.Filter.GetFilter("ObjectFilter"),
                              filters.ObjectFilter)
        self.assertRaises(filters.DefinitionError, filters.Filter.GetFilter, "???")
class HandlerTests(test_lib.GRRBaseTest):
    """Test handler operations."""
    def setUp(self):
        """Build valid/invalid filter sets and expected result fixtures."""
        super(HandlerTests, self).setUp()
        fx0 = checks.Filter({"type": "ObjectFilter", "expression": "x == 0"})
        fy0 = checks.Filter({"type": "ObjectFilter", "expression": "y == 0"})
        # Deliberately malformed expression, used to exercise validation.
        bad = checks.Filter({"type": "ObjectFilter", "expression": "y =="})
        self.ok = [fx0, fy0]
        self.bad = [fx0, fy0, bad]
        self.all = [Sample(0, 0), Sample(0, 1), Sample(1, 0), Sample(1, 1)]
        # SERIAL = AND of the filters; PARALLEL = OR of the filters.
        self.serial = [Sample(0, 0)]
        self.parallel = [Sample(0, 0), Sample(0, 1), Sample(1, 0)]
    def GetFilters(self, filt_defs):
        """Initialize one or more filters as if they were contained in a probe."""
        # The artifact isn't actually used for anything, it's just required to
        # initialize handlers.
        probe = rdfvalue.Probe(artifact="Data", filters=filt_defs)
        return probe.filters
    def testValidateFilters(self):
        """Valid filters parse; a malformed expression raises."""
        self.assertEquals(2, len(self.GetFilters(self.ok)))
        self.assertRaises(filters.DefinitionError, self.GetFilters, self.bad)
    def testNoOpHandler(self):
        """PASSTHROUGH returns every sample unchanged."""
        h = filters.GetHandler("PASSTHROUGH")
        handler = h("Data", filters=self.GetFilters(self.ok))
        self.assertItemsEqual(self.all, handler.Parse(self.all))
    def testParallelHandler(self):
        """PARALLEL keeps samples matching ANY filter."""
        h = filters.GetHandler("PARALLEL")
        handler = h("Data", filters=self.GetFilters(self.ok))
        self.assertItemsEqual(self.parallel, handler.Parse(self.all))
    def testSerialHandler(self):
        """SERIAL keeps only samples matching ALL filters."""
        h = filters.GetHandler("SERIAL")
        handler = h("Data", filters=self.GetFilters(self.ok))
        self.assertItemsEqual(self.serial, handler.Parse(self.all))
def main(argv):
    """Entry point: delegate to the GRR test runner."""
    test_lib.main(argv)
if __name__ == "__main__":
    flags.StartMain(main)
| apache-2.0 |
Krossom/python-for-android | python-build/python-libs/gdata/tests/gdata_tests/codesearch_test.py | 133 | 1930 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
import gdata.codesearch
import gdata.test_data
class CodeSearchDataTest(unittest.TestCase):
    """Checks conversion of the canned code-search XML feed into objects."""

    def setUp(self):
        # Parse the canned test feed once per test.
        self.feed = gdata.codesearch.CodesearchFeedFromString(
            gdata.test_data.CODE_SEARCH_FEED)

    def testCorrectXmlConversion(self):
        """The parsed feed exposes id, entries, matches, file and package."""
        # assertEqual replaces the deprecated assert_() alias: same
        # pass/fail behavior, but failures report both values.
        self.assertEqual(
            self.feed.id.text,
            'http://www.google.com/codesearch/feeds/search?q=malloc')
        self.assertEqual(len(self.feed.entry), 10)
        for entry in self.feed.entry:
            # Only one entry in the canned feed has this id; it carries the
            # match/file/package details asserted below.
            if entry.id.text == ('http://www.google.com/codesearch?hl=en&q=+ma'
                'lloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1'
                '&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoco'
                'nf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=softwa'
                're/autoconf/manual/autoconf-2.60/autoconf.html-002#first'):
                self.assertEqual(len(entry.match), 4)
                for match in entry.match:
                    if match.line_number == '4':
                        self.assertEqual(match.type, 'text/html')
                self.assertEqual(
                    entry.file.name,
                    'software/autoconf/manual/autoconf-2.60/autoconf.html-002')
                self.assertEqual(entry.package.name, 'http://www.gnu.org')
                self.assertEqual(entry.package.uri, 'http://www.gnu.org')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
tvwenger/galfacts | channel.py | 1 | 1516 | """
channel.py
Channel object for GALFACTS transient search
02 June 2014 - Trey Wenger - creation
"""
import os
import sys
import numpy as np
class Channel(object):
    """Channel object for GALFACTS transient search.

    Wraps one channel's flux-vs-time data file (7 whitespace-separated
    columns: ra, dec, ast, I, Q, U, V). The file is re-read on every
    accessor call so memory stays low when many channels are processed.
    """
    def __init__(self, chan_num, beam_num, **options):
        """Initialize the channel object.

        chan_num -- channel number (formatted into the file name)
        beam_num -- beam number (formatted into the directory name)
        options  -- must supply data_filepath, field and date

        Sets self.error True (and num_points 0) when the data file is
        missing, instead of raising.
        """
        self.chan_file = "{0}/{1}/{2}/beam{3}/fluxtime{4:04d}.dat".\
            format(options["data_filepath"],
                   options["field"],
                   options["date"],
                   beam_num,
                   chan_num)
        # Bug fix: check existence BEFORE reading. Previously np.loadtxt
        # ran first, so a missing file raised IOError and self.error was
        # never set.
        self.error = (not os.path.isfile(self.chan_file))
        if self.error:
            self.num_points = 0
        else:
            ra, dec, ast, I, Q, U, V = self._load()
            self.num_points = len(ra)
    def _load(self):
        """Read and return the 7 data columns as 1-D arrays.

        ndmin=2 keeps each column an array even for a single-row file
        (np.loadtxt would otherwise squeeze it to 0-d and break len()).
        """
        return np.loadtxt(self.chan_file, unpack=True, ndmin=2)
    def average(self):
        """Return the average Stokes (I, Q, U, V) for this channel"""
        ra, dec, ast, I, Q, U, V = self._load()
        self.num_points = len(ra)
        return (np.mean(I), np.mean(Q), np.mean(U), np.mean(V))
    def add_points(self, Iarr, Qarr, Uarr, Varr):
        """Add these channel's points to the running I, Q, U, V total
        for each timestamp"""
        ra, dec, ast, I, Q, U, V = self._load()
        return (Iarr + I, Qarr + Q, Uarr + U, Varr + V)
    def get_coordinates(self):
        """Get the AST, RA, and DEC for this channel"""
        ra, dec, ast, I, Q, U, V = self._load()
        return ra, dec, ast
if __name__ == "__main__":
    # This module is a library; refuse direct execution.
    sys.exit("Error: module not meant to be run at top level.")
| mit |
jjmiranda/edx-platform | lms/djangoapps/instructor_task/tests/test_models.py | 4 | 4163 | """
Tests for instructor_task/models.py.
"""
import copy
from cStringIO import StringIO
import time
import boto
from django.conf import settings
from django.test import SimpleTestCase, override_settings
from mock import patch
from common.test.utils import MockS3Mixin
from instructor_task.models import ReportStore
from instructor_task.tests.test_base import TestReportMixin
from opaque_keys.edx.locator import CourseLocator
class ReportStoreTestMixin(object):
    """
    Mixin for report store tests.

    Subclasses supply the concrete store via create_report_store().
    """
    def setUp(self):
        super(ReportStoreTestMixin, self).setUp()
        # A synthetic course key; no real course content is required.
        self.course_id = CourseLocator(org="testx", course="coursex", run="runx")
    def create_report_store(self):
        """
        Subclasses should override this and return their report store.
        """
        pass
    def test_links_for_order(self):
        """
        Test that ReportStore.links_for() returns file download links
        in reverse chronological order.
        """
        report_store = self.create_report_store()
        self.assertEqual(report_store.links_for(self.course_id), [])
        report_store.store(self.course_id, 'old_file', StringIO())
        time.sleep(1)  # Ensure we have a unique timestamp.
        report_store.store(self.course_id, 'middle_file', StringIO())
        time.sleep(1)  # Ensure we have a unique timestamp.
        report_store.store(self.course_id, 'new_file', StringIO())
        # Most recently stored file must come first.
        self.assertEqual(
            [link[0] for link in report_store.links_for(self.course_id)],
            ['new_file', 'middle_file', 'old_file']
        )
class LocalFSReportStoreTestCase(ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
    """
    Test the old LocalFSReportStore configuration.
    """
    def create_report_store(self):
        """
        Create and return a DjangoStorageReportStore using the old
        LocalFSReportStore configuration.
        """
        # Relies on the GRADES_DOWNLOAD test settings, which presumably
        # default to local-filesystem storage — confirm in test settings.
        return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
# Force the legacy 's3' storage type for this test case only.
@patch.dict(settings.GRADES_DOWNLOAD, {'STORAGE_TYPE': 's3'})
class S3ReportStoreTestCase(MockS3Mixin, ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
    """
    Test the old S3ReportStore configuration.

    MockS3Mixin stubs out boto, so no real AWS access takes place.
    """
    def create_report_store(self):
        """
        Create and return a DjangoStorageReportStore using the old
        S3ReportStore configuration.
        """
        # The bucket must exist (in the mock) before the store can use it.
        connection = boto.connect_s3()
        connection.create_bucket(settings.GRADES_DOWNLOAD['BUCKET'])
        return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
class DjangoStorageReportStoreLocalTestCase(ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
    """
    Test the DjangoStorageReportStore implementation using the local
    filesystem.
    """
    def create_report_store(self):
        """
        Create and return a DjangoStorageReportStore configured to use the
        local filesystem for storage.
        """
        # Deep-copy so the settings override is isolated to this test.
        test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
        test_settings['STORAGE_KWARGS'] = {'location': settings.GRADES_DOWNLOAD['ROOT_PATH']}
        with override_settings(GRADES_DOWNLOAD=test_settings):
            return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
class DjangoStorageReportStoreS3TestCase(MockS3Mixin, ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
    """
    Test the DjangoStorageReportStore implementation using S3 stubs.
    """
    def create_report_store(self):
        """
        Create and return a DjangoStorageReportStore configured to use S3 for
        storage.
        """
        # Deep-copy so the settings override is isolated to this test.
        test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
        test_settings['STORAGE_CLASS'] = 'storages.backends.s3boto.S3BotoStorage'
        test_settings['STORAGE_KWARGS'] = {
            'bucket': settings.GRADES_DOWNLOAD['BUCKET'],
            'location': settings.GRADES_DOWNLOAD['ROOT_PATH'],
        }
        with override_settings(GRADES_DOWNLOAD=test_settings):
            # The (mock) bucket must exist before the store can use it.
            connection = boto.connect_s3()
            connection.create_bucket(settings.GRADES_DOWNLOAD['STORAGE_KWARGS']['bucket'])
            return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
| agpl-3.0 |
indradhanush/filesync-server | src/backends/db/tests/test_dbwatcher.py | 6 | 10712 | # Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""Tests for the backends.tools.dbwatcher module."""
__metaclass__ = type
import mocker
from backends.db import dbwatcher
class StubModule:
    """A stub implementation of part of the DB-API module interface."""

    def __init__(self):
        pass

    def connect(self, *unused_args, **unused_kwargs):
        """Return a stub connection, ignoring all connection parameters."""
        connection = StubConnection()
        return connection
class StubConnection:
    """A stub implementation of the DB-API connection interface."""

    # Arbitrary class attribute, used by tests of attribute pass-through.
    connection_attribute = 42

    def __init__(self):
        pass

    def cursor(self):
        """Create a stub cursor."""
        return StubCursor()

    def commit(self):
        """Commit the transaction (a no-op for the stub)."""

    def rollback(self):
        """Roll back the transaction (a no-op for the stub)."""
class StubCursor:
    """A stub implementation of the DB-API cursor interface."""

    # Arbitrary class attribute, used by tests of attribute pass-through.
    cursor_attribute = 42

    def __init__(self):
        pass

    def execute(self, statement, params=None):
        """Execute a statement (a no-op for the stub)."""

    def executemany(self, statement, params=None):
        """Execute a statement multiple times (a no-op for the stub)."""
class DbWatcherTests(mocker.MockerTestCase):
"""Tests for the DatabaseWatcher class."""
def test_install_uninstall(self):
"""The install() and uninstall() methods correctly alter the module."""
module = StubModule()
orig_connect = module.connect
watcher = dbwatcher.DatabaseWatcher(module)
watcher.install()
self.assertNotEqual(module.connect, orig_connect)
self.assertEqual(module.connect, watcher._connect)
self.assertEqual(watcher._orig_connect, orig_connect)
watcher.uninstall()
self.assertEqual(module.connect, orig_connect)
def test_enable_disable(self):
"""The enable() and disable() methods restrict access to databases."""
module = StubModule()
watcher = dbwatcher.DatabaseWatcher(module)
watcher.install()
self.assertRaises(
dbwatcher.DatabaseNotEnabled, watcher._check_enabled, 'foo')
watcher.enable('foo')
watcher._check_enabled('foo')
# Multiple invocations fail.
self.assertRaises(AssertionError, watcher.enable, 'foo')
watcher.disable('foo')
self.assertRaises(
dbwatcher.DatabaseNotEnabled, watcher._check_enabled, 'foo')
self.assertRaises(AssertionError, watcher.disable, 'foo')
def test_connect(self):
"""Connection wrappers know what database they are for."""
module = StubModule()
watcher = dbwatcher.DatabaseWatcher(module)
watcher.install()
watcher.enable('foo')
conn = module.connect('dbname=foo')
self.assertTrue(isinstance(conn, dbwatcher.ConnectionWrapper))
self.assertEqual(conn._dbname, 'foo')
def test_connect_kwargs(self):
"""Connection wrappers know what database they are for."""
module = StubModule()
watcher = dbwatcher.DatabaseWatcher(module)
watcher.install()
watcher.enable('foo')
conn = module.connect(database='foo')
self.assertTrue(isinstance(conn, dbwatcher.ConnectionWrapper))
self.assertTrue(isinstance(conn._real_connection, StubConnection))
self.assertEqual(conn._dbname, 'foo')
def test_connection_cursor(self):
"""Wrapped connections create wrapped cursors."""
module = StubModule()
watcher = dbwatcher.DatabaseWatcher(module)
watcher.install()
watcher.enable('foo')
conn = module.connect(database='foo')
cursor = conn.cursor()
self.assertTrue(isinstance(cursor, dbwatcher.CursorWrapper))
self.assertTrue(isinstance(cursor._real_cursor, StubCursor))
self.assertTrue(isinstance(cursor._connection,
dbwatcher.ConnectionWrapper))
def test_connection_commit(self):
"""Commits on the connection are reported to the watcher."""
MockDatabaseWatcher = self.mocker.patch(dbwatcher.DatabaseWatcher)
self.expect(MockDatabaseWatcher._check_enabled('foo')).count(2)
MockDatabaseWatcher._saw_commit('foo')
self.mocker.replay()
module = StubModule()
watcher = dbwatcher.DatabaseWatcher(module)
watcher.install()
conn = module.connect(database='foo')
conn.commit()
def test_connection_attr_access(self):
"""Attribute access is passed through to the underlying connection."""
module = StubModule()
watcher = dbwatcher.DatabaseWatcher(module)
watcher.install()
watcher.enable('foo')
conn = module.connect(database='foo')
self.assertTrue(conn.connection_attribute, 42)
conn.connection_attribute = 43
self.assertTrue(conn.connection_attribute, 43)
self.assertRaises(AttributeError, getattr, conn, 'no_such_attr')
def test_cursor_execute(self):
"""execute() on cursors is reported to the watcher."""
MockDatabaseWatcher = self.mocker.patch(dbwatcher.DatabaseWatcher)
self.expect(MockDatabaseWatcher._check_enabled('foo')).count(2)
MockDatabaseWatcher._saw_execute('foo')
self.mocker.replay()
module = StubModule()
watcher = dbwatcher.DatabaseWatcher(module)
watcher.install()
conn = module.connect(database='foo')
cursor = conn.cursor()
cursor.execute('dummy sql')
def test_cursor_executemany(self):
"""executemany() on cursors is reported to the watcher."""
MockDatabaseWatcher = self.mocker.patch(dbwatcher.DatabaseWatcher)
self.expect(MockDatabaseWatcher._check_enabled('foo')).count(2)
MockDatabaseWatcher._saw_execute('foo')
self.mocker.replay()
module = StubModule()
watcher = dbwatcher.DatabaseWatcher(module)
watcher.install()
conn = module.connect(database='foo')
cursor = conn.cursor()
cursor.executemany('dummy sql', [[1], [2]])
def test_cursor_attr_access(self):
    """Attribute access is passed through to the underlying cursor."""
    module = StubModule()
    watcher = dbwatcher.DatabaseWatcher(module)
    watcher.install()
    watcher.enable('foo')
    conn = module.connect(database='foo')
    cursor = conn.cursor()
    # BUGFIX: assertTrue(x, 42) treats 42 as the failure *message* and
    # passes for any truthy x; assertEqual is what was intended here.
    self.assertEqual(cursor.cursor_attribute, 42)
    cursor.cursor_attribute = 43
    self.assertEqual(cursor.cursor_attribute, 43)
    self.assertRaises(AttributeError, getattr, cursor, 'no_such_attr')
def test_saw_execute(self):
    """The _saw_execute() method sends notifications when appropriate."""
    # Expect plain (non-commit) notifications for both databases, and no
    # commit-flavored notifications at all.
    mock_watcher = self.mocker.patch(dbwatcher.DatabaseWatcher)
    mock_watcher._notify('foo')
    mock_watcher._notify('bar')
    self.expect(mock_watcher._notify('foo', True)).count(0)
    self.expect(mock_watcher._notify('bar', True)).count(0)
    self.mocker.replay()
    watcher = dbwatcher.DatabaseWatcher(StubModule())
    # Repeated executes on 'foo' collapse into a single notification.
    watcher._saw_execute('foo')
    watcher._saw_execute('foo')
    self.assertTrue('foo' in watcher._used_databases)
    # A different database still gets its own notification.
    watcher._saw_execute('bar')
def test_saw_commit(self):
    """The _saw_commit() method sends notifications when appropriate."""
    mock_watcher = self.mocker.patch(dbwatcher.DatabaseWatcher)
    mock_watcher._notify('foo', True)
    mock_watcher._notify('bar')
    mock_watcher._notify('bar', True)
    self.expect(mock_watcher._notify('foo')).count(0)
    self.mocker.replay()
    watcher = dbwatcher.DatabaseWatcher(StubModule())
    # Only a single commit notification is sent for 'foo', even though
    # two commits are seen.
    watcher._saw_commit('foo')
    watcher._saw_commit('foo')
    # An execute following a commit does not trigger a notification.
    watcher._saw_execute('foo')
    # In the other order (execute then commit), both notifications fire.
    watcher._saw_execute('bar')
    watcher._saw_commit('bar')
def test_reset(self):
    """The reset() method allows notifications to begin again."""
    # Two execute notifications for 'foo' and two commit notifications
    # for 'bar' -- one on each side of the reset() call.
    mock_watcher = self.mocker.patch(dbwatcher.DatabaseWatcher)
    self.expect(mock_watcher._notify('foo')).count(2)
    self.expect(mock_watcher._notify('bar', True)).count(2)
    self.mocker.replay()
    watcher = dbwatcher.DatabaseWatcher(StubModule())
    # Execute notifications: one before reset() and one after.
    watcher._saw_execute('foo')
    watcher._saw_execute('foo')
    watcher.reset('foo')
    watcher._saw_execute('foo')
    # Commit notifications behave the same way.
    watcher._saw_commit('bar')
    watcher._saw_commit('bar')
    watcher.reset('bar')
    watcher._saw_commit('bar')
def test_notify(self):
    """The _notify() method invokes the callbacks hooked to a database."""
    watcher = dbwatcher.DatabaseWatcher(StubModule())
    observed = []
    def record(dbname, commit):
        """Log arguments passed to the callback."""
        observed.append((dbname, commit))
    watcher.hook('foo', record)
    # A plain notification reports commit=False ...
    watcher._notify('foo')
    self.assertEqual(len(observed), 1)
    self.assertEqual(observed[-1], ('foo', False))
    # ... and commit=True is passed through to the callback.
    watcher._notify('foo', commit=True)
    self.assertEqual(len(observed), 2)
    self.assertEqual(observed[-1], ('foo', True))
    # No notification is sent for databases without hooks.
    watcher._notify('bar')
    self.assertEqual(len(observed), 2)
    # Unhooking the callback stops notification too.
    watcher.unhook('foo', record)
    watcher._notify('foo')
    self.assertEqual(len(observed), 2)
| agpl-3.0 |
zooko/ogresvpixies | HexBoard.py | 1 | 16946 | CVS_ID = '$Id: HexBoard.py,v 1.3 2002/02/09 22:46:13 zooko Exp $'
import java
from java.awt import *
from java.awt.event import *
from java.awt.geom import *
from javax.swing import *
from javax.swing.text import *
import math
from types import *
from util import *
# Boolean aliases -- this Jython-era code predates the True/False builtins.
true = 1
false = 0
# Extra pixels added to a hex's bounding box for the border stroke.
# NOTE(review): original comment was "???" -- the value 2 is unconfirmed.
LINEWIDTH=2
# Hex instance sequence functions:
def all_contain_a(hs, klass):
    """Returns true if every hex in `hs' contains an instance of `klass'.

    A None entry (an off-board hex) counts as containing one.  Vacuously
    true for an empty sequence.
    """
    # Count matches with a list comprehension rather than len(filter(...)):
    # filter() returns a lazy iterator without len() on Python 3 / modern
    # Jython, while this count behaves identically on both 2.x and 3.x.
    return len([h for h in hs if (h is None) or h.contains_a(klass)]) == len(hs)
def any_contain_a(hs, klass):
    """Returns true if at least one hex in `hs' contains an instance of `klass'.

    A None entry (an off-board hex) counts as containing one.  False for
    an empty sequence.
    """
    # len(filter(...)) raises TypeError on Python 3 / modern Jython
    # (filter is lazy); counting via a comprehension is portable and
    # identical on 2.x.
    return len([h for h in hs if (h is None) or h.contains_a(klass)]) != 0
def all_contain_only(hs, klass):
    """Returns true if every hex in `hs' contains only instances of `klass'.

    A None entry (an off-board hex) passes the test.  Vacuously true for
    an empty sequence.
    """
    # len(filter(...)) raises TypeError on Python 3 / modern Jython
    # (filter is lazy); counting via a comprehension is portable and
    # identical on 2.x.
    return len([h for h in hs if (h is None) or h.contains_only(klass)]) == len(hs)
def all_are_empty(hs):
    """Returns true if every hex in `hs' holds no items.

    A None entry (an off-board hex) counts as empty.  Vacuously true for
    an empty sequence.
    """
    # len(filter(...)) raises TypeError on Python 3 / modern Jython
    # (filter is lazy); counting via a comprehension is portable and
    # identical on 2.x.
    return len([h for h in hs if (h is None) or h.is_empty()]) == len(hs)
# Coordinate functions:
def nw_of(coord):
    """Returns the coordinate of the hex one step north-west of `coord'.

    Odd rows are offset half a hex to the east, so the x shift depends on
    the row parity.
    """
    hx, hy = coord
    return (hx + (hy % 2) - 1, hy + 1)
def ne_of(coord):
    """Returns the coordinate of the hex one step north-east of `coord'.

    Odd rows are offset half a hex to the east, so the x shift depends on
    the row parity.
    """
    hx, hy = coord
    return (hx + (hy % 2), hy + 1)
def e_of(coord):
    """Returns the coordinate of the hex one step east of `coord'."""
    hx, hy = coord
    return (hx + 1, hy)
def w_of(coord):
    """Returns the coordinate of the hex one step west of `coord'."""
    hx, hy = coord
    return (hx - 1, hy)
def se_of(coord):
    """Returns the coordinate of the hex one step south-east of `coord'.

    Odd rows are offset half a hex to the east, so the x shift depends on
    the row parity.
    """
    hx, hy = coord
    return (hx + (hy % 2), hy - 1)
def sw_of(coord):
    """Returns the coordinate of the hex one step south-west of `coord'.

    Odd rows are offset half a hex to the east, so the x shift depends on
    the row parity.
    """
    hx, hy = coord
    return (hx + (hy % 2) - 1, hy - 1)
# Coordinate-based path related functions:
def distance(a, b):
    """Returns the distance, in hexes, between two hex coordinates.

    Returns 0 when a == b.  Asserts (with a diagnostic message) if the
    30-hop budget was exhausted before reaching b.
    """
    path = shortest_path(a, b, hopsleft=30)
    # BUGFIX: when a == b, shortest_path returns () and the old
    # unconditional path[-1] raised IndexError; an empty path is a valid
    # zero-length result.  (Also replaced py2-only backtick repr with
    # repr(), which produces the same message text.)
    assert (not path) or (path[-1] is not None), "ACK! Bad distance, %s, between %s and %s; path: %s" % (repr(len(path)), repr(a), repr(b), repr(path))
    return len(path)
def shortest_path(src, dst, hopsleft=None):
    """
    shortest_path(src, dst, hopsleft=None) -> path

    Returns a sequence of hex coordinates.

    src and dst are tuples of hex coordinates.

    hopsleft is None or a positive integer; if an integer the path stops
    after that many hops.

    The return value is a sequence of hex coordinate tuples.  If the
    maximum number of hops was reached, the last element of path is None,
    else it is dst.
    """
    # Slow, recursive implementation: choose a single neighboring hex
    # that moves toward dst, then recurse from there.
    if hopsleft is not None:
        hopsleft = hopsleft - 1
        if hopsleft == 0:
            # Hop budget exhausted -- signify the path was too long.
            return (None,)
    if src == dst:
        return ()
    dx = src[0] - dst[0]
    dy = src[1] - dst[1]
    odd_row = (src[1] % 2 == 1)
    if dx == 0:
        # Same column: step diagonally north or south.  Which diagonal
        # stays in the column depends on the row parity.
        if dy < 0:
            if odd_row:
                step = nw_of(src)
            else:
                step = ne_of(src)
        else:
            if odd_row:
                step = sw_of(src)
            else:
                step = se_of(src)
    elif abs(dx) <= abs(dy):
        # More vertical than horizontal distance: pick the diagonal that
        # reduces both deltas.
        if dx < 0 and dy < 0:
            step = ne_of(src)
        elif dx < 0 and dy > 0:
            step = se_of(src)
        elif dx > 0 and dy < 0:
            step = nw_of(src)
        else:
            step = sw_of(src)
    else:
        # Mostly horizontal distance: step straight east or west.
        if dx < 0:
            step = e_of(src)
        else:
            step = w_of(src)
    return (step,) + shortest_path(step, dst, hopsleft)
class Hex:
    """One hexagonal cell of a HexBoard.

    A Hex knows its board coordinates (hx, hy), its pixel position on the
    board, the items currently occupying it, and how to paint and
    highlight itself.  Constructing a Hex registers it in hb.hexes.
    """
    def __init__(self, hb, hx, hy, bordercolor=Color.green, bgcolor=Color.black, hicolor=Color.pink):
        """
        @param hb: the owning HexBoard
        @param hx, hy: this hex's board (not pixel) coordinates
        @param bordercolor, bgcolor, hicolor: outline, fill and highlight colors
        """
        self.bordercolor = bordercolor
        self.bgcolor = bgcolor
        self.hicolor = hicolor
        self.items = [] # a list of items (the first item is drawn first, so higher-indexed items might overwrite it)
        self.hb = hb
        self.hx = hx
        self.hy = hy
        # Register this hex with the board.
        self.hb.hexes[(hx, hy,)] = self
        # Pixel origin of this hex's bounding box: odd rows are shifted
        # east by half a hex width, and rows are packed at 3/4 hex height.
        self.cx = int(self.hb.cxoffset + ((hx + ((hy % 2)*0.5)) * self.hb.w))
        self.cy = int(self.hb.cyoffset - (hy*0.75*self.hb.h))
        self.highlightflag = 0
        # A copy of the board's shared hex polygon translated into place.
        self.hexonboardpoly = Polygon(self.hb.hexpoly.xpoints, self.hb.hexpoly.ypoints, self.hb.hexpoly.npoints)
        self.hexonboardpoly.translate(self.cx, self.cy)
        self.boundingrect = Rectangle(self.cx, self.cy, self.hb.wplusline, self.hb.hplusline)
    def __repr__(self):
        return "hex(%d, %d)" % (self.hx, self.hy,)
    def highlight(self):
        """Sets the highlight flag of this hex."""
        self.highlightflag = 1
        self.repaint()
    def unhighlight(self):
        """Clears the highlight flag of this hex."""
        self.highlightflag = 0
        self.repaint()
    def is_highlighted(self):
        """Returns true if the highlight flag is set."""
        return self.highlightflag
    def contains(self, pt):
        """
        Returns true if the pixel point `pt' lies inside this hex's
        on-board polygon.
        """
        return self.hexonboardpoly.contains(pt)
    def contains_a(self, klass):
        """Returns true if at least one item here is an instance of `klass'."""
        return len(self.get_all(klass)) > 0
    def contains_only(self, klass):
        """Returns true if every item here is an instance of `klass'
        (vacuously true when the hex is empty)."""
        return len(filter(lambda x, klass=klass: not isinstance(x, klass), self.items)) == 0
    def is_empty(self):
        """Returns true if this hex holds no items."""
        return len(self.items) == 0
    def get_all(self, klass):
        """Returns the items in this hex that are instances of `klass'."""
        return filter(lambda x, klass=klass: isinstance(x, klass), self.items)
    def intersects(self, rect):
        """Returns true if this hex's polygon intersects the rectangle `rect'."""
        return self.hexonboardpoly.intersects(rect)
    def is_adjacent(self, otherhex):
        """Returns true if `otherhex' is one of this hex's (up to six) neighbors.

        East/west neighbors share hy and differ by 1 in hx; diagonal
        neighbors differ by 1 in hy and have hx equal, or shifted by +1 on
        odd rows / -1 on even rows (the (self.hy % 2)*2-1 term).
        """
        return ((abs(otherhex.hx-self.hx)==1) and (otherhex.hy==self.hy)) or ((abs(otherhex.hy-self.hy)==1) and (((otherhex.hx==self.hx)) or (otherhex.hx-self.hx==((self.hy % 2)*2-1))))
    def is_adjacent_to_a(self, klass):
        """Returns true if any neighboring hex contains an instance of `klass'."""
        return self.count_adjacent(klass) > 0
    def count_adjacent(self, klass):
        """Returns how many neighboring hexes contain an instance of `klass'."""
        sum = 0
        for adj in self.get_adjacent_hexes():
            if adj.contains_a(klass):
                sum += 1
        return sum
    def get_adjacent_hexes(self):
        """
        Returns the neighboring hexes that exist on the board.

        excludes Nones
        """
        # Neighbor offsets: e, w, and the four diagonals (whose x offset
        # depends on row parity).
        res = []
        for (dx, dy,) in ( (-1, 0), (1, 0), (0, 1), (0, -1), (((self.hy % 2)*2-1), 1), (((self.hy % 2)*2-1), -1) ,):
            if self.hb.hexes.has_key((self.hx+dx, self.hy+dy,)):
                res.append(self.hb.hexes[(self.hx+dx, self.hy+dy,)])
        return res
    def get_ordered_adjacent_hexes(self):
        """
        Returns the six neighbors in clockwise order starting at the
        north-west: (nw, ne, e, se, sw, w).

        includes Nones
        """
        return (self.get_nw(), self.get_ne(), self.get_e(), self.get_se(), self.get_sw(), self.get_w(),)
    def get_east_trio(self):
        # Every other neighbor starting at index 0: (nw, e, sw).
        # NOTE(review): the slice yields nw/e/sw, which does not obviously
        # match the "east" in the name -- confirm the intended trio.
        return self.get_ordered_adjacent_hexes()[::2]
    def get_west_trio(self):
        # Every other neighbor starting at index 1: (ne, se, w).
        return self.get_ordered_adjacent_hexes()[1::2]
    def get_circle_successor(self, middlehex):
        """
        @returns the hex that is the next hex from `self' in the circle that surrounds `middlehex' (in a clockwise direction)

        @precondition `middlehex' must be adjacent to this hex
        """
        assert self.is_adjacent(middlehex)
        # Walk the nw -> ne -> e -> se -> sw -> w -> nw ring one step.
        if middlehex.get_nw() is self:
            return middlehex.get_ne()
        if middlehex.get_ne() is self:
            return middlehex.get_e()
        if middlehex.get_e() is self:
            return middlehex.get_se()
        if middlehex.get_se() is self:
            return middlehex.get_sw()
        if middlehex.get_sw() is self:
            return middlehex.get_w()
        if middlehex.get_w() is self:
            return middlehex.get_nw()
    def get_circle_predecessor(self, middlehex):
        """
        @returns the hex that is the next hex from `self' in the circle that surrounds `middlehex' (in a counter-clockwise direction)

        @precondition `middlehex' must be adjacent to this hex
        """
        assert self.is_adjacent(middlehex)
        # Walk the same ring as get_circle_successor, one step backwards.
        if middlehex.get_nw() is self:
            return middlehex.get_w()
        if middlehex.get_ne() is self:
            return middlehex.get_nw()
        if middlehex.get_e() is self:
            return middlehex.get_ne()
        if middlehex.get_se() is self:
            return middlehex.get_e()
        if middlehex.get_sw() is self:
            return middlehex.get_se()
        if middlehex.get_w() is self:
            return middlehex.get_sw()
    def get_opposite(self, middlehex):
        """
        @returns the hex that lies directly across `middlehex' from this hex, or None if none

        @precondition `middlehex' must be adjacent to this hex
        """
        assert self.is_adjacent(middlehex)
        if middlehex.get_nw() is self:
            return middlehex.get_se()
        if middlehex.get_ne() is self:
            return middlehex.get_sw()
        if middlehex.get_e() is self:
            return middlehex.get_w()
        if middlehex.get_se() is self:
            return middlehex.get_nw()
        if middlehex.get_sw() is self:
            return middlehex.get_ne()
        if middlehex.get_w() is self:
            return middlehex.get_e()
    def get_nw(self):
        """Returns the hex to the north-west, or None if off the board."""
        return self.hb.hexes.get(nw_of((self.hx, self.hy,)))
    def get_ne(self):
        """Returns the hex to the north-east, or None if off the board."""
        return self.hb.hexes.get(ne_of((self.hx, self.hy,)))
    def get_e(self):
        """Returns the hex to the east, or None if off the board."""
        return self.hb.hexes.get(e_of((self.hx, self.hy,)))
    def get_w(self):
        """Returns the hex to the west, or None if off the board."""
        return self.hb.hexes.get(w_of((self.hx, self.hy,)))
    def get_se(self):
        """Returns the hex to the south-east, or None if off the board."""
        return self.hb.hexes.get(se_of((self.hx, self.hy,)))
    def get_sw(self):
        """Returns the hex to the south-west, or None if off the board."""
        return self.hb.hexes.get(sw_of((self.hx, self.hy,)))
    def paint(self, g):
        """Paints this hex (fill, border, then its items) into graphics context `g'."""
        # Fill with the highlight color when highlighted, else background.
        g.setColor(self.is_highlighted() and self.hicolor or self.bgcolor)
        g.fill(self.hb.hexpoly)
        g.setColor(self.bordercolor)
        g.draw(self.hb.hexpoly)
        # Items are painted in list order, so later items may overdraw
        # earlier ones; an item that fails to paint is reported and the
        # exception is re-raised.
        for i in self.items:
            try:
                i.paint(g)
            except:
                print "i: %s" % i
                raise
    def repaint(self):
        """Asks the board to repaint just this hex's bounding rectangle."""
        # Tell the HexBoard to repaint only this hex's bounding rectangle:
        self.hb.repaint(self.boundingrect)
        # The short-circuit way: just repaint that hex.  We do this by adding the hex coords to our "hexrepaintqueue" and then signalling ourselves that this is a "hexrepaint" call by asking to repaint a rectangle that is a crummy little corner of the board.
        # self.hb.hexrepaintqueue.append((hxloc, hyloc,))
        # self.hb.repaint(self.signalrect)
        # Hrm.  Whoops -- that signalling mechanism doesn't work because only the signalrect is included in the clipping.  Hrm....  Okay after reading lots of docs about this, it looks like "incremental painting" as its called, just isn't possible in Swing (without digging in too deep under the Swing abstraction).  Bummer.
class HexBoard(JPanel):
def __init__(self, cxoffset=10, cyoffset=10, scale=100):
JPanel.__init__(self)
self.setOpaque(true)
self.cxoffset = cxoffset
self.cyoffset = cyoffset
self.s = scale
self.w = int(self.s*math.sqrt(3))
self.h = self.s*2
self.wplusline = self.w+LINEWIDTH
self.hplusline = self.h+LINEWIDTH
self.hexpoly = Polygon()
self.hexpoly.addPoint(self.w/2, 0)
self.hexpoly.addPoint(self.w, self.h/4)
self.hexpoly.addPoint(self.w, (self.h*3)/4)
self.hexpoly.addPoint(self.w/2, self.h)
self.hexpoly.addPoint(0, (self.h*3)/4)
self.hexpoly.addPoint(0, self.h/4)
self.hexinnerpoly = Polygon()
hyp = self.s/8.0
adj = hyp*math.sqrt(3)/2.0
opp = hyp/2.0
self.hexinnerpoly.addPoint(int(self.w/2), int(0+hyp))
self.hexinnerpoly.addPoint(int(self.w-adj), int(self.h/4+opp))
self.hexinnerpoly.addPoint(int(self.w-adj), int((self.h*3)/4-opp))
self.hexinnerpoly.addPoint(int(self.w/2), int(self.h-hyp))
self.hexinnerpoly.addPoint(int(0+adj), int((self.h*3)/4-opp))
self.hexinnerpoly.addPoint(int(0+adj), int(self.h/4+opp))
self.hextophalfpoly = Polygon()
self.hextophalfpoly.addPoint(self.w/2, 0)
self.hextophalfpoly.addPoint(self.w, self.h/4)
self.hextophalfpoly.addPoint(self.w, self.h/2)
self.hextophalfpoly.addPoint(0, self.h/2)
self.hextophalfpoly.addPoint(0, self.h/4)
self.hexbottomhalfpoly = Polygon()
self.hexbottomhalfpoly.addPoint(self.w, self.h/2)
self.hexbottomhalfpoly.addPoint(self.w, (self.h*3)/4)
self.hexbottomhalfpoly.addPoint(self.w/2, self.h)
self.hexbottomhalfpoly.addPoint(0, (self.h*3)/4)
self.hexbottomhalfpoly.addPoint(0, self.h/2)
self.treepoly = Polygon()
TREEHEIGHT = int(self.h*0.4)
TREEWIDTH = int(self.w*0.3)
self.treepoly.addPoint(self.w/2, (self.h-TREEHEIGHT)/2)
self.treepoly.addPoint((self.w+TREEWIDTH)/2, (self.h+TREEHEIGHT)/2)
self.treepoly.addPoint((self.w-TREEWIDTH)/2, (self.h+TREEHEIGHT)/2)
self.treeinnerpoly = Polygon()
self.treeinnerpoly.addPoint(self.w/2, (self.h-TREEHEIGHT)/2+1)
self.treeinnerpoly.addPoint((self.w+TREEWIDTH)/2-1, (self.h+TREEHEIGHT)/2-1)
self.treeinnerpoly.addPoint((self.w-TREEWIDTH)/2+1, (self.h+TREEHEIGHT)/2-1)
self.scrollpoly = Polygon()
SCROLLHEIGHT = int(self.h*0.4)
SCROLLWIDTH = int(self.w*0.3)
self.scrollpoly.addPoint(self.w/2, (self.h-SCROLLHEIGHT)/2)
self.scrollpoly.addPoint(self.w/2+self.s/4, (self.h-SCROLLHEIGHT)/2+self.s/8)
self.scrollpoly.addPoint(self.w/2+self.s/4, (self.h-SCROLLHEIGHT)/2+SCROLLHEIGHT+self.s/8)
self.scrollpoly.addPoint(self.w/2, (self.h-SCROLLHEIGHT)/2+SCROLLHEIGHT)
self.hexes = {} # key: (hx, hy,), value = instance of Hex
def get(self, hc):
if is_type(hc, SequenceTypes) and len(hc) == 2:
pass # no change needed.
elif hasattr(hc, 'hex'):
hc = hc.hex
else:
raise Exception("Unknown hex indicator: %s" % repr(hc))
if is_type(hc, Hex):
# This is kinda funny; for an example, see HexBoard.highlight_path().
hc = (hc.hx, hc.hy)
return self.hexes.get(hc)
def get_many(self, sequence):
return map(self.get, sequence)
def pick_hex(self, pt):
"""
@return hex that the cartesian coordinate `pt' falls into, or None if none
"""
# XXX TODO: do this in a nice efficient manner. :-)
for hex in self.hexes.values():
if hex.contains(pt):
return hex
return None
def paintComponent(self, g):
self.super__paintComponent(g)
cliprect = g.getClipBounds()
# print "HB.paintComponent() cliprect: ", cliprect
# XXX TODO: pick hexes more efficiently. (In case we need to have 1000x1000 hexes visible at once. ;-))
for hex in self.hexes.values():
if hex.intersects(cliprect):
# print "cliprect: %s, hex: %s" % (cliprect, hex,)
# newg = g.create(hex.boundingrect) # Why isn't this supported by the Java API? :-<
newg = g.create(hex.boundingrect.x, hex.boundingrect.y, hex.boundingrect.width, hex.boundingrect.height)
hex.paint(newg)
def unhighlight_all(self):
[hex.unhighlight() for hex in self.hexes.values()]
def get_empty_hex(self, minhx=None, minhy=None, maxhx=None, maxhy=None):
"""
@return a randomly chosen empty hex or None if there are no more empty hexes
"""
for hex in rand_rotate(self.hexes.values()):
if hex.is_empty() and ((minhx is None) or (hex.hx >= minhx)) and ((minhy is None) or (hex.hy >= minhy)) and ((maxhx is None) or (hex.hx <= maxhx)) and ((maxhy is None) or (maxhy <= hex.hy)):
return hex
return None
def find_fitting_font_nw_vertex(self, stro, g):
fontFits=false
maxFontSize = 14
minFontSize = 6
currentFont = Font("SansSerif", Font.PLAIN, maxFontSize)
currentMetrics = g.getFontMetrics(currentFont)
size = currentFont.getSize()
name = currentFont.getName()
style = currentFont.getStyle()
cw = currentMetrics.stringWidth(stro)
ch = currentMetrics.getHeight()
ox = 4
oy = self.h * 0.5 - 1
while not fontFits:
# print ox, oy, cw, ch
if self.hextophalfpoly.contains(ox, oy-ch, cw, ch):
fontFits=true
elif size <= minFontSize:
print "warning, couldn't fit words..."
fontFits=true
else:
currentFont = Font(name, style, size-1)
currentMetrics = g.getFontMetrics(currentFont)
size = currentFont.getSize()
name = currentFont.getName()
style = currentFont.getStyle()
cw = currentMetrics.stringWidth(stro)
ch = currentMetrics.getHeight()
ox = 4
oy = self.h * 0.5 - 1
return (currentFont, ox, oy,)
def find_fitting_font_top_half(self, stro, g):
fontFits=false
maxFontSize = 14
minFontSize = 6
currentFont = Font("SansSerif", Font.PLAIN, maxFontSize)
currentMetrics = g.getFontMetrics(currentFont)
size = currentFont.getSize()
name = currentFont.getName()
style = currentFont.getStyle()
cw = currentMetrics.stringWidth(stro)
ch = currentMetrics.getHeight()
ox = (self.w - cw) * 0.5
oy = self.h * 0.5 - 1
while not fontFits:
# print ox, oy, cw, ch
if self.hextophalfpoly.contains(ox, oy-ch, cw, ch):
fontFits=true
elif size <= minFontSize:
print "warning, couldn't fit words..."
fontFits=true
else:
currentFont = Font(name, style, size-1)
currentMetrics = g.getFontMetrics(currentFont)
size = currentFont.getSize()
name = currentFont.getName()
style = currentFont.getStyle()
cw = currentMetrics.stringWidth(stro)
ch = currentMetrics.getHeight()
ox = (self.w - cw) * 0.5
oy = self.h * 0.5 - 1
return (currentFont, ox, oy,)
def find_fitting_font_bottom_half(self, stro, g):
fontFits=false
maxFontSize = 14
minFontSize = 6
currentFont = Font("SansSerif", Font.PLAIN, maxFontSize)
currentMetrics = g.getFontMetrics(currentFont)
size = currentFont.getSize()
name = currentFont.getName()
style = currentFont.getStyle()
cw = currentMetrics.stringWidth(stro)
ch = currentMetrics.getHeight()
ox = (self.w - cw) * 0.5
oy = self.h * 0.5 + ch
while not fontFits:
if self.hexbottomhalfpoly.contains(ox, oy-ch, cw, ch):
fontFits=true
elif size <= minFontSize:
fontFits=true
else:
currentFont = Font(name, style, size-1)
currentMetrics = g.getFontMetrics(currentFont)
size = currentFont.getSize()
name = currentFont.getName()
style = currentFont.getStyle()
cw = currentMetrics.stringWidth(stro)
ch = currentMetrics.getHeight()
ox = (self.w - cw) * 0.5
oy = self.h * 0.5 + ch
return (currentFont, ox, oy,)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.