repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
huggingface/transformers | src/transformers/commands/lfs.py | 2 | 7951 | """
Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs.
Inspired by: github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py
Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
To launch debugger while developing:
``` [lfs "customtransfer.multipart"]
path = /path/to/transformers/.env/bin/python
args = -m debugpy --listen 5678 --wait-for-client /path/to/transformers/src/transformers/commands/transformers_cli.py
lfs-multipart-upload ```
"""
import json
import os
import subprocess
import sys
import warnings
from argparse import ArgumentParser
from contextlib import AbstractContextManager
from typing import Dict, List, Optional
import requests
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload"
class LfsCommands(BaseTransformersCLICommand):
    """
    Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. This lets users upload
    large files >5GB 🔥. Spec for LFS custom transfer agent is:
    https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md

    This introduces two commands to the CLI:

    1. $ transformers-cli lfs-enable-largefiles

    This should be executed once for each model repo that contains a model file >5GB. It's documented in the error
    message you get if you just try to git push a 5GB file without having enabled it before.

    2. $ transformers-cli lfs-multipart-upload

    This command is called by lfs directly and is not meant to be called by the user.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Subcommand a user runs once per repo to opt in to multipart uploads.
        largefiles_parser = parser.add_parser(
            "lfs-enable-largefiles",
            help=(
                "Deprecated: use `huggingface-cli` instead. "
                "Configure your repository to enable upload of files > 5GB."
            ),
        )
        largefiles_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")
        largefiles_parser.set_defaults(func=lambda args: LfsEnableCommand(args))

        # Subcommand invoked by git-lfs itself, never directly by a user.
        multipart_parser = parser.add_parser(
            LFS_MULTIPART_UPLOAD_COMMAND,
            help=(
                "Deprecated: use `huggingface-cli` instead. "
                "Command will get called by git-lfs, do not call it directly."
            ),
        )
        multipart_parser.set_defaults(func=lambda args: LfsUploadCommand(args))
class LfsEnableCommand:
    """`transformers-cli lfs-enable-largefiles`: configure a local git repo to
    route LFS transfers through the "multipart" custom transfer agent."""

    def __init__(self, args):
        # Parsed argparse namespace; only `args.path` is read in run().
        self.args = args

    def run(self):
        """Register transformers-cli as the multipart transfer agent in the
        repo's git config. Exits with status 1 if the path is not a directory."""
        warnings.warn(
            "Managing repositories through transformers-cli is deprecated. Please use `huggingface-cli` instead."
        )
        local_path = os.path.abspath(self.args.path)
        if not os.path.isdir(local_path):
            print("This does not look like a valid git repo.")
            exit(1)
        # Point git-lfs at this CLI binary as the custom transfer agent...
        subprocess.run(
            "git config lfs.customtransfer.multipart.path transformers-cli".split(), check=True, cwd=local_path
        )
        # ...and tell it which subcommand to call the binary with.
        subprocess.run(
            f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
            check=True,
            cwd=local_path,
        )
        print("Local repo set up for largefiles")
def write_msg(msg: Dict):
    """Serialize ``msg`` as one line of JSON, write it to stdout and flush."""
    sys.stdout.write(json.dumps(msg) + "\n")
    sys.stdout.flush()
def read_msg() -> Optional[Dict]:
    """Read one line-delimited JSON message from stdin.

    Returns None when git-lfs sends a terminate message; exits the process
    for anything that is not a download/upload event.
    """
    msg = json.loads(sys.stdin.readline().strip())

    if msg.get("type") == "terminate" or msg.get("event") == "terminate":
        # git-lfs is done with us; signal the caller to shut down.
        return None

    if msg.get("event") in ("download", "upload"):
        return msg

    logger.critical("Received unexpected message")
    sys.exit(1)
class FileSlice(AbstractContextManager):
    """
    File-like object that only reads a slice of a file.

    Inspired by stackoverflow.com/a/29838711/593036
    """

    def __init__(self, filepath: str, seek_from: int, read_limit: int):
        # Path of the underlying file; opened lazily in __enter__.
        self.filepath = filepath
        # Byte offset at which the slice starts.
        self.seek_from = seek_from
        # Maximum number of bytes this slice exposes.
        self.read_limit = read_limit
        # Number of bytes already consumed through read().
        self.n_seen = 0

    def __enter__(self):
        self.f = open(self.filepath, "rb")
        self.f.seek(self.seek_from)
        return self

    def __len__(self):
        # Effective slice length: capped by both read_limit and the bytes
        # actually available after seek_from.
        total_length = os.fstat(self.f.fileno()).st_size
        return min(self.read_limit, total_length - self.seek_from)

    def read(self, n=-1):
        """Read up to ``n`` bytes (all remaining slice bytes if ``n < 0``),
        never going past the slice's read_limit."""
        if self.n_seen >= self.read_limit:
            return b""
        remaining_amount = self.read_limit - self.n_seen
        data = self.f.read(remaining_amount if n < 0 else min(n, remaining_amount))
        self.n_seen += len(data)
        return data

    def __iter__(self):
        # Bug fix: the original yielded a single 4MB chunk and stopped,
        # silently truncating any slice larger than 4MB when consumed via
        # iteration. Keep yielding chunks until the slice is exhausted.
        while True:
            chunk = self.read(n=4 * 1024 * 1024)
            if not chunk:
                break
            yield chunk

    def __exit__(self, *args):
        self.f.close()
class LfsUploadCommand:
    """`transformers-cli lfs-multipart-upload`: git-lfs custom transfer agent
    that uploads file chunks to presigned URLs, speaking the line-delimited
    JSON protocol over stdin/stdout."""

    def __init__(self, args):
        # Parsed argparse namespace (not used beyond construction).
        self.args = args

    def run(self):
        """Handle the init handshake, then process transfer requests until a
        terminate message arrives. Never returns normally: exits the process."""
        # Immediately after invoking a custom transfer process, git-lfs
        # sends initiation data to the process over stdin.
        # This tells the process useful information about the configuration.
        init_msg = json.loads(sys.stdin.readline().strip())
        if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
            write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
            sys.exit(1)

        # The transfer process should use the information it needs from the
        # initiation structure, and also perform any one-off setup tasks it
        # needs to do. It should then respond on stdout with a simple empty
        # confirmation structure, as follows:
        write_msg({})

        # After the initiation exchange, git-lfs will send any number of
        # transfer requests to the stdin of the transfer process, in a serial sequence.
        while True:
            msg = read_msg()
            if msg is None:
                # When all transfers have been processed, git-lfs will send
                # a terminate event to the stdin of the transfer process.
                # On receiving this message the transfer process should
                # clean up and terminate. No response is expected.
                sys.exit(0)

            oid = msg["oid"]
            filepath = msg["path"]
            completion_url = msg["action"]["href"]
            header = msg["action"]["header"]
            chunk_size = int(header.pop("chunk_size"))
            # After popping chunk_size, the remaining header values are the
            # presigned part-upload URLs, in part order.
            presigned_urls: List[str] = list(header.values())

            parts = []
            for i, presigned_url in enumerate(presigned_urls):
                # Stream one chunk of the file to its presigned URL.
                with FileSlice(filepath, seek_from=i * chunk_size, read_limit=chunk_size) as data:
                    r = requests.put(presigned_url, data=data)
                    r.raise_for_status()
                    parts.append(
                        {
                            "etag": r.headers.get("etag"),
                            "partNumber": i + 1,
                        }
                    )
                    # In order to support progress reporting while data is uploading / downloading,
                    # the transfer process should post messages to stdout
                    write_msg(
                        {
                            "event": "progress",
                            "oid": oid,
                            "bytesSoFar": (i + 1) * chunk_size,
                            "bytesSinceLast": chunk_size,
                        }
                    )
                    # Not precise but that's ok.

            # Tell the completion endpoint which parts (etag + number) were
            # uploaded so the multipart upload can be finalized.
            r = requests.post(
                completion_url,
                json={
                    "oid": oid,
                    "parts": parts,
                },
            )
            r.raise_for_status()

            write_msg({"event": "complete", "oid": oid})
| apache-2.0 |
hrashk/sympy | sympy/plotting/textplot.py | 1 | 2000 | from __future__ import print_function, division
from sympy import *
def textplot(expr, a, b, W=55, H=18):
    """
    Print a crude ASCII art plot of the SymPy expression 'expr' (which
    should contain a single symbol, e.g. x or something else) over the
    interval [a, b].

    W and H are the plot width and height in characters.

    Examples
    ========

        textplot(sin(t)*t, 0, 15)
    """
    free = expr.free_symbols
    assert len(free) <= 1
    x = free.pop() if free else Dummy()
    f = lambdify([x], expr)
    a = float(a)
    b = float(b)

    # Calculate function values; points where evaluation fails fall back to 0.
    y = [0] * W
    for x in range(W):
        try:
            y[x] = f(a + (b - a)/float(W)*x)
        except (TypeError, ValueError):
            y[x] = 0

    # Normalize height to screen space
    ma = max(y)
    mi = min(y)
    if ma == mi:
        # Flat function: widen the range so division below is well-defined.
        if ma:
            mi, ma = sorted([0, 2*ma])
        else:
            mi, ma = -1, 1
    for x in range(W):
        y[x] = int(float(H)*(y[x] - mi)/(ma - mi))
    margin = 7
    # Bug fix: a bare `print` under `from __future__ import print_function`
    # is a no-op expression; call it to emit the intended leading blank line.
    print()

    for h in range(H - 1, -1, -1):
        s = [' '] * W
        for x in range(W):
            if y[x] == h:
                # Use slope characters when the neighbours suggest a clean
                # rising/falling line; dots otherwise.
                if (x == 0 or y[x - 1] == h - 1) and (x == W - 1 or y[x + 1] == h + 1):
                    s[x] = '/'
                elif (x == 0 or y[x - 1] == h + 1) and (x == W - 1 or y[x + 1] == h - 1):
                    s[x] = '\\'
                else:
                    s[x] = '.'

        # Print y values
        if h == H - 1:
            prefix = ("%g" % ma).rjust(margin)[:margin]
        elif h == H//2:
            prefix = ("%g" % ((mi + ma)/2)).rjust(margin)[:margin]
        elif h == 0:
            prefix = ("%g" % mi).rjust(margin)[:margin]
        else:
            prefix = " "*margin
        s = "".join(s)
        if h == H//2:
            # Draw the midline through the plot row.
            s = s.replace(" ", "-")
        print(prefix + " | " + s)

    # Print x values
    bottom = " " * (margin + 3)
    bottom += ("%g" % a).ljust(W//2 - 4)
    bottom += ("%g" % ((a + b)/2)).ljust(W//2)
    bottom += "%g" % b
    print(bottom)
| bsd-3-clause |
vamin/MESS.DB | mess/utils.py | 1 | 5267 | # -*- coding: utf-8 -*-
# Copyright 2013-2014 Victor Amin, http://vamin.net/
"""MESS.DB utilities module
This module contains helper functions and classes that are used by many mess
modules.
"""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import hashlib
import json
import os
import resource
import sys
def get_inchikey_dir(inchikey):
    """Convert InChIKey into a path.

    Args:
        inchikey: An InChIKey string.

    Returns:
        Absolute path of the form e.g.:
        path/to/molecules/C/PE/LXLSAUQHCOX-UHFFFAOYSA-M/
    """
    # Shard the key as <first char>/<next two chars>/<remainder> under the
    # sibling molecules/ directory.
    molecules_dir = os.path.join(os.path.dirname(__file__), '../molecules/')
    sharded = os.path.join(molecules_dir, inchikey[:1], inchikey[1:3], inchikey[3:])
    return os.path.abspath(sharded)
def get_mem_usage():
    """Get the current memory usage.

    Returns:
        A human-readable string.
    """
    usage = resource.getrusage(resource.RUSAGE_SELF)
    # usage[0]/usage[1] are user/system CPU time; usage[2] is max resident
    # set size, converted to megabytes via the page size.
    mem_mb = (usage[2] * resource.getpagesize()) / 1000000.0
    return 'usertime=%s systime=%s mem=%s mb' % (usage[0], usage[1], mem_mb)
def hash_dict(d):
    """Serialize and hash a dict.

    Args:
        d: A dict (must be JSON-serializable).

    Returns:
        A hex string of the sha1 hash of the JSON-serialized dict. Keys are
        sorted, so logically-equal dicts hash identically.
    """
    # Encode explicitly: hashlib requires bytes on Python 3 (json.dumps
    # returns text); on Python 2 the ascii str round-trips unchanged.
    return hashlib.sha1(json.dumps(d, sort_keys=True).encode('utf-8')).hexdigest()
def is_inchikey(inchikey, enforce_standard=False):
    """Check if a string is a valid InChIKey.

    Args:
        inchikey: A supposed InChIKey string.
        enforce_standard: Make sure InChIKey is "standard". Default: False.

    Returns:
        boolean
    """
    # Strip an "InChIKey=" style prefix if present.
    if '=' in inchikey:
        inchikey = inchikey.split('=')[1]
    if len(inchikey) != 27:
        return False
    parts = inchikey.split('-')
    try:
        # Segments must be 14/10/1 characters, all alphabetic.
        lengths_ok = (len(parts[0]), len(parts[1]), len(parts[2])) == (14, 10, 1)
        alpha_ok = parts[0].isalpha() and parts[1].isalpha() and parts[2].isalpha()
        if lengths_ok and alpha_ok:
            # Standard InChIKeys carry an 'S' flag in the second segment.
            if not enforce_standard or parts[1][-2] == 'S':
                return True
    except IndexError:
        pass
    return False
def load_method(method_name):
    """Locate a method in mess/methods and return an instance of it."""
    try:
        # Import mess.methods.<method_name>; fromlist forces __import__ to
        # return the leaf submodule rather than the top-level package.
        module = __import__('mess.methods.%s' % method_name,
                            fromlist=['methods'])
        # Each method module exposes a module-level load() factory.
        # NOTE(review): an ImportError raised inside load() is also caught
        # here and reported as an invalid method name — confirm intended.
        method = module.load()
    except ImportError as err:
        print('Error: %s;' % err, file=sys.stderr)
        sys.exit('\'%s\' is not a valid method.' % method_name)
    return method
def setup_dir(directory):
    """If directory does not exist, create it and its parents."""
    if os.path.isdir(directory):
        return
    os.makedirs(directory)
def touch(fname, times=None):
    """Update the timestamp on a file, creating it if it does not exist.

    Args:
        fname: Path of the file to touch.
        times: Optional (atime, mtime) tuple passed to os.utime; None means
            "set to the current time".
    """
    # Append mode creates a missing file without truncating an existing one.
    # `open` replaces the Python 2-only `file` builtin, and the with-block
    # guarantees the handle is closed even if os.utime raises.
    with open(fname, 'a'):
        os.utime(fname, times)
def unicode_replace(x, enc='utf-8', err='replace'):
    """Convert str to unicode.

    Args:
        x: A string.
        enc: Encoding of input string, defaults to 'utf-8'.
        err: What to do on unicode conversion error, defaults to 'replace'.

    Returns:
        Unicode string if x is str, x otherwise.
    """
    # NOTE: relies on the Python 2 `unicode` builtin; any non-str value
    # passes through untouched.
    return unicode(x, enc, err) if isinstance(x, str) else x
def xstr(s):
    """Return str(), except that None returns empty string."""
    return '' if s is None else str(s)
class CustomArgparseFormatter(argparse.HelpFormatter):
    """Custom formatter for setting argparse formatter_class. Very similar to
    ArgumentDefaultsHelpFormatter, except that:
    1) (default: %%(default)s) is not shown if it is set to None or False, or
    if it is already described in the help text, and
    2) very long option strings are split into two lines.
    """

    def _get_help_string(self, action):
        """Append ' (default: ...)' to the action's help text unless the
        default is uninteresting (None/False/SUPPRESS) or already mentioned."""
        help_ = action.help
        if '(default' not in action.help:
            # An int 0 default is still worth showing even though the
            # membership test below excludes it (0 == False in Python).
            # `type(...) is int` replaces the original `action.default is 0`
            # identity check, which relied on CPython small-int caching and
            # raises a SyntaxWarning on modern interpreters.
            is_int_zero = type(action.default) is int and action.default == 0
            if (action.default not in (argparse.SUPPRESS, None, False)
                    or is_int_zero):
                defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
                if action.option_strings or action.nargs in defaulting_nargs:
                    help_ += ' (default: %(default)s)'
        return help_

    def _format_action_invocation(self, action):
        """Render the option/argument invocation, splitting very long option
        strings across two lines."""
        if not action.option_strings:
            # Positional argument: just its metavar.
            metavar, = self._metavar_formatter(action, action.dest)(1)
            return metavar
        parts = []
        if action.nargs == 0:
            # Option takes no value: "-s, --long"
            parts.extend(action.option_strings)
        else:
            # Option takes a value: "-s ARGS, --long ARGS"
            default = action.dest.upper()
            args_string = self._format_args(action, default)
            for option_string in action.option_strings:
                parts.append('%s %s' % (option_string, args_string))
        # Keep everything on one line while it fits the help column width.
        if sum(len(s) for s in parts) < self._width - (len(parts) - 1) * 2:
            return ', '.join(parts)
        return ',\n '.join(parts)
| agpl-3.0 |
thepiper/standoff | vpy/lib/python2.7/site-packages/flask/testsuite/views.py | 561 | 5068 | # -*- coding: utf-8 -*-
"""
flask.testsuite.views
~~~~~~~~~~~~~~~~~~~~~
Pluggable views.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import flask.views
import unittest
from flask.testsuite import FlaskTestCase
from werkzeug.http import parse_set_header
class ViewTestCase(FlaskTestCase):
    """Tests for flask.views.View / flask.views.MethodView pluggable views."""

    def common_test(self, app):
        # Shared assertions: GET/POST dispatch, 405 for unimplemented PUT,
        # and the automatically generated Allow header on OPTIONS.
        c = app.test_client()
        self.assert_equal(c.get('/').data, b'GET')
        self.assert_equal(c.post('/').data, b'POST')
        self.assert_equal(c.put('/').status_code, 405)
        meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
        self.assert_equal(sorted(meths), ['GET', 'HEAD', 'OPTIONS', 'POST'])

    def test_basic_view(self):
        # Plain View with an explicit `methods` list.
        app = flask.Flask(__name__)

        class Index(flask.views.View):
            methods = ['GET', 'POST']

            def dispatch_request(self):
                return flask.request.method

        app.add_url_rule('/', view_func=Index.as_view('index'))
        self.common_test(app)

    def test_method_based_view(self):
        # MethodView dispatches on lowercase HTTP-method-named functions.
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                return 'GET'

            def post(self):
                return 'POST'

        app.add_url_rule('/', view_func=Index.as_view('index'))
        self.common_test(app)

    def test_view_patching(self):
        # Swapping view_class after as_view() must route to the new class
        # (Index's handlers would raise ZeroDivisionError if ever called).
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                1 // 0

            def post(self):
                1 // 0

        class Other(Index):
            def get(self):
                return 'GET'

            def post(self):
                return 'POST'

        view = Index.as_view('index')
        view.view_class = Other
        app.add_url_rule('/', view_func=view)
        self.common_test(app)

    def test_view_inheritance(self):
        # A subclass adding delete() must advertise DELETE in Allow.
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                return 'GET'

            def post(self):
                return 'POST'

        class BetterIndex(Index):
            def delete(self):
                return 'DELETE'

        app.add_url_rule('/', view_func=BetterIndex.as_view('index'))
        c = app.test_client()
        meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
        self.assert_equal(sorted(meths), ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST'])

    def test_view_decorators(self):
        # Entries in View.decorators wrap the generated view function.
        app = flask.Flask(__name__)

        def add_x_parachute(f):
            def new_function(*args, **kwargs):
                resp = flask.make_response(f(*args, **kwargs))
                resp.headers['X-Parachute'] = 'awesome'
                return resp
            return new_function

        class Index(flask.views.View):
            decorators = [add_x_parachute]

            def dispatch_request(self):
                return 'Awesome'

        app.add_url_rule('/', view_func=Index.as_view('index'))
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.headers['X-Parachute'], 'awesome')
        self.assert_equal(rv.data, b'Awesome')

    def test_implicit_head(self):
        # HEAD is served by get() with the body stripped; headers survive.
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                return flask.Response('Blub', headers={
                    'X-Method': flask.request.method
                })

        app.add_url_rule('/', view_func=Index.as_view('index'))
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.data, b'Blub')
        self.assert_equal(rv.headers['X-Method'], 'GET')
        rv = c.head('/')
        self.assert_equal(rv.data, b'')
        self.assert_equal(rv.headers['X-Method'], 'HEAD')

    def test_explicit_head(self):
        # An explicit head() overrides the implicit GET-based handling.
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):
            def get(self):
                return 'GET'

            def head(self):
                return flask.Response('', headers={'X-Method': 'HEAD'})

        app.add_url_rule('/', view_func=Index.as_view('index'))
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.data, b'GET')
        rv = c.head('/')
        self.assert_equal(rv.data, b'')
        self.assert_equal(rv.headers['X-Method'], 'HEAD')

    def test_endpoint_override(self):
        # Registering the same endpoint twice raises in debug mode.
        app = flask.Flask(__name__)
        app.debug = True

        class Index(flask.views.View):
            methods = ['GET', 'POST']

            def dispatch_request(self):
                return flask.request.method

        app.add_url_rule('/', view_func=Index.as_view('index'))
        with self.assert_raises(AssertionError):
            app.add_url_rule('/', view_func=Index.as_view('index'))

        # But these tests should still pass. We just log a warning.
        self.common_test(app)
def suite():
    """Build the unittest suite for the pluggable-view tests."""
    cases = unittest.TestSuite()
    cases.addTest(unittest.makeSuite(ViewTestCase))
    return cases
| gpl-3.0 |
starqiu/PythonLearn | Django-1.6.5/tests/files/tests.py | 6 | 7421 | from __future__ import absolute_import
from io import BytesIO
import os
import gzip
import shutil
import tempfile
from django.core.cache import cache
from django.core.files import File
from django.core.files.move import file_move_safe
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files.temp import NamedTemporaryFile
from django.test import TestCase
from django.utils import unittest
from django.utils.six import StringIO
from .models import Storage, temp_storage, temp_storage_location
class FileStorageTests(TestCase):
    """Exercise FileField behaviour against the test app's temp storage."""

    def tearDown(self):
        # Wipe the temporary storage directory between tests.
        shutil.rmtree(temp_storage_location)

    def test_files(self):
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        # Attempting to access a FileField from the class raises a descriptive
        # error
        self.assertRaises(AttributeError, lambda: Storage.normal)

        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        self.assertRaises(ValueError, lambda: obj1.normal.size)

        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), b"content")
        obj1.normal.close()

        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertEqual(sorted(files), ["default.txt", "django_test.txt"])

        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(
            sorted(files), ["assignment.txt", "default.txt", "django_test.txt"]
        )

        # Files can be read in a little at a time, if necessary.
        obj1.normal.open()
        self.assertEqual(obj1.normal.read(3), b"con")
        self.assertEqual(obj1.normal.read(), b"tent")
        self.assertEqual(list(obj1.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
        obj1.normal.close()

        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
        self.assertEqual(obj2.normal.size, 12)

        # Push the objects into the cache to make sure they pickle properly
        cache.set("obj1", obj1)
        cache.set("obj2", obj2)
        self.assertEqual(cache.get("obj2").normal.name, "tests/django_test_1.txt")

        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")

        # Multiple files with the same name get _N appended to them.
        objs = [Storage() for i in range(3)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        self.assertEqual(
            [o.normal.name for o in objs],
            ["tests/multiple_files.txt", "tests/multiple_files_1.txt", "tests/multiple_files_2.txt"]
        )
        for o in objs:
            o.delete()

        # Default values allow an object to access a single file.
        obj3 = Storage.objects.create()
        self.assertEqual(obj3.default.name, "tests/default.txt")
        self.assertEqual(obj3.default.read(), b"default content")
        obj3.default.close()

        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj3.delete()
        obj3 = Storage()
        self.assertEqual(obj3.default.read(), b"default content")
        obj3.default.close()

        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj4 = Storage()
        obj4.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj4.random.name.endswith("/random_file"))

    def test_file_object(self):
        # Create sample file
        temp_storage.save('tests/example.txt', ContentFile('some content'))

        # Load it as python file object
        with open(temp_storage.path('tests/example.txt')) as file_obj:
            # Save it using storage and read its content
            temp_storage.save('tests/file_obj', file_obj)
        self.assertTrue(temp_storage.exists('tests/file_obj'))
        with temp_storage.open('tests/file_obj') as f:
            self.assertEqual(f.read(), b'some content')

    def test_stringio(self):
        # Test passing StringIO instance as content argument to save
        output = StringIO()
        output.write('content')
        output.seek(0)

        # Save it and read written file
        temp_storage.save('tests/stringio', output)
        self.assertTrue(temp_storage.exists('tests/stringio'))
        with temp_storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
class FileTests(unittest.TestCase):
    """Unit tests for django.core.files.File and related wrappers."""

    def test_context_manager(self):
        # File works as a context manager and closes the wrapped file object.
        orig_file = tempfile.TemporaryFile()
        base_file = File(orig_file)
        with base_file as f:
            self.assertIs(base_file, f)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        self.assertTrue(orig_file.closed)

    def test_namedtemporaryfile_closes(self):
        """
        The symbol django.core.files.NamedTemporaryFile is assigned as
        a different class on different operating systems. In
        any case, the result should minimally mock some of the API of
        tempfile.NamedTemporaryFile from the Python standard library.
        """
        tempfile = NamedTemporaryFile()
        self.assertTrue(hasattr(tempfile, "closed"))
        self.assertFalse(tempfile.closed)

        tempfile.close()
        self.assertTrue(tempfile.closed)

    def test_file_mode(self):
        # Should not set mode to None if it is not present.
        # See #14681, stdlib gzip module crashes if mode is set to None
        file = SimpleUploadedFile("mode_test.txt", b"content")
        self.assertFalse(hasattr(file, 'mode'))
        # Constructing GzipFile would raise if `mode` were (wrongly) None.
        g = gzip.GzipFile(fileobj=file)

    def test_file_iteration(self):
        """
        File objects should yield lines when iterated over.
        Refs #22107.
        """
        file = File(BytesIO(b'one\ntwo\nthree'))
        self.assertEqual(list(file), [b'one\n', b'two\n', b'three'])
class FileMoveSafeTests(unittest.TestCase):
    """Tests for django.core.files.move.file_move_safe overwrite handling."""

    def test_file_move_overwrite(self):
        # Two pre-existing temp files inside the test temp dir.
        handle_a, self.file_a = tempfile.mkstemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
        handle_b, self.file_b = tempfile.mkstemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])

        # file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
        self.assertRaises(IOError, lambda: file_move_safe(self.file_a, self.file_b, allow_overwrite=False))

        # should allow it and continue on if allow_overwrite is True
        self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))

        os.close(handle_a)
        os.close(handle_b)
| gpl-2.0 |
unaizalakain/django | django/contrib/staticfiles/utils.py | 335 | 1976 | import fnmatch
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
Return True or False depending on whether the ``path`` should be
ignored (if it matches any pattern in ``ignore_patterns``).
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
| bsd-3-clause |
evstropov/django-webmoney-merchant | webmoney_merchant/forms.py | 1 | 2066 | import re
from django import forms
from django.utils.translation import ugettext_lazy as _
# WebMoney purse identifier: one purse-type letter followed by 12 digits.
PURSE_RE = re.compile(r'^(?P<type>[BCDEGKRUXYZ])(?P<number>\d{12})$')
# WebMoney member identifier (WMID): exactly 12 digits.
WMID_RE = re.compile(r'^\d{12}$')
class PaymentRequestForm(forms.Form):
    """Hidden-input form rendered into a page that posts the user to the
    WebMoney merchant payment interface."""
    LMI_PAYMENT_AMOUNT = forms.DecimalField(max_digits=7, decimal_places=2, widget=forms.HiddenInput())
    LMI_PAYMENT_DESC_BASE64 = forms.CharField(widget=forms.HiddenInput())
    LMI_PAYMENT_NO = forms.IntegerField(widget=forms.HiddenInput())
    LMI_PAYEE_PURSE = forms.RegexField(regex=PURSE_RE, widget=forms.HiddenInput())
    LMI_SIM_MODE = forms.IntegerField(initial="0", widget=forms.HiddenInput())
class BasePaymentForm(forms.Form):
    """Common base: every WebMoney callback carries the payment number."""
    LMI_PAYMENT_NO = forms.IntegerField(label=_('Payment Number'))
class ExtraPaymentForm(BasePaymentForm):
    """Optional fields present on Paymer / Telepat / credit payments."""
    # Paymer
    LMI_PAYMER_NUMBER = forms.CharField(required=False)
    LMI_PAYMER_EMAIL = forms.EmailField(required=False)
    # Telepat
    LMI_TELEPAT_PHONENUMBER = forms.CharField(required=False)
    LMI_TELEPAT_ORDERID = forms.CharField(required=False)
    # Credit
    LMI_PAYMENT_CREDITDAYS = forms.IntegerField(min_value=0, required=False)
class PrerequestForm(ExtraPaymentForm):
    """Fields WebMoney posts during the pre-request (validation) phase."""
    LMI_PREREQUEST = forms.BooleanField(label=_('Prerequest flag'), required=False)
    LMI_PAYEE_PURSE = forms.RegexField(regex=PURSE_RE)
    LMI_PAYMENT_AMOUNT = forms.DecimalField(max_digits=7, decimal_places=2, label=_('Amount'))
    LMI_MODE = forms.IntegerField(label=_('Test mode'), min_value=0, max_value=1)
    LMI_PAYER_WM = forms.RegexField(regex=WMID_RE)
    LMI_PAYER_PURSE = forms.RegexField(regex=PURSE_RE)
class PayedPaymentForm(BasePaymentForm):
    """Transaction details WebMoney sends once a payment has been made."""
    LMI_SYS_INVS_NO = forms.IntegerField()
    LMI_SYS_TRANS_NO = forms.IntegerField()
    LMI_SYS_TRANS_DATE = forms.DateTimeField(input_formats=['%Y%m%d %H:%M:%S'])
class PaymentNotificationForm(PrerequestForm, PayedPaymentForm):
    """Signed payment notification: pre-request fields plus the control hash."""
    LMI_HASH = forms.CharField()
class SettledPaymentForm(PayedPaymentForm, ExtraPaymentForm):
    """Payment confirmed as settled; no fields beyond the base forms."""
    pass
class UnSettledPaymentForm(PayedPaymentForm, ExtraPaymentForm):
    """Payment reported but not yet settled; same fields as settled form."""
    pass
| apache-2.0 |
EraYaN/CouchPotatoServer | couchpotato/core/media/_base/providers/torrent/bithdtv.py | 48 | 5237 | import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
    """BiT-HDTV torrent provider: scrapes the site's search results and
    torrent detail pages with BeautifulSoup."""

    urls = {
        'test': 'https://www.bit-hdtv.com/',
        'login': 'https://www.bit-hdtv.com/takelogin.php',
        'login_check': 'https://www.bit-hdtv.com/messages.php',
        'detail': 'https://www.bit-hdtv.com/details.php?id=%s',
        'search': 'https://www.bit-hdtv.com/torrents.php?',
    }

    # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken

    http_time_between_calls = 1  # Seconds

    def _search(self, media, quality, results):
        """Scrape a search results page, appending release dicts to `results`."""
        query = self.buildUrl(media, quality)

        url = "%s&%s" % (self.urls['search'], query)

        data = self.getHTMLData(url)

        if data:
            # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
            split_data = data.partition('-->')
            if '## SELECT COUNT(' in split_data[0]:
                data = split_data[2]

            html = BeautifulSoup(data)

            try:
                # Results live in a fixed-width table; no table means no hits.
                result_table = html.find('table', attrs = {'width': '750', 'class': ''})
                if result_table is None:
                    return

                entries = result_table.find_all('tr')

                # Skip the header row; each remaining row is one release.
                for result in entries[1:]:
                    cells = result.find_all('td')
                    link = cells[2].find('a')
                    torrent_id = link['href'].replace('/details.php?id=', '')

                    results.append({
                        'id': torrent_id,
                        'name': link.contents[0].get_text(),
                        'url': cells[0].find('a')['href'],
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(cells[6].get_text()),
                        'seeders': tryInt(cells[8].string),
                        'leechers': tryInt(cells[9].string),
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        # Credentials come from this provider's configuration section.
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
        }

    def getMoreInfo(self, item):
        """Fetch (and cache) the torrent detail page, filling `description`."""
        full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        html = BeautifulSoup(full_description)
        nfo_pre = html.find('table', attrs = {'class': 'detail'})
        description = toUnicode(nfo_pre.text) if nfo_pre else ''

        item['description'] = description
        return item

    def loginSuccess(self, output):
        # Logged-in pages contain a logout link.
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess
config = [{
'name': 'bithdtv',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'BiT-HDTV',
'description': '<a href="https://bit-hdtv.com">BiT-HDTV</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
},
{
'name': 'password',
'default': '',
'type': 'password',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 20,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
| gpl-3.0 |
aigamo/primecloud-controller | iaas-gw/src/iaasgw/controller/ec2/ec2LoadBalancercontroller.py | 5 | 25854 | # coding: UTF-8
#
# Copyright 2014 by SCSK Corporation.
#
# This file is part of PrimeCloud Controller(TM).
#
# PrimeCloud Controller(TM) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# PrimeCloud Controller(TM) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PrimeCloud Controller(TM). If not, see <http://www.gnu.org/licenses/>.
#
from iaasgw.log.log import IaasLogger
from iaasgw.module.ec2.ec2module import Listener, HealthCheck, \
LoadBalancerDescription
from iaasgw.utils.stringUtils import isNotEmpty, isBit
from sqlalchemy.sql.expression import and_
import traceback
class ec2LoadBalancercontroller(object):
logger = IaasLogger()
client = None
conn = None
platforminfo = None
STOPPED = "STOPPED"
STARTING = "STARTING"
RUNNING = "RUNNING"
STOPPING = "STOPPING"
CONFIGURING = "CONFIGURING"
WARNING = "WARNING"
STATUS={
STOPPED:STOPPED,
RUNNING:RUNNING,
STOPPING:STOPPING,
CONFIGURING:CONFIGURING,
WARNING:WARNING,
}
def __init__(self, platforminfo, ec2iaasclientLb, conn):
    # ELB API client, DB connection wrapper and platform metadata are
    # injected by the caller; stored as-is for the controller methods.
    self.client = ec2iaasclientLb
    self.conn = conn
    self.platforminfo = platforminfo
def getStatusString(self, key):
if not key:
return "STOPPED"
value = self.STATUS[key]
if value != None:
return value
return "STOPPED"
def createLoadBalancer(self, farmNo, loadBalancerNo, availabilityZones, subnets, groupmap) :
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
# ロードバランサ作成情報
loadBalancerName = awsLoadBalancer["NAME"]
# 内部ロードバランサ
internal = awsLoadBalancer["INTERNAL"]
# デフォルトゾーンの特定 デフォルト1件目
availabilityZone = None
for zone in availabilityZones:
availabilityZone = zone.name
#セキュリティグループ
securityGroups = []
if (isNotEmpty(awsLoadBalancer["SECURITY_GROUPS"])):
securityGroups = awsLoadBalancer["SECURITY_GROUPS"].split(",")
#サブネットID
subnetIds = []
if (isNotEmpty(awsLoadBalancer["SUBNET_ID"])):
subnetIds = awsLoadBalancer["SUBNET_ID"].split(",")
# サブネット(VPC)との関係からセキュリティグループIDを取得
securityGroupIds = []
if len(subnetIds) != 0:
for subnet in subnets:
if subnetIds[0] == subnet.subnetId:
#セキュリティグループID
for group in securityGroups:
key = group+subnet.vpcId
securityGroupIds.append(groupmap[key])
# ダミーのリスナーの設定 instancePort, instanceProtocol, loadBalancerPort, protocol, sslCertificateId
listener = Listener("65535", None, "65535","TCP",None)
listeners = [listener]
# ロードバランサの作成
dnsName = self.client.createLoadBalancer(availabilityZone, listeners, loadBalancerName, subnetIds, securityGroupIds, internal)
#実行ログ
self.logger.info(None ,"IPROCESS-200111", [awsLoadBalancer["NAME"],])
# イベントログ出力
self.conn.debug(farmNo, None, None, None, None, "AwsElbCreate", ["EC2", loadBalancerName] )
# ダミーのリスナーの削除
self.client.deleteLoadBalancerListeners(["65535",], loadBalancerName)
#クロスゾーン負荷分散を有効化
self.client.modifyLoadBalancer(loadBalancerName)
#実行ログ
self.logger.info(None ,"IPROCESS-200226", [awsLoadBalancer["NAME"],])
# イベントログ出力
self.conn.debug(farmNo, None, None, None, None, "AwsCrossZoneEnabled", ["EC2", loadBalancerName] )
# データベース更新
updateDict = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
updateDict["DNS_NAME"] = dnsName
sql = tableAWSLB.update(tableAWSLB.c.LOAD_BALANCER_NO ==updateDict["LOAD_BALANCER_NO"], values=updateDict)
self.conn.execute(sql)
return dnsName;
def deleteLoadBalancer(self, farmNo, loadBalancerNo) :
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
# ロードバランサ名
loadBalancerName = awsLoadBalancer["NAME"]
try :
self.client.deleteLoadBalancer(loadBalancerName);
#実行ログ
self.logger.info(None ,"IPROCESS-200112", [awsLoadBalancer["NAME"],])
# イベントログ出力
self.conn.debug(farmNo, None, None, None, None, "AwsElbDelete", ["EC2", loadBalancerName] )
except Exception:
self.logger.error(traceback.format_exc())
# データベース更新
updateDict = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
updateDict["DNS_NAME"] = None
sql = tableAWSLB.update(tableAWSLB.c.LOAD_BALANCER_NO ==updateDict["LOAD_BALANCER_NO"], values=updateDict)
self.conn.execute(sql)
def configureListeners(self, farmNo, loadBalancerNo) :
table = self.conn.getTable("LOAD_BALANCER_LISTENER")
listeners = self.conn.select(table.select(table.c.LOAD_BALANCER_NO==loadBalancerNo))
# リスナーの起動・停止処理
for listener in listeners :
status = self.getStatusString(listener["STATUS"])
if isBit(listener["ENABLED"]):
if status == self.STOPPED :
# 有効で停止しているリスナーは処理対象
self.startListener(farmNo, loadBalancerNo, listener["LOAD_BALANCER_PORT"])
elif status == self.RUNNING:
# 有効で起動しているリスナーの場合、処理を行わずにフラグを変更する
if isBit(listener["CONFIGURE"]):
listener["CONFIGURE"] = "0"
sql = table.update(and_(table.c.LOAD_BALANCER_NO ==listener["LOAD_BALANCER_NO"], table.c.LOAD_BALANCER_PORT == listener["LOAD_BALANCER_PORT"]), values=listener)
self.conn.execute(sql)
else :
if (status == self.RUNNING or status == self.WARNING) :
# 無効で起動または異常なリスナーは処理対象
self.stopListener(farmNo, loadBalancerNo, listener["LOAD_BALANCER_PORT"])
elif (status == self.STOPPED) :
# 無効で停止しているリスナーの場合、処理を行わずにフラグを変更する
if isBit(listener["CONFIGURE"]):
listener["CONFIGURE"] = "0"
sql = table.update(and_(table.c.LOAD_BALANCER_NO ==loadBalancerNo, table.c.LOAD_BALANCER_PORT == listener["LOAD_BALANCER_PORT"]), values=listener)
self.conn.execute(sql)
def startListener(self, farmNo, loadBalancerNo, loadBalancerPort) :
table = self.conn.getTable("LOAD_BALANCER_LISTENER")
listener = self.conn.selectOne(table.select(and_(table.c.LOAD_BALANCER_NO==loadBalancerNo, table.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
try :
# リスナー作成情報
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
sslKey = None
if (isNotEmpty(listener["SSL_KEY_NO"])):
# リスナー作成情報
tableAWSSSL = self.conn.getTable("AWS_SSL_KEY")
awsSslKey = self.conn.selectOne(tableAWSSSL.select(tableAWSSSL.c.KEY_NO==listener["SSL_KEY_NO"]))
sslKey = awsSslKey["SSLCERTIFICATEID"]
# ロードバランサ名
loadBalancerName = awsLoadBalancer["NAME"]
# リスナーの設定 instancePort, instanceProtocol, loadBalancerPort, protocol, sslCertificateId
listeners = [ Listener(listener["SERVICE_PORT"], None, listener["LOAD_BALANCER_PORT"], listener["PROTOCOL"], sslKey),]
# リスナーの作成
self.client.createLoadBalancerListeners(listeners, loadBalancerName)
#実行ログ
self.logger.info(None ,"IPROCESS-200121", [awsLoadBalancer["NAME"], listener["LOAD_BALANCER_PORT"]])
# イベントログ出力
self.conn.debug(farmNo, None, None, None, None, "AwsElbListenerCreate", ["EC2", loadBalancerName, listener["LOAD_BALANCER_PORT"]] )
except Exception:
self.logger.error(traceback.format_exc())
# ステータスを更新
tableLBL = self.conn.getTable("LOAD_BALANCER_LISTENER")
updateDict = self.conn.selectOne(tableLBL.select(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
updateDict["STATUS"] = self.WARNING
sql = tableLBL.update(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort), values=updateDict)
self.conn.execute(sql)
raise
# ステータスを更新
tableLBL = self.conn.getTable("LOAD_BALANCER_LISTENER")
updateDict = self.conn.selectOne(table.select(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
updateDict["STATUS"] = self.RUNNING
sql = tableLBL.update(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort), values=updateDict)
self.conn.execute(sql)
def stopListener(self, farmNo, loadBalancerNo, loadBalancerPort) :
tableLBL = self.conn.getTable("LOAD_BALANCER_LISTENER")
listener = self.conn.selectOne(tableLBL.select(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
try :
# リスナー削除情報
table = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(table.select(table.c.LOAD_BALANCER_NO==loadBalancerNo))
# ロードバランサ名
loadBalancerName = awsLoadBalancer["NAME"]
# ロードバランサポート
loadBalancerPort = listener["LOAD_BALANCER_PORT"]
loadBalancerPorts = [loadBalancerPort,]
# リスナーの削除
self.client.deleteLoadBalancerListeners(loadBalancerPorts, loadBalancerName);
#実行ログ
self.logger.info(None ,"IPROCESS-200122", [awsLoadBalancer["NAME"], listener["LOAD_BALANCER_PORT"]])
# イベントログ出力
self.conn.debug(farmNo, None, None, None, None, "AwsElbListenerDelete", ["EC2", loadBalancerName, listener["LOAD_BALANCER_PORT"]] )
except Exception, e:
self.logger.error(traceback.format_exc())
self.logger.warn(e.getMessage())
# ステータスを更新
updateDict = self.conn.selectOne(tableLBL.select(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
updateDict["STATUS"] = self.STOPPED
sql = tableLBL.update(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort), values=updateDict)
self.conn.execute(sql)
def configureHealthCheck(self, farmNo, loadBalancerNo) :
tableLBHC = self.conn.getTable("LOAD_BALANCER_HEALTH_CHECK")
healthCheck = self.conn.selectOne(tableLBHC.select(tableLBHC.c.LOAD_BALANCER_NO==loadBalancerNo))
# ヘルスチェック情報がない場合はスキップ
if not healthCheck :
return
# 現在のヘルスチェック設定を取得
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
# ロードバランサ名
loadBalancerName = awsLoadBalancer["NAME"]
#loadBalancerDescriptions = self.client.describeLoadBalancer(loadBalancerName)
#description = loadBalancerDescriptions[0]
description =LoadBalancerDescription(None, None , None, None, None, None, HealthCheck(None, 1, 2, 3, 4), None, None, None, None, None )
# ヘルスチェック設定を作成
target = str(healthCheck["CHECK_PROTOCOL"]) + ":" + str(healthCheck["CHECK_PORT"])
if (isNotEmpty(healthCheck["CHECK_PATH"])) :
if healthCheck["CHECK_PATH"].startswith('/') == False:
target = target + "/"
target = target + healthCheck["CHECK_PATH"]
healthCheck2 = HealthCheck(
healthCheck["HEALTHY_THRESHOLD"],
healthCheck["CHECK_INTERVAL"],
target,
healthCheck["CHECK_TIMEOUT"],
healthCheck["UNHEALTHY_THRESHOLD"])
# ヘルスチェック設定に変更がない場合はスキップ
if ((healthCheck2.target == description.healthCheck.target)
and (healthCheck2.timeout == description.healthCheck.timeout)
and (healthCheck2.interval == description.healthCheck.interval)
and (healthCheck2.healthyThreshold == description.healthCheck.healthyThreshold)
and (healthCheck2.unhealthyThreshold == description.healthCheck.unhealthyThreshold)) :
return
# ヘルスチェック設定を変更
self.client.configureHealthCheck(healthCheck2, loadBalancerName);
#実行ログ
self.logger.info(None ,"IPROCESS-200131", [awsLoadBalancer["NAME"],])
# イベントログ出力
self.conn.debug(farmNo, None, None, None, None, "AwsElbHealthCheckConfig", ["EC2", loadBalancerName,] )
def applySecurityGroupsToLoadBalancer(self, farmNo, loadBalancerNo, groupmap, subnets) :
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
# ロードバランサ名
loadBalancerName = awsLoadBalancer["NAME"]
# サブネットIDが設定されていなければリターン
subnetIds = []
if (isNotEmpty(awsLoadBalancer["SUBNET_ID"])):
subnetIds = awsLoadBalancer["SUBNET_ID"].split(",")
else:
return
#セキュリティグループ
securityGroups = []
if (isNotEmpty(awsLoadBalancer["SECURITY_GROUPS"])):
securityGroups = awsLoadBalancer["SECURITY_GROUPS"].split(",")
#IDへ変換
securityGroupIds = []
for subnet in subnets:
if subnetIds[0] == subnet.subnetId:
#セキュリティグループID
for group in securityGroups:
key = group+subnet.vpcId
securityGroupIds.append(groupmap[key])
# セキュリティグループ設定を変更
self.client.applySecurityGroupsToLoadBalancer(securityGroupIds, loadBalancerName);
#実行ログ
self.logger.info(None ,"IPROCESS-200225", [awsLoadBalancer["NAME"],])
# イベントログ出力
self.conn.debug(farmNo, None, None, None, None, "AwsElbSecurityGroupsConfig", ["EC2", loadBalancerName,] )
def configureInstances(self, farmNo, loadBalancerNo) :
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
loadBalancerInstances = self.conn.select(tableLBINS.select(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo))
# 振り分け設定するインスタンスがない場合はスキップ
if not loadBalancerInstances or len(loadBalancerInstances) == 0:
return
tableLB = self.conn.getTable("LOAD_BALANCER")
loadBalancer = self.conn.selectOne(tableLB.select(tableLB.c.LOAD_BALANCER_NO==loadBalancerNo))
# 振り分けを登録・解除するインスタンスを仕分けする
enabledInstances = []
disabledInstances = []
# 振り分けするインスタンス情報を取得
instanceMap = {}
for loadBalancerInstance in loadBalancerInstances :
table = self.conn.getTable("INSTANCE")
instanceNo = loadBalancerInstance["INSTANCE_NO"]
#インスタンス獲得
instance = self.conn.selectOne(table.select(table.c.INSTANCE_NO == instanceNo))
instanceMap.update({instanceNo:instance})
# ロードバランサが無効の場合は振り分けを解除する
if not isBit(loadBalancer["ENABLED"]):
disabledInstances.append(instance)
continue;
# インスタンスが無効の場合は振り分けを解除する
if not isBit(instance["ENABLED"]):
disabledInstances.append(instance);
continue;
if isBit(loadBalancerInstance["ENABLED"]):
enabledInstances.append(instance)
else :
disabledInstances.append(instance)
# 振り分けを登録する
self.registerInstances(farmNo, loadBalancerNo, enabledInstances, loadBalancerInstances)
# 振り分けを解除する
self.unregisterInstances(farmNo, loadBalancerNo, disabledInstances, loadBalancerInstances)
def registerInstances(self, farmNo, loadBalancerNo, instances, loadBalancerInstances) :
if not instances or len(instances) == 0:
# 振り分け登録するインスタンスがない場合はスキップ
return
# 振り分けされていないインスタンス番号を抽出
tmpInstances = []
for loadBalancerInstance in loadBalancerInstances:
for instance in instances:
if instance["INSTANCE_NO"] == loadBalancerInstance["INSTANCE_NO"] :
status = self.getStatusString(loadBalancerInstance["STATUS"])
if status == self.STOPPED :
tmpInstances.append(instance)
instances = tmpInstances
# 振り分けされていないインスタンスがない場合はスキップ
if not instances or len(instances) == 0:
return
# 起動しているインスタンス番号を抽出
tmpInstanceNos = []
for instance in instances:
status = self.getStatusString(instance["STATUS"])
if status == self.RUNNING:
tmpInstanceNos.append(instance)
instances = tmpInstanceNos;
if not instances or len(instances) == 0:
# 起動しているインスタンスがない場合はスキップ
return;
# AWSインスタンスのIDを取得
instanceIds = []
tableAWSINS = self.conn.getTable("AWS_INSTANCE")
for instance in instances:
awsInstance = self.conn.selectOne(tableAWSINS.select(tableAWSINS.c.INSTANCE_NO == instance["INSTANCE_NO"]))
instanceIds.append(awsInstance["INSTANCE_ID"])
try :
# 振り分け登録
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO == loadBalancerNo))
loadBalancerName = awsLoadBalancer["NAME"]
self.client.registerInstancesWithLoadBalancer(instanceIds, loadBalancerName)
for instanceid in instanceIds:
#実行ログ
self.logger.info(None ,"IPROCESS-200141", [awsLoadBalancer["NAME"], instanceid])
# イベントログ出力
self.conn.debug(farmNo, None, None, None, None, "AwsElbInstancesRegist", ["EC2", loadBalancerName, instanceid] )
except Exception:
self.logger.error(traceback.format_exc())
# ステータスの更新
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
for instance in instances:
loadBalancerInstance = self.conn.selectOne(tableLBINS.select(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBINS.c.INSTANCE_NO ==instance["INSTANCE_NO"])))
loadBalancerInstance["STATUS"] = self.WARNING
sql = tableLBINS.update(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerInstance["LOAD_BALANCER_NO"], tableLBINS.c.INSTANCE_NO ==loadBalancerInstance["INSTANCE_NO"]), values=loadBalancerInstance)
self.conn.execute(sql)
raise
# ステータスの更新
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
for instance in instances:
loadBalancerInstance = self.conn.selectOne(tableLBINS.select(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBINS.c.INSTANCE_NO ==instance["INSTANCE_NO"])))
loadBalancerInstance["STATUS"] = self.RUNNING
sql = tableLBINS.update(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerInstance["LOAD_BALANCER_NO"], tableLBINS.c.INSTANCE_NO ==loadBalancerInstance["INSTANCE_NO"]), values=loadBalancerInstance)
self.conn.execute(sql)
def unregisterInstances(self, farmNo, loadBalancerNo, instances, loadBalancerInstances) :
if not instances or len(instances) == 0:
# 振り分け登録するインスタンスがない場合はスキップ
return
# 振り分けされているインスタンス番号を抽出
tmpInstances = []
for loadBalancerInstance in loadBalancerInstances:
for instance in instances:
if instance["INSTANCE_NO"] == loadBalancerInstance["INSTANCE_NO"] :
status = self.getStatusString(loadBalancerInstance["STATUS"])
if status == self.RUNNING :
tmpInstances.append(instance)
instances = tmpInstances
if not instances or len(instances) == 0:
# 振り分けされているインスタンスがない場合はスキップ
return
# 起動しているインスタンス番号を抽出
tmpInstanceNos = []
for instance in instances:
status = self.getStatusString(instance["STATUS"])
if status == self.RUNNING:
tmpInstanceNos.append(instance)
instances = tmpInstanceNos;
if not instances or len(instances) == 0:
# 起動しているインスタンスがない場合はスキップ
return;
# AWSインスタンスのIDを取得
instanceIds = []
tableAWSINS = self.conn.getTable("AWS_INSTANCE")
for instance in instances:
awsInstance = self.conn.selectOne(tableAWSINS.select(tableAWSINS.c.INSTANCE_NO == instance["INSTANCE_NO"]))
instanceIds.append(awsInstance["INSTANCE_ID"])
try :
# 振り分け解除
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO == loadBalancerNo))
loadBalancerName = awsLoadBalancer["NAME"]
self.client.deregisterInstancesFromLoadBalancer(instanceIds, loadBalancerName)
for instanceid in instanceIds:
#実行ログ
self.logger.info(None ,"IPROCESS-200142", [awsLoadBalancer["NAME"], instanceid])
# イベントログ出力
self.conn.debug(farmNo, None, None, None, None, "AwsElbInstancesDeregist", ["EC2", loadBalancerName, instanceid] )
except Exception:
self.logger.error(traceback.format_exc())
# ステータスの更新
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
for instance in instances:
loadBalancerInstance = self.conn.selectOne(tableLBINS.select(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBINS.c.INSTANCE_NO ==instance["INSTANCE_NO"])))
loadBalancerInstance["STATUS"] = self.WARNING
sql = tableLBINS.update(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerInstance["LOAD_BALANCER_NO"], tableLBINS.c.INSTANCE_NO ==loadBalancerInstance["INSTANCE_NO"]), values=loadBalancerInstance)
self.conn.execute(sql)
raise
# ステータスの更新
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
for instance in instances:
loadBalancerInstance = self.conn.selectOne(tableLBINS.select(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBINS.c.INSTANCE_NO ==instance["INSTANCE_NO"])))
loadBalancerInstance["STATUS"] = self.STOPPED
sql = tableLBINS.update(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerInstance["LOAD_BALANCER_NO"], tableLBINS.c.INSTANCE_NO ==loadBalancerInstance["INSTANCE_NO"]), values=loadBalancerInstance)
self.conn.execute(sql)
| gpl-2.0 |
klothe/tablib | tablib/packages/xlrd3/xfcell.py | 54 | 8607 | # Author: mozman <mozman@gmx.at>
# Purpose: xfcell -- cell with convenient xf function
# Created: 04.12.2010
# Copyright (C) 2010, Manfred Moitzi
# License: BSD-style licence
"""
The XFCell() object contains the data for one cell.
WARNING: You don't call this class yourself. You access Cell objects
via methods of the Sheet object(s) that you found in the Book object that
was returned when you called xlrd.open_workbook("myfile.xls").
Cell objects have four attributes: `ctype` is an int, `value` (which depends
on `ctype`), `xf_index` and `sheet`, a reference to the containing sheet. If
**formatting_info** is not enabled when the workbook is opened, xf_index will
be **None**.
The following table describes the types of cells and how their values
are represented in Python.
=============== ===== ============ ==========================================
Type symbol Const Python value Note
=============== ===== ============ ==========================================
XL_CELL_EMPTY 0 ""
XL_CELL_TEXT 1 str
XL_CELL_NUMBER 2 float
XL_CELL_DATE 3 float
XL_CELL_BOOLEAN 4 int 1 means TRUE, 0 means FALSE
XL_CELL_ERROR 5 int representing internal Excel codes; for a
text representation, refer to the supplied
dictionary error_text_from_code
XL_CELL_BLANK 6 "" this type will appear only when
open_workbook(..., formatting_info=True)
is used.
=============== ===== ============ ==========================================
"""
import datetime
from .xldate import xldate_as_tuple
from .biffh import XL_CELL_DATE, BaseObject
class XFCell(BaseObject):
    """Extended Cell() class with convenient methods for easy access of cell
    properties (formatting record, colors, dates, protection, ...).

    Most accessors require the workbook to have been opened with
    ``formatting_info=True``; otherwise ``xf`` is None.
    """

    __slots__ = ['sheet', 'ctype', 'value', 'xf']

    def __init__(self, ctype, value, xf_index=None, sheet=None):
        self.sheet = sheet      # containing Sheet (may be None)
        self.ctype = ctype      # one of the XL_CELL_* constants
        self.value = value      # Python value; representation depends on ctype
        if xf_index is not None:
            self.xf = self.book.xf_list[xf_index]
        else:
            self.xf = None

    @property
    def book(self):
        return self.sheet.book

    @property
    def has_xf(self):
        return (self.xf is not None)

    @property
    def xf_index(self):
        if self.has_xf:
            return self.xf.xf_index
        else:
            return None

    @property
    def parent_style(self):
        return self.book.xf_list[self.xf.parent_style_index]

    @property
    def is_datetime(self):
        return self.ctype == XL_CELL_DATE

    @property
    def has_date(self):
        # Serial values <= 1.0 encode a pure time-of-day without a date part.
        if self.is_datetime:
            return self.value > 1.
        return False

    def get_color(self, index):
        return self.book.colour_map[index]

    def _inherited(self, flag_name, attr_name):
        """Resolve an XF attribute, falling back to the parent style.

        A style XF with the attribute flag cleared, or a cell XF with the
        flag set, carries its own value; otherwise the parent style's
        value applies.  Factors out the identical logic previously
        duplicated in background/font_index/format_key/alignment/border/
        protection.
        """
        flag = getattr(self.xf, flag_name)
        if (self.xf.is_style and flag == 0) or flag:
            return getattr(self.xf, attr_name)
        return getattr(self.parent_style, attr_name)

    def datetime(self):
        """ Returns a datetime.datetime object if cell type is XL_CELL_DATE
        else raises a TypeError, and raises ValueError if the cell has
        no date value (only a time value is present).
        """
        if self.is_datetime:
            if self.has_date:
                date = xldate_as_tuple(self.value, self.book.datemode)
                return datetime.datetime(*date)
            else:
                raise ValueError("Cell has no date value.")
        else:
            raise TypeError("Cell is not a XL_CELL_DATE.")

    def date(self):
        """ Returns a datetime.date object if cell type is XL_CELL_DATE
        else raises a **TypeError**. Raises **ValueError** if the cell
        doesn't have a date value (only time value is present).
        """
        dt = self.datetime()
        return dt.date()

    def time(self):
        """ Returns a datetime.time object if cell type is XL_CELL_DATE else
        raises a TypeError.
        """
        if self.is_datetime:
            date = xldate_as_tuple(self.value, self.book.datemode)
            return datetime.time(date[3], date[4], date[5])
        else:
            raise TypeError("Cell is not a XL_CELL_DATE.")

    #
    # access the XFBackground() class
    #
    @property
    def background(self):
        return self._inherited('_background_flag', 'background')

    def background_color(self):
        """ Get cell background-color as 3-tuple. """
        # NOTE(review): reads self.xf.background directly instead of the
        # flag-resolved ``background`` property above; kept as-is.
        color_index = self.xf.background.background_colour_index
        return self.get_color(color_index)

    def fill_pattern(self):
        return self.xf.background.fill_pattern

    def pattern_color(self):
        color_index = self.xf.background.pattern_colour_index
        return self.get_color(color_index)

    #
    # access the Font() class
    #
    @property
    def font_index(self):
        return self._inherited('_font_flag', 'font_index')

    @property
    def font(self):
        """ Get the Font() class. """
        # NOTE(review): uses self.xf.font_index, not the flag-resolved
        # ``font_index`` property; kept as-is.
        return self.book.font_list[self.xf.font_index]

    def font_color(self):
        """ Get cell foreground-color as 3-tuple. """
        return self.get_color(self.font.colour_index)

    #
    # access the Format() class
    #
    @property
    def format_key(self):
        return self._inherited('_format_flag', 'format_key')

    @property
    def format(self):
        """ Get the Format() class. """
        return self.book.format_map[self.format_key]

    def format_str(self):
        """ Get the associated 'format_str'. """
        return self.format.format_str

    #
    # access the XFAligment() class
    #
    @property
    def alignment(self):
        return self._inherited('_alignment_flag', 'alignment')

    #
    # access the XFBorder() class
    #
    @property
    def border(self):
        return self._inherited('_border_flag', 'border')

    def bordercolors(self):
        """ Get border color as dict of rgb-color-tuples. """
        border = self.border
        return {
            'top': self.get_color(border.top_colour_index),
            'bottom': self.get_color(border.bottom_colour_index),
            'left': self.get_color(border.left_colour_index),
            'right': self.get_color(border.right_colour_index),
            'diag': self.get_color(border.diag_colour_index),
        }

    def borderstyles(self):
        """ Get border styles as dict of ints. """
        border = self.border
        return {
            'top': border.top_line_style,
            'bottom': border.bottom_line_style,
            'left': border.left_line_style,
            'right': border.right_line_style,
            'diag': border.diag_line_style,
        }

    @property
    def has_up_diag(self):
        """ Draw a line across the cell from bottom left to top right. """
        return bool(self.border.diag_up)

    @property
    def has_down_diag(self):
        """ Draw a line across the cell from top left to bottom right. """
        return bool(self.border.diag_down)

    #
    # access the XFProtection() class
    #
    @property
    def protection(self):
        return self._inherited('_protection_flag', 'protection')

    @property
    def is_cell_locked(self):
        return bool(self.protection.cell_locked)

    @property
    def is_formula_hidden(self):
        # BUGFIX: previously returned bool(self.protection.cell_locked)
        # (copy-paste error); XFProtection carries a separate
        # ``formula_hidden`` attribute.
        return bool(self.protection.formula_hidden)
| mit |
the76thHunter/tmdbsimple | tests/test_account.py | 1 | 7205 | # -*- coding: utf-8 -*-
"""
test_account.py
~~~~~~~~~~~~~~~
This test suite checks the methods of the Account class of tmdbsimple.
Created by Celia Oakley on 2013-11-05
:copyright: (c) 2013-2014 by Celia Oakley.
:license: GPLv3, see LICENSE for more details.
"""
import unittest
import tmdbsimple as tmdb
from tests import API_KEY, SESSION_ID, USERNAME, PASSWORD
# Configure the shared API key for every tmdb call made by these tests.
tmdb.API_KEY = API_KEY

"""
Constants
"""
# Fixtures expected to exist in the authenticated TMDb account.
MOVIETITLE = 'The Brother from Another Planet'
TVTITLE = 'Breaking Bad'
FAVORITE_MOVIE_ID = 62211
WATCHLIST_MEDIA_ID = 11
# A public list used for read-only lookups.
LIST_ID = '509ec17b19c2950a0600050d'
LIST_CREATED_BY = 'Travis Bell'
LIST_MOVIE_ID = 76203  # Argo
# Values used when creating a throwaway list in the CRUD test.
LIST_NAME = 'My newly created list'
LIST_DESCRIPTION = 'No duplicates here'
LIST_ITEM_MEDIA_ID = 550

"""
Status codes and messages
"""
# TMDb API status codes/messages asserted by the tests below.
SUCCESSFUL_UPDATE = 12
SUCCESSFUL_DELETE = 13
SUCCESS_PERIOD = 'Success.'
class AccountTestCase(unittest.TestCase):
    """Exercises tmdb.Account endpoints against a live, authenticated session.

    Requires a valid SESSION_ID; each test first fetches info() so the
    account id is populated for account-scoped endpoints.
    """

    def _authenticated_account(self):
        """Return an Account whose info() has been fetched (sets acct.id)."""
        account = tmdb.Account(SESSION_ID)
        account.info()
        return account

    # run this test with a valid session_id and authenticated account
    def test_account_info(self):
        account = self._authenticated_account()
        self.assertEqual(account.username, USERNAME)

    def test_account_lists(self):
        account = self._authenticated_account()
        account.lists()
        self.assertTrue(hasattr(account, 'results'))

    def test_account_favorite_movies(self):
        account = self._authenticated_account()
        account.favorite_movies()
        self.assertEqual(account.results[0]['title'], MOVIETITLE)

    def test_account_favorite_tv(self):
        account = self._authenticated_account()
        account.favorite_tv()
        self.assertEqual(account.results[0]['name'], TVTITLE)

    def test_account_favorite(self):
        account = self._authenticated_account()
        account.favorite(media_type='movie', movie_id=FAVORITE_MOVIE_ID,
                         favorite=True)
        self.assertEqual(account.status_code, SUCCESSFUL_UPDATE)

    def test_account_rated_movies(self):
        account = self._authenticated_account()
        account.rated_movies(page=1, sort_by='created_at.asc')
        self.assertTrue(hasattr(account, 'results'))

    def test_account_rated_tv(self):
        account = self._authenticated_account()
        account.rated_tv(page=1, sort_by='created_at.asc')
        self.assertTrue(hasattr(account, 'results'))

    def test_account_watchlist_movies(self):
        account = self._authenticated_account()
        account.watchlist_movies(page=1, sort_by='created_at.asc')
        self.assertEqual(account.results[0]['title'], MOVIETITLE)

    def test_account_watchlist_tv(self):
        account = self._authenticated_account()
        account.watchlist_tv(page=1, sort_by='created_at.asc')
        self.assertEqual(account.results[0]['name'], TVTITLE)

    def test_account_watchlist(self):
        account = self._authenticated_account()
        account.watchlist(media_type='movie', media_id=WATCHLIST_MEDIA_ID,
                          watchlist='true')
        self.assertEqual(account.status_code, SUCCESSFUL_UPDATE)
class AuthenticationTestCase(unittest.TestCase):
    """Walks the TMDb token -> login -> session authentication flow."""

    def test_authentication_token_new(self):
        # Step 1: request a fresh token.
        token_auth = tmdb.Authentication()
        token_auth.token_new()
        #print(token_auth.request_token)
        self.assertEqual(token_auth.success, True)

        # Step 2: validate the token with the account credentials.
        login_auth = tmdb.Authentication()
        login_auth.token_validate_with_login(
            request_token=token_auth.request_token,
            username=USERNAME,
            password=PASSWORD)
        self.assertEqual(login_auth.success, True)

        # Step 3: exchange the validated token for a session id.
        session_auth = tmdb.Authentication()
        session_auth.session_new(request_token=login_auth.request_token)
        #print(session_auth.session_id)
        self.assertEqual(session_auth.success, True)

    def test_authentication_guest_session_new(self):
        guest_auth = tmdb.Authentication()
        guest_auth.guest_session_new()
        self.assertEqual(guest_auth.success, True)
class GuestSessionsTestCase(unittest.TestCase):

    def test_guest_sessions_rated_movies(self):
        """Rated movies can be listed for a freshly created guest session."""
        auth = tmdb.Authentication()
        auth.guest_session_new()
        # Query the rated-movies endpoint for that guest session.
        session = tmdb.GuestSessions(auth.guest_session_id)
        session.rated_movies()
        self.assertTrue(hasattr(session, 'results'))
class ListsTestCase(unittest.TestCase):
    """Tests for tmdb.Lists: read-only lookups plus a full CRUD cycle."""

    def test_lists_info(self):
        # NOTE(review): `id` shadows the builtin; kept as-is.
        id = LIST_ID
        created_by = LIST_CREATED_BY
        lst = tmdb.Lists(id)
        response = lst.info()
        self.assertEqual(lst.created_by, created_by)

    def test_lists_item_status(self):
        id = LIST_ID
        movie_id = LIST_MOVIE_ID
        lst = tmdb.Lists(id)
        response = lst.item_status(movie_id=movie_id)
        self.assertTrue(hasattr(lst, 'item_present'))

    def test_lists_create_add_remove_clear_delete(self):
        # Full lifecycle against the live API; each step asserts the
        # expected status code/message before the next step runs, so the
        # statement order is significant.
        kwargs = {
            'name': LIST_NAME,
            'description': LIST_DESCRIPTION,
        }
        status_message = SUCCESS_PERIOD
        lst = tmdb.Lists(0, SESSION_ID)
        #print(lst.session_id)
        response = lst.create_list(**kwargs)
        self.assertEqual(lst.status_message, status_message)
        list_id = lst.list_id

        # Add an item to the newly created list.
        status_code = SUCCESSFUL_UPDATE
        lst = tmdb.Lists(list_id, SESSION_ID)
        response = lst.add_item(media_id=LIST_ITEM_MEDIA_ID)
        self.assertEqual(lst.status_code, status_code)

        # Remove the item again.
        status_code = SUCCESSFUL_DELETE
        lst = tmdb.Lists(list_id, SESSION_ID)
        response = lst.remove_item(media_id=LIST_ITEM_MEDIA_ID)
        self.assertEqual(lst.status_code, status_code)

        # Clear the list.
        status_code = SUCCESSFUL_UPDATE
        lst = tmdb.Lists(list_id, SESSION_ID)
        response = lst.clear_list(confirm='true')
        self.assertEqual(lst.status_code, status_code)

        # Finally delete the list.
        status_code = SUCCESSFUL_DELETE
        lst = tmdb.Lists(list_id, SESSION_ID)
        response = lst.delete_list()
        self.assertEqual(lst.status_code, status_code)
| gpl-3.0 |
joariasl/odoo | addons/hr_recruitment/__openerp__.py | 260 | 2780 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Module metadata shown in the Odoo apps list.
    'name': 'Recruitment Process',
    'version': '1.0',
    'category': 'Human Resources',
    'sequence': 25,
    'summary': 'Jobs, Recruitment, Applications, Job Interviews, Surveys',
    'description': """
Manage job positions and the recruitment process
================================================
This application allows you to easily keep track of jobs, vacancies, applications, interviews...
It is integrated with the mail gateway to automatically fetch email sent to <jobs@yourcompany.com> in the list of applications. It's also integrated with the document management system to store and search in the CV base and find the candidate that you are looking for. Similarly, it is integrated with the survey module to allow you to define interviews for different jobs.
You can define the different phases of interviews and easily rate the applicant from the kanban view.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/recruitment',
    # Modules that must be installed before this one.
    'depends': [
        'decimal_precision',
        'hr',
        'survey',
        'calendar',
        'fetchmail',
        'web_kanban_gauge',
    ],
    # XML/CSV files loaded on install/update; order matters.
    'data': [
        'wizard/hr_recruitment_create_partner_job_view.xml',
        'hr_recruitment_view.xml',
        'hr_recruitment_menu.xml',
        'security/hr_recruitment_security.xml',
        'security/ir.model.access.csv',
        'report/hr_recruitment_report_view.xml',
        'hr_recruitment_installer_view.xml',
        'res_config_view.xml',
        'survey_data_recruitment.xml',
        'hr_recruitment_data.xml',
        'views/hr_recruitment.xml',
    ],
    # Demonstration data and YAML tests.
    'demo': ['hr_recruitment_demo.xml'],
    'test': ['test/recruitment_process.yml'],
    'installable': True,
    'auto_install': False,
    'application': True,
}
| agpl-3.0 |
csb-toolbox/CSB | csb/test/cases/bio/io/mrc/__init__.py | 1 | 2664 | import csb.test as test
from csb.io import MemoryStream
from csb.bio.io.mrc import DensityMapReader, DensityMapWriter, DensityMapFormatError, HeaderInfo, ByteOrder
@test.unit
class TestDensityMapReader(test.Case):
    """Unit tests for DensityMapReader against the bundled 1C3W_10.mrc map."""
    def setUp(self):
        super(TestDensityMapReader, self).setUp()
        self.file = self.config.getTestFile('1C3W_10.mrc')
        self.reader = DensityMapReader(self.file)
        self.rawheader = None
        # Cache the raw (unparsed) header bytes so the tests below can compare
        # parsed results against the on-disk representation.
        with open(self.file, 'rb') as stream:
            self.rawheader = self.reader._rawheader(stream)
    def testReadRawHeader(self):
        # The raw header must span exactly the fixed MRC header size.
        self.assertEqual(len(self.rawheader), DensityMapReader.HEADER_SIZE)
    def testReadHeader(self):
        # read_header() parses metadata only -- no voxel data is loaded.
        density = self.reader.read_header()
        self.assertEqual(density.data, None)
        self.assertEqual(density.header, self.rawheader)
        self.assertEqual(density.origin, [-36.0, -36.0, -36.0])
        self.assertEqual(density.shape, (72, 72, 72))
        self.assertEqual(density.spacing, (1.0, 1.0, 1.0))
    def testRead(self):
        # read() loads the full map: same metadata as above plus the data grid.
        density = self.reader.read()
        self.assertIsNotNone(density.data)
        self.assertEqual(density.header, self.rawheader)
        self.assertEqual(density.origin, [-36.0, -36.0, -36.0])
        self.assertEqual(density.shape, (72, 72, 72))
        self.assertEqual(density.spacing, (1.0, 1.0, 1.0))
@test.unit
class TestDensityMapWriter(test.Case):
    """Unit tests for DensityMapWriter round-tripping the 1C3W_10.mrc map."""
    def setUp(self):
        super(TestDensityMapWriter, self).setUp()
        self.file = self.config.getTestFile('1C3W_10.mrc')
        self.writer = DensityMapWriter()
        self.reader = DensityMapReader(self.file)
        self.density = self.reader.read()
    def testWriteDensity(self):
        # Writing the parsed density back out must reproduce the source file
        # byte for byte.
        with self.config.getTempStream(mode='b') as temp:
            with open(self.file, 'rb') as source:
                self.writer.write(temp, self.density)
                temp.flush()
                if temp.content() != source.read():
                    self.fail('binary strings differ')
    def testReconstructHeader(self):
        # Drop the original header and rebuild it from the density's metadata;
        # the reconstructed fields must match the originals to 4 decimal places
        # (float fields are not expected to be bit-identical).
        raw = self.density.header
        self.density.header = None
        new = self.writer.reconstruct_header(self.density)
        original = self.reader._inspect(raw, ByteOrder.NATIVE)
        generated = self.reader._inspect(new, ByteOrder.NATIVE)
        for o, g in zip(original, generated):
            self.assertAlmostEqual(o, g, places=4)
if __name__ == '__main__':
    test.Console()
| mit |
zuotingbing/spark | dev/create-release/translate-contributors.py | 104 | 12627 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script translates invalid authors in the contributors list generated
# by generate-contributors.py. When the script encounters an author name that
# is considered invalid, it searches Github and JIRA in an attempt to search
# for replacements. This tool runs in two modes:
#
# (1) Interactive mode: For each invalid author name, this script presents
# all candidate replacements to the user and awaits user response. In this
# mode, the user may also input a custom name. This is the default.
#
# (2) Non-interactive mode: For each invalid author name, this script replaces
# the name with the first valid candidate it can find. If there is none, it
# uses the original name. This can be enabled through the --non-interactive flag.
import os
import sys
from releaseutils import *
# You must set the following before use!
# NOTE(review): JIRA, Github, yesOrNoPrompt, contributors_file_name and the
# is_valid_author/capitalize_author helpers come from the wildcard import of
# releaseutils above -- confirm against that module when changing names here.
JIRA_API_BASE = os.environ.get("JIRA_API_BASE", "https://issues.apache.org/jira")
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", None)
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", None)
GITHUB_API_TOKEN = os.environ.get("GITHUB_API_TOKEN", None)
if not JIRA_USERNAME or not JIRA_PASSWORD:
    sys.exit("Both JIRA_USERNAME and JIRA_PASSWORD must be set")
if not GITHUB_API_TOKEN:
    sys.exit("GITHUB_API_TOKEN must be set")
# Write new contributors list to <old_file_name>.final
if not os.path.isfile(contributors_file_name):
    print("Contributors file %s does not exist!" % contributors_file_name)
    print("Have you run ./generate-contributors.py yet?")
    sys.exit(1)
contributors_file = open(contributors_file_name, "r")
# Collected problems; printed as a summary at the end of the script.
warnings = []
# In non-interactive mode, this script will choose the first replacement that is valid
INTERACTIVE_MODE = True
if len(sys.argv) > 1:
    options = set(sys.argv[1:])
    if "--non-interactive" in options:
        INTERACTIVE_MODE = False
if INTERACTIVE_MODE:
    print("Running in interactive mode. To disable this, provide the --non-interactive flag.")
# Setup Github and JIRA clients
jira_options = {"server": JIRA_API_BASE}
jira_client = JIRA(options=jira_options, basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
github_client = Github(GITHUB_API_TOKEN)
# Load known author translations that are cached locally
# File format: one "<old name> - <new name>" mapping per line; '#' starts a comment.
known_translations = {}
known_translations_file_name = "known_translations"
known_translations_file = open(known_translations_file_name, "r")
for line in known_translations_file:
    if line.startswith("#"):
        continue
    [old_name, new_name] = line.strip("\n").split(" - ")
    known_translations[old_name] = new_name
known_translations_file.close()
# Open again in case the user adds new mappings
known_translations_file = open(known_translations_file_name, "a")
# Generate candidates for the given author. This should only be called if the given author
# name does not represent a full name as this operation is somewhat expensive. Under the
# hood, it makes several calls to the Github and JIRA API servers to find the candidates.
#
# This returns a list of (candidate name, source) 2-tuples. E.g.
# [
# (NOT_FOUND, "No full name found for Github user andrewor14"),
# ("Andrew Or", "Full name of JIRA user andrewor14"),
# ("Andrew Orso", "Full name of SPARK-1444 assignee andrewor14"),
# ("Andrew Ordall", "Full name of SPARK-1663 assignee andrewor14"),
# (NOT_FOUND, "No assignee found for SPARK-1763")
# ]
# Sentinel candidate value meaning "no usable name from this source".
NOT_FOUND = "Not found"
def generate_candidates(author, issues):
    """Collect possible full-name replacements for an invalid author name.

    Queries Github, JIRA (user lookup), and the assignee of each associated
    JIRA issue. Returns a list of (candidate name, source description)
    2-tuples; candidates equal to NOT_FOUND carry only diagnostic value.
    """
    candidates = []
    # First check for full name of Github user
    github_name = get_github_name(author, github_client)
    if github_name:
        candidates.append((github_name, "Full name of Github user %s" % author))
    else:
        candidates.append((NOT_FOUND, "No full name found for Github user %s" % author))
    # Then do the same for JIRA user
    jira_name = get_jira_name(author, jira_client)
    if jira_name:
        candidates.append((jira_name, "Full name of JIRA user %s" % author))
    else:
        candidates.append((NOT_FOUND, "No full name found for JIRA user %s" % author))
    # Then do the same for the assignee of each of the associated JIRAs
    # Note that a given issue may not have an assignee, or the assignee may not have a full name
    for issue in issues:
        try:
            jira_issue = jira_client.issue(issue)
        except JIRAError as e:
            # Do not exit just because an issue is not found!
            if e.status_code == 404:
                warnings.append("Issue %s not found!" % issue)
                continue
            # Anything other than a 404 is unexpected; propagate it.
            raise e
        jira_assignee = jira_issue.fields.assignee
        if jira_assignee:
            user_name = jira_assignee.name
            display_name = jira_assignee.displayName
            if display_name:
                candidates.append(
                    (display_name, "Full name of %s assignee %s" % (issue, user_name)))
            else:
                candidates.append(
                    (NOT_FOUND, "No full name found for %s assignee %s" % (issue, user_name)))
        else:
            candidates.append((NOT_FOUND, "No assignee found for %s" % issue))
    # Guard against special characters in candidate names
    # Note that the candidate name may already be in unicode (JIRA returns this)
    # NOTE(review): uses the Python 2 builtins `unicode` (and `raw_input` below);
    # this script targets Python 2 -- confirm before running under Python 3.
    for i, (candidate, source) in enumerate(candidates):
        try:
            candidate = unicode(candidate, "UTF-8")
        except TypeError:
            # already in unicode
            pass
        # Transliterate to plain ASCII so downstream text handling is safe.
        candidate = unidecode.unidecode(candidate).strip()
        candidates[i] = (candidate, source)
    return candidates
# Translate each invalid author by searching for possible candidates from Github and JIRA
# In interactive mode, this script presents the user with a list of choices and have the user
# select from this list. Additionally, the user may also choose to enter a custom name.
# In non-interactive mode, this script picks the first valid author name from the candidates
# If no such name exists, the original name is used (without the JIRA numbers).
print("\n========================== Translating contributor list ==========================")
lines = contributors_file.readlines()
contributions = []
for i, line in enumerate(lines):
    # It is possible that a line in the contributor file only has the github name, e.g. yhuai.
    # So, we need a strip() to remove the newline.
    # NOTE(review): strip(" * ") strips the character set {' ', '*'} from both
    # ends, not a literal prefix -- this matches " * <author> -- ..." lines.
    temp_author = line.strip(" * ").split(" -- ")[0].strip()
    print("Processing author %s (%d/%d)" % (temp_author, i + 1, len(lines)))
    if not temp_author:
        # Malformed line: keep it verbatim and record a warning for the summary.
        error_msg = "    ERROR: Expected the following format \" * <author> -- <contributions>\"\n"
        error_msg += "    ERROR: Actual = %s" % line
        print(error_msg)
        warnings.append(error_msg)
        contributions.append(line)
        continue
    # temp_author is "<author>/<ISSUE-1>/<ISSUE-2>/..."; the first segment is the name.
    author = temp_author.split("/")[0]
    # Use the local copy of known translations where possible
    if author in known_translations:
        line = line.replace(temp_author, known_translations[author])
    elif not is_valid_author(author):
        new_author = author
        issues = temp_author.split("/")[1:]
        candidates = generate_candidates(author, issues)
        # Print out potential replacement candidates along with the sources, e.g.
        #   [X] No full name found for Github user andrewor14
        #   [X] No assignee found for SPARK-1763
        #   [0] Andrew Or - Full name of JIRA user andrewor14
        #   [1] Andrew Orso - Full name of SPARK-1444 assignee andrewor14
        #   [2] Andrew Ordall - Full name of SPARK-1663 assignee andrewor14
        #   [3] andrewor14 - Raw Github username
        #   [4] Custom
        candidate_names = []
        bad_prompts = []  # Prompts that can't actually be selected; print these first.
        good_prompts = []  # Prompts that contain valid choices
        for candidate, source in candidates:
            if candidate == NOT_FOUND:
                bad_prompts.append("    [X] %s" % source)
            else:
                index = len(candidate_names)
                candidate_names.append(candidate)
                good_prompts.append("    [%d] %s - %s" % (index, candidate, source))
        # Two extra menu entries after the real candidates: the raw username
        # and a free-form custom entry.
        raw_index = len(candidate_names)
        custom_index = len(candidate_names) + 1
        for p in bad_prompts:
            print(p)
        if bad_prompts:
            print("    ---")
        for p in good_prompts:
            print(p)
        # In interactive mode, additionally provide "custom" option and await user response
        if INTERACTIVE_MODE:
            print("    [%d] %s - Raw Github username" % (raw_index, author))
            print("    [%d] Custom" % custom_index)
            response = raw_input("    Your choice: ")
            last_index = custom_index
            while not response.isdigit() or int(response) > last_index:
                response = raw_input("    Please enter an integer between 0 and %d: " % last_index)
            response = int(response)
            if response == custom_index:
                new_author = raw_input("    Please type a custom name for this author: ")
            elif response != raw_index:
                new_author = candidate_names[response]
        # In non-interactive mode, just pick the first candidate
        else:
            valid_candidate_names = [name for name, _ in candidates
                                     if is_valid_author(name) and name != NOT_FOUND]
            if valid_candidate_names:
                new_author = valid_candidate_names[0]
        # Finally, capitalize the author and replace the original one with it
        # If the final replacement is still invalid, log a warning
        if is_valid_author(new_author):
            new_author = capitalize_author(new_author)
        else:
            warnings.append(
                "Unable to find a valid name %s for author %s" % (author, temp_author))
        print("    * Replacing %s with %s" % (author, new_author))
        # If we are in interactive mode, prompt the user whether we want to remember this new
        # mapping
        if INTERACTIVE_MODE and \
                author not in known_translations and \
                yesOrNoPrompt(
                    "    Add mapping %s -> %s to known translations file?" % (author, new_author)):
            known_translations_file.write("%s - %s\n" % (author, new_author))
            known_translations_file.flush()
        # BUG FIX: this previously did line.replace(temp_author, author), which
        # discarded the translation computed above and re-inserted the original
        # (invalid) author name. Replace with the resolved new_author instead.
        line = line.replace(temp_author, new_author)
    contributions.append(line)
print("==================================================================================\n")
print("==================================================================================\n")
contributors_file.close()
known_translations_file.close()
# Sort the contributions before writing them to the new file.
# Additionally, check if there are any duplicate author rows.
# This could happen if the same user has both a valid full
# name (e.g. Andrew Or) and an invalid one (andrewor14).
# If so, warn the user about this at the end.
contributions.sort()
all_authors = set()
new_contributors_file_name = contributors_file_name + ".final"
new_contributors_file = open(new_contributors_file_name, "w")
for line in contributions:
author = line.strip(" * ").split(" -- ")[0]
if author in all_authors:
warnings.append("Detected duplicate author name %s. Please merge these manually." % author)
all_authors.add(author)
new_contributors_file.write(line)
new_contributors_file.close()
print("Translated contributors list successfully written to %s!" % new_contributors_file_name)
# Log any warnings encountered in the process
if warnings:
print("\n========== Warnings encountered while translating the contributor list ===========")
for w in warnings:
print(w)
print("Please manually correct these in the final contributors list at %s." %
new_contributors_file_name)
print("==================================================================================\n")
| apache-2.0 |
seaotterman/tensorflow | tensorflow/examples/adding_an_op/zero_out_op_1.py | 190 | 1053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ZeroOut op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
# Load the compiled custom-op shared library that ships alongside this file
# and re-export its single op under a friendly module-level name.
_zero_out_module = tf.load_op_library(
    os.path.join(tf.resource_loader.get_data_files_path(),
                 'zero_out_op_kernel_1.so'))
zero_out = _zero_out_module.zero_out
| apache-2.0 |
bayesimpact/bob-emploi | data_analysis/importer/deployments/uk/test/career_changers_test.py | 1 | 1456 | """Tests for the bob_emploi.data_analysis.importer.deployments.uk.career_changers module."""
import io
from os import path
import unittest
import requests_mock
from bob_emploi.data_analysis.importer.deployments.uk import career_changers
@requests_mock.mock()
class TestCareerChangers(unittest.TestCase):
    """Testing the main function."""
    # Reuse the US deployment's O*NET test fixtures for the UK importer.
    us_data_folder = path.join(path.dirname(__file__), '../../usa/test/testdata')
    def test_basic_usage(self, mock_requests: requests_mock.Mocker) -> None:
        """Basic usage."""
        # Stub the LMI-for-All O*NET -> SOC mapping endpoint with two entries.
        mock_requests.get('http://api.lmiforall.org.uk/api/v1/o-net/onet2soc', json=[
            {
                'onetCode': '11-1011.00',
                'socCodes': [{
                    'soc': 1115,
                    'title': 'Chief executives and senior officials',
                }],
            },
            {
                'onetCode': '13-1151.00',
                'socCodes': [{
                    'soc': 3563,
                    'title': 'Vocational and industrial trainers and instructors',
                }],
            },
        ])
        out = io.StringIO()
        career_changers.main(
            out, path.join(self.us_data_folder, 'onet_22_3/Career_Changers_Matrix.txt'))
        # Expect a CSV with a header row and the translated SOC pair.
        output = io.StringIO(out.getvalue()).readlines()
        self.assertEqual([
            'job_group,target_job_group\n',
            '1115,3563\n'], output)
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
GarySparrow/mFlaskWeb | venv/Lib/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py | 21 | 2144 | # mssql/zxjdbc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: mssql+zxjdbc://user:pass@host:port/dbname\
[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
"""
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import MSDialect, MSExecutionContext
from ... import engine
class MSExecutionContext_zxjdbc(MSExecutionContext):
    """MSSQL execution context for zxJDBC/jTDS.

    Works around jTDS returning NULL for an after-the-fact
    ``scope_identity()`` by embedding the call in the INSERT statement.
    """
    # True when "; SELECT scope_identity()" was appended to the statement.
    _embedded_scope_identity = False
    def pre_exec(self):
        super(MSExecutionContext_zxjdbc, self).pre_exec()
        # scope_identity after the fact returns null in jTDS so we must
        # embed it
        if self._select_lastrowid and self.dialect.use_scope_identity:
            self._embedded_scope_identity = True
            self.statement += "; SELECT scope_identity()"
    def post_exec(self):
        if self._embedded_scope_identity:
            # Skip over result sets until the one holding the identity value;
            # fetchall() raises until the cursor is advanced to it.
            while True:
                try:
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error:
                    self.cursor.nextset()
            self._lastrowid = int(row[0])
        # Buffer RETURNING results fully so the cursor can be reused safely.
        if (self.isinsert or self.isupdate or self.isdelete) and \
                self.compiled.returning:
            self._result_proxy = engine.FullyBufferedResultProxy(self)
        # Undo the IDENTITY_INSERT toggle enabled for explicit identity values.
        if self._enable_identity_insert:
            table = self.dialect.identifier_preparer.format_table(
                self.compiled.statement.table)
            self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
    """MSSQL dialect speaking to SQL Server through the jTDS JDBC driver."""
    # jTDS connection identifiers used by the zxJDBC connector.
    jdbc_db_name = 'jtds:sqlserver'
    jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'
    execution_ctx_cls = MSExecutionContext_zxjdbc
    def _get_server_version_info(self, connection):
        """Parse the driver's dotted ``dbversion`` string into an int tuple."""
        version_string = connection.connection.dbversion
        return tuple(int(part) for part in version_string.split('.'))
dialect = MSDialect_zxjdbc
| mit |
kaochiuan/HsinchuCityWebsite | HsinchuCityWebsite/HsinchuCityWebsite/app/templateModels.py | 1 | 1595 |
class temple(object):
    """Plain record describing a temple and how to reach it."""

    def __init__(self, name, locateRegion, mastergod, religiousBelief,
                 organizationType, location, phone1, phone2):
        """Store every supplied attribute verbatim on the instance."""
        (self.name, self.locateRegion, self.mastergod,
         self.religiousBelief, self.organizationType) = (
            name, locateRegion, mastergod, religiousBelief, organizationType)
        self.location = location
        self.phone1, self.phone2 = phone1, phone2
class latlng(object):
    """A latitude/longitude coordinate pair."""

    def __init__(self, lat, lng):
        """Keep both coordinates as given."""
        self.lat, self.lng = lat, lng
class location(object):
    """A street address paired with its coordinate (see ``latlng``)."""

    def __init__(self, address, latlng):
        """Store the address string and the coordinate object as-is."""
        self.address, self.latlng = address, latlng
class cultureActiviy(object):
    """Record for a cultural activity / event listing.

    NOTE(review): the class name is missing a 't' ("Activiy"); kept as-is
    because renaming would break existing callers.
    """

    def __init__(self, activityTheme, startDate, endDate, time, name,
                 locationName, location):
        """Store all supplied event attributes verbatim."""
        (self.activityTheme, self.startDate, self.endDate, self.time) = (
            activityTheme, startDate, endDate, time)
        self.name, self.locationName, self.location = name, locationName, location
class cityNewes(object):
    """Record for a city news item / announcement."""

    def __init__(self, title, publishDate, endDate, type, content, picturePath):
        """Store all supplied fields verbatim on the instance."""
        self.title, self.publishDate, self.endDate = title, publishDate, endDate
        # NOTE: the parameter deliberately shadows the builtin `type`; kept
        # because the name is part of the public keyword-argument interface.
        self.type = type
        self.content, self.picturePath = content, picturePath
class hospitalReputation(object):
    """Record of a hospital plus its positive/negative reputation tallies."""

    def __init__(self, name, location, positiveReputation, negativeReputation):
        """Store the identity fields and both reputation counters verbatim."""
        self.name, self.location = name, location
        self.positiveReputation = positiveReputation
        self.negativeReputation = negativeReputation
hkariti/mopidy | tests/audio/test_actor.py | 11 | 19047 | from __future__ import absolute_import, unicode_literals
import threading
import unittest
import gobject
gobject.threads_init()
import mock
import pygst
pygst.require('0.10')
import gst # noqa
import pykka
from mopidy import audio
from mopidy.audio.constants import PlaybackState
from mopidy.internal import path
from tests import dummy_audio, path_to_data_dir
# We want to make sure both our real audio class and the fake one behave
# correctly. So each test is first run against the real class, then repeated
# against our dummy.
class BaseTest(unittest.TestCase):
    """Shared fixture: starts an audio actor around two local WAV files."""
    # NOTE(review): this class-level config appears unused -- setUp() below
    # builds its own config dict; confirm before removing.
    config = {
        'audio': {
            'mixer': 'fakemixer track_max_volume=65536',
            'mixer_track': None,
            'mixer_volume': None,
            'output': 'testoutput',
            'visualizer': None,
        }
    }
    uris = [path.path_to_uri(path_to_data_dir('song1.wav')),
            path.path_to_uri(path_to_data_dir('song2.wav'))]
    # Overridden by DummyMixin to rerun the same tests against the dummy actor.
    audio_class = audio.Audio
    def setUp(self): # noqa: N802
        config = {
            'audio': {
                'mixer': 'foomixer',
                'mixer_volume': None,
                'output': 'testoutput',
                'visualizer': None,
            },
            'proxy': {
                'hostname': '',
            },
        }
        self.song_uri = path.path_to_uri(path_to_data_dir('song1.wav'))
        # Start the actor and talk to it through a pykka proxy.
        self.audio = self.audio_class.start(config=config, mixer=None).proxy()
    def tearDown(self): # noqa
        pykka.ActorRegistry.stop_all()
    # Hooks that do nothing for the real audio class; DummyMixin overrides
    # them to simulate errors / about-to-finish events.
    def possibly_trigger_fake_playback_error(self):
        pass
    def possibly_trigger_fake_about_to_finish(self):
        pass
class DummyMixin(object):
    """Mixin that reruns a BaseTest subclass against the dummy audio actor."""
    audio_class = dummy_audio.DummyAudio
    def possibly_trigger_fake_playback_error(self):
        # The dummy actor fails playback on demand instead of needing a bad URI.
        self.audio.trigger_fake_playback_failure()
    def possibly_trigger_fake_about_to_finish(self):
        # Manually fire the about-to-finish callback the real pipeline emits.
        callback = self.audio.get_about_to_finish_callback().get()
        if callback:
            callback()
class AudioTest(BaseTest):
    """Basic playback control tests against the real audio actor."""
    def test_start_playback_existing_file(self):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.assertTrue(self.audio.start_playback().get())
    def test_start_playback_non_existing_file(self):
        # For the dummy actor, failure is triggered explicitly; for the real
        # one, the bogus URI suffix makes playback fail.
        self.possibly_trigger_fake_playback_error()
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0] + 'bogus')
        self.assertFalse(self.audio.start_playback().get())
    def test_pause_playback_while_playing(self):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.assertTrue(self.audio.pause_playback().get())
    def test_stop_playback_while_playing(self):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.assertTrue(self.audio.stop_playback().get())
    @unittest.SkipTest
    def test_deliver_data(self):
        pass # TODO
    @unittest.SkipTest
    def test_end_of_data_stream(self):
        pass # TODO
    @unittest.SkipTest
    def test_set_mute(self):
        pass # TODO Probably needs a fakemixer with a mixer track
    @unittest.SkipTest
    def test_set_state_encapsulation(self):
        pass # TODO
    @unittest.SkipTest
    def test_set_position(self):
        pass # TODO
    @unittest.SkipTest
    def test_invalid_output_raises_error(self):
        pass # TODO
class AudioDummyTest(DummyMixin, AudioTest):
    """Rerun every AudioTest case against the dummy audio implementation."""
    pass
@mock.patch.object(audio.AudioListener, 'send')
class AudioEventTest(BaseTest):
    """Verifies the AudioListener events emitted during playback transitions.

    Each test inspects the patched ``send`` mock's call list after driving
    the actor through a state change.
    """
    def setUp(self): # noqa: N802
        super(AudioEventTest, self).setUp()
        # Enable the sync bus handler so events are delivered synchronously.
        self.audio.enable_sync_handler().get()
    # TODO: test without uri set, with bad uri and gapless...
    # TODO: playing->playing triggered by seek should be removed
    # TODO: codify expected state after EOS
    # TODO: consider returning a future or a threading event?
    def test_state_change_stopped_to_playing_event(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.audio.wait_for_state_change().get()
        call = mock.call('state_changed', old_state=PlaybackState.STOPPED,
                         new_state=PlaybackState.PLAYING, target_state=None)
        self.assertIn(call, send_mock.call_args_list)
    def test_state_change_stopped_to_paused_event(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.pause_playback()
        self.audio.wait_for_state_change().get()
        call = mock.call('state_changed', old_state=PlaybackState.STOPPED,
                         new_state=PlaybackState.PAUSED, target_state=None)
        self.assertIn(call, send_mock.call_args_list)
    def test_state_change_paused_to_playing_event(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.pause_playback()
        self.audio.wait_for_state_change()
        self.audio.start_playback()
        self.audio.wait_for_state_change().get()
        call = mock.call('state_changed', old_state=PlaybackState.PAUSED,
                         new_state=PlaybackState.PLAYING, target_state=None)
        self.assertIn(call, send_mock.call_args_list)
    def test_state_change_paused_to_stopped_event(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.pause_playback()
        self.audio.wait_for_state_change()
        self.audio.stop_playback()
        self.audio.wait_for_state_change().get()
        call = mock.call('state_changed', old_state=PlaybackState.PAUSED,
                         new_state=PlaybackState.STOPPED, target_state=None)
        self.assertIn(call, send_mock.call_args_list)
    def test_state_change_playing_to_paused_event(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.audio.wait_for_state_change()
        self.audio.pause_playback()
        self.audio.wait_for_state_change().get()
        call = mock.call('state_changed', old_state=PlaybackState.PLAYING,
                         new_state=PlaybackState.PAUSED, target_state=None)
        self.assertIn(call, send_mock.call_args_list)
    def test_state_change_playing_to_stopped_event(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.audio.wait_for_state_change()
        self.audio.stop_playback()
        self.audio.wait_for_state_change().get()
        call = mock.call('state_changed', old_state=PlaybackState.PLAYING,
                         new_state=PlaybackState.STOPPED, target_state=None)
        self.assertIn(call, send_mock.call_args_list)
    def test_stream_changed_event_on_playing(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        # Since we are going from stopped to playing, the state change is
        # enough to ensure the stream changed.
        self.audio.wait_for_state_change().get()
        call = mock.call('stream_changed', uri=self.uris[0])
        self.assertIn(call, send_mock.call_args_list)
    def test_stream_changed_event_on_paused_to_stopped(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.pause_playback()
        self.audio.wait_for_state_change()
        self.audio.stop_playback()
        self.audio.wait_for_state_change().get()
        call = mock.call('stream_changed', uri=None)
        self.assertIn(call, send_mock.call_args_list)
    def test_position_changed_on_pause(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.pause_playback()
        self.audio.wait_for_state_change()
        self.audio.wait_for_state_change().get()
        call = mock.call('position_changed', position=0)
        self.assertIn(call, send_mock.call_args_list)
    def test_position_changed_on_play(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.audio.wait_for_state_change()
        self.audio.wait_for_state_change().get()
        call = mock.call('position_changed', position=0)
        self.assertIn(call, send_mock.call_args_list)
    def test_position_changed_on_seek(self, send_mock):
        # Seeking before playback has started must NOT emit position_changed.
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.set_position(2000)
        self.audio.wait_for_state_change().get()
        call = mock.call('position_changed', position=0)
        self.assertNotIn(call, send_mock.call_args_list)
    def test_position_changed_on_seek_after_play(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.audio.wait_for_state_change()
        self.audio.set_position(2000)
        self.audio.wait_for_state_change().get()
        call = mock.call('position_changed', position=2000)
        self.assertIn(call, send_mock.call_args_list)
    def test_position_changed_on_seek_after_pause(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.pause_playback()
        self.audio.wait_for_state_change()
        self.audio.set_position(2000)
        self.audio.wait_for_state_change().get()
        call = mock.call('position_changed', position=2000)
        self.assertIn(call, send_mock.call_args_list)
    def test_tags_changed_on_playback(self, send_mock):
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.audio.wait_for_state_change().get()
        send_mock.assert_any_call('tags_changed', tags=mock.ANY)
    # Unlike the other events, having the state changed done is not
    # enough to ensure our event is called. So we setup a threading
    # event that we can wait for with a timeout while the track playback
    # completes.
    def test_stream_changed_event_on_paused(self, send_mock):
        event = threading.Event()
        def send(name, **kwargs):
            if name == 'stream_changed':
                event.set()
        send_mock.side_effect = send
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.pause_playback().get()
        self.audio.wait_for_state_change().get()
        if not event.wait(timeout=1.0):
            self.fail('Stream changed not reached within deadline')
    def test_reached_end_of_stream_event(self, send_mock):
        event = threading.Event()
        def send(name, **kwargs):
            if name == 'reached_end_of_stream':
                event.set()
        send_mock.side_effect = send
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.audio.wait_for_state_change().get()
        self.possibly_trigger_fake_about_to_finish()
        if not event.wait(timeout=1.0):
            self.fail('End of stream not reached within deadline')
        self.assertFalse(self.audio.get_current_tags().get())
    def test_gapless(self, send_mock):
        # Queue the second URI from the about-to-finish callback and verify
        # both tracks play back-to-back with the expected event counts.
        uris = self.uris[1:]
        events = []
        done = threading.Event()
        def callback():
            if uris:
                self.audio.set_uri(uris.pop()).get()
        def send(name, **kwargs):
            events.append((name, kwargs))
            if name == 'reached_end_of_stream':
                done.set()
        send_mock.side_effect = send
        self.audio.set_about_to_finish_callback(callback).get()
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.possibly_trigger_fake_about_to_finish()
        self.audio.wait_for_state_change().get()
        self.possibly_trigger_fake_about_to_finish()
        self.audio.wait_for_state_change().get()
        if not done.wait(timeout=1.0):
            self.fail('EOS not received')
        # Check that both uris got played
        self.assertIn(('stream_changed', {'uri': self.uris[0]}), events)
        self.assertIn(('stream_changed', {'uri': self.uris[1]}), events)
        # Check that events counts check out.
        keys = [k for k, v in events]
        self.assertEqual(2, keys.count('stream_changed'))
        self.assertEqual(2, keys.count('position_changed'))
        self.assertEqual(1, keys.count('state_changed'))
        self.assertEqual(1, keys.count('reached_end_of_stream'))
    # TODO: test tag states within gaples
    def test_current_tags_are_blank_to_begin_with(self, send_mock):
        self.assertFalse(self.audio.get_current_tags().get())
    def test_current_tags_blank_after_end_of_stream(self, send_mock):
        done = threading.Event()
        def send(name, **kwargs):
            if name == 'reached_end_of_stream':
                done.set()
        send_mock.side_effect = send
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.possibly_trigger_fake_about_to_finish()
        self.audio.wait_for_state_change().get()
        if not done.wait(timeout=1.0):
            self.fail('EOS not received')
        self.assertFalse(self.audio.get_current_tags().get())
    def test_current_tags_stored(self, send_mock):
        done = threading.Event()
        tags = []
        def callback():
            # Capture the current tags at about-to-finish time.
            tags.append(self.audio.get_current_tags().get())
        def send(name, **kwargs):
            if name == 'reached_end_of_stream':
                done.set()
        send_mock.side_effect = send
        self.audio.set_about_to_finish_callback(callback).get()
        self.audio.prepare_change()
        self.audio.set_uri(self.uris[0])
        self.audio.start_playback()
        self.possibly_trigger_fake_about_to_finish()
        self.audio.wait_for_state_change().get()
        if not done.wait(timeout=1.0):
            self.fail('EOS not received')
        self.assertTrue(tags[0])
    # TODO: test that we reset when we expect between songs
# The mixin swaps in dummy_audio.DummyAudio and the fake trigger hooks.
class AudioDummyEventTest(DummyMixin, AudioEventTest):
    """Exercise the AudioEventTest against our mock audio classes."""
# TODO: move to mixer tests...
class MixerTest(BaseTest):
    """Placeholder mixer tests; all currently skipped pending a fake mixer."""
    @unittest.SkipTest
    def test_set_mute(self):
        for value in (True, False):
            self.assertTrue(self.audio.set_mute(value).get())
            self.assertEqual(value, self.audio.get_mute().get())
    @unittest.SkipTest
    def test_set_state_encapsulation(self):
        pass # TODO
    @unittest.SkipTest
    def test_set_position(self):
        pass # TODO
    @unittest.SkipTest
    def test_invalid_output_raises_error(self):
        pass # TODO
class AudioStateTest(unittest.TestCase):
    """Maps GStreamer playbin state transitions onto Mopidy PlaybackState."""
    def setUp(self): # noqa: N802
        # Instantiate the class directly (not as an actor) to poke the handler.
        self.audio = audio.Audio(config=None, mixer=None)
    def test_state_starts_as_stopped(self):
        self.assertEqual(audio.PlaybackState.STOPPED, self.audio.state)
    def test_state_does_not_change_when_in_gst_ready_state(self):
        # GStreamer READY has no Mopidy equivalent; state must stay STOPPED.
        self.audio._handler.on_playbin_state_changed(
            gst.STATE_NULL, gst.STATE_READY, gst.STATE_VOID_PENDING)
        self.assertEqual(audio.PlaybackState.STOPPED, self.audio.state)
    def test_state_changes_from_stopped_to_playing_on_play(self):
        # Simulate the full NULL -> READY -> PAUSED -> PLAYING ramp-up.
        self.audio._handler.on_playbin_state_changed(
            gst.STATE_NULL, gst.STATE_READY, gst.STATE_PLAYING)
        self.audio._handler.on_playbin_state_changed(
            gst.STATE_READY, gst.STATE_PAUSED, gst.STATE_PLAYING)
        self.audio._handler.on_playbin_state_changed(
            gst.STATE_PAUSED, gst.STATE_PLAYING, gst.STATE_VOID_PENDING)
        self.assertEqual(audio.PlaybackState.PLAYING, self.audio.state)
    def test_state_changes_from_playing_to_paused_on_pause(self):
        self.audio.state = audio.PlaybackState.PLAYING
        self.audio._handler.on_playbin_state_changed(
            gst.STATE_PLAYING, gst.STATE_PAUSED, gst.STATE_VOID_PENDING)
        self.assertEqual(audio.PlaybackState.PAUSED, self.audio.state)
    def test_state_changes_from_playing_to_stopped_on_stop(self):
        self.audio.state = audio.PlaybackState.PLAYING
        self.audio._handler.on_playbin_state_changed(
            gst.STATE_PLAYING, gst.STATE_PAUSED, gst.STATE_NULL)
        self.audio._handler.on_playbin_state_changed(
            gst.STATE_PAUSED, gst.STATE_READY, gst.STATE_NULL)
        # We never get the following call, so the logic must work without it
        # self.audio._handler.on_playbin_state_changed(
        #     gst.STATE_READY, gst.STATE_NULL, gst.STATE_VOID_PENDING)
        self.assertEqual(audio.PlaybackState.STOPPED, self.audio.state)
class AudioBufferingTest(unittest.TestCase):
    """Tests pause/resume behaviour driven by GStreamer buffering messages."""

    def setUp(self):  # noqa: N802
        self.audio = audio.Audio(config=None, mixer=None)
        # Replace the real playbin with a mock so we can assert on which
        # gst states the audio actor requests.
        self.audio._playbin = mock.Mock(spec=['set_state'])

    def test_pause_when_buffer_empty(self):
        playbin = self.audio._playbin
        self.audio.start_playback()
        playbin.set_state.assert_called_with(gst.STATE_PLAYING)
        playbin.set_state.reset_mock()

        # An empty buffer (0%) should pause playback until buffering is done.
        self.audio._handler.on_buffering(0)
        playbin.set_state.assert_called_with(gst.STATE_PAUSED)
        self.assertTrue(self.audio._buffering)

    def test_stay_paused_when_buffering_finished(self):
        playbin = self.audio._playbin
        self.audio.pause_playback()
        playbin.set_state.assert_called_with(gst.STATE_PAUSED)
        playbin.set_state.reset_mock()

        # Completing buffering must not resume a user-initiated pause.
        self.audio._handler.on_buffering(100)
        self.assertEqual(playbin.set_state.call_count, 0)
        self.assertFalse(self.audio._buffering)

    def test_change_to_paused_while_buffering(self):
        playbin = self.audio._playbin
        self.audio.start_playback()
        playbin.set_state.assert_called_with(gst.STATE_PLAYING)
        playbin.set_state.reset_mock()

        self.audio._handler.on_buffering(0)
        playbin.set_state.assert_called_with(gst.STATE_PAUSED)
        self.audio.pause_playback()
        playbin.set_state.reset_mock()

        # The user paused during buffering: stay paused once buffering ends.
        self.audio._handler.on_buffering(100)
        self.assertEqual(playbin.set_state.call_count, 0)
        self.assertFalse(self.audio._buffering)

    def test_change_to_stopped_while_buffering(self):
        playbin = self.audio._playbin
        self.audio.start_playback()
        playbin.set_state.assert_called_with(gst.STATE_PLAYING)
        playbin.set_state.reset_mock()

        self.audio._handler.on_buffering(0)
        playbin.set_state.assert_called_with(gst.STATE_PAUSED)
        playbin.set_state.reset_mock()

        # Stopping during buffering should also clear the buffering flag.
        self.audio.stop_playback()
        playbin.set_state.assert_called_with(gst.STATE_NULL)
        self.assertFalse(self.audio._buffering)
| apache-2.0 |
omnirom/android_external_chromium-org | build/android/provision_devices.py | 28 | 9689 | #!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provisions Android devices with settings required for bots.
Usage:
./provision_devices.py [-d <device serial number>]
"""
import logging
import optparse
import os
import re
import subprocess
import sys
import time
from pylib import android_commands
from pylib import constants
from pylib import device_settings
from pylib.device import device_blacklist
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.utils import run_tests_helper
sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT,
'third_party', 'android_testrunner'))
import errors
def KillHostHeartbeat():
  """Kills any running host_heartbeat.py instances found via `ps aux`."""
  ps = subprocess.Popen(['ps', 'aux'], stdout = subprocess.PIPE)
  stdout, _ = ps.communicate()
  # Each match is one full `ps aux` output line mentioning host_heartbeat.
  matches = re.findall(r'\n.*host_heartbeat.*', stdout)
  for match in matches:
    logging.info('An instance of host heartbeat running... will kill')
    # The second whitespace-delimited field of `ps aux` output is the PID.
    pid = re.findall(r'(\S+)', match)[1]
    subprocess.call(['kill', str(pid)])
def LaunchHostHeartbeat():
  """(Re)starts host_heartbeat.py as a detached child process."""
  # Kill if existing host_heartbeat
  KillHostHeartbeat()
  # Launch a new host_heartbeat; intentionally not waited on, so it keeps
  # running after this script exits.
  logging.info('Spawning host heartbeat...')
  subprocess.Popen([os.path.join(constants.DIR_SOURCE_ROOT,
                                 'build/android/host_heartbeat.py')])
def PushAndLaunchAdbReboot(device, target):
  """Pushes and launches the adb_reboot binary on the device.

  Arguments:
    device: The DeviceUtils instance for the device to which the adb_reboot
            binary should be pushed.
    target: The build target (example, Debug or Release) which helps in
            locating the adb_reboot binary.
  """
  logging.info('Will push and launch adb_reboot on %s' % str(device))
  # Kill if adb_reboot is already running.
  try:
    # Don't try to kill adb_reboot more than once. We don't expect it to be
    # running at all.
    device.KillAll('adb_reboot', blocking=True, timeout=2, retries=0)
  except device_errors.CommandFailedError:
    # We can safely ignore the exception because we don't expect adb_reboot
    # to be running.
    pass
  # Push adb_reboot
  logging.info(' Pushing adb_reboot ...')
  adb_reboot = os.path.join(constants.DIR_SOURCE_ROOT,
                            'out/%s/adb_reboot' % target)
  device.PushChangedFiles(adb_reboot, '/data/local/tmp/')
  # Launch adb_reboot (fire-and-forget via the old device interface).
  logging.info(' Launching adb_reboot ...')
  device.old_interface.GetAndroidToolStatusAndOutput(
      '/data/local/tmp/adb_reboot')
def _ConfigureLocalProperties(device, is_perf):
  """Set standard readonly testing device properties prior to reboot."""
  local_props = [
      'persist.sys.usb.config=adb',
      'ro.monkey=1',
      'ro.test_harness=1',
      'ro.audio.silent=1',
      'ro.setupwizard.mode=DISABLED',
      ]
  if not is_perf:
    # Debug/assert properties are only set on non-perf bots, presumably to
    # avoid skewing performance measurements -- TODO confirm.
    local_props.append('%s=all' % android_commands.JAVA_ASSERT_PROPERTY)
    local_props.append('debug.checkjni=1')
  try:
    device.WriteFile(
        constants.DEVICE_LOCAL_PROPERTIES_PATH,
        '\n'.join(local_props), as_root=True)
    # Android will not respect the local props file if it is world writable.
    device.RunShellCommand(
        'chmod 644 %s' % constants.DEVICE_LOCAL_PROPERTIES_PATH,
        as_root=True)
  except device_errors.CommandFailedError as e:
    # Best effort: log and continue if the device rejects the write.
    logging.warning(str(e))
# LOCAL_PROPERTIES_PATH = '/data/local.prop'
def WipeDeviceData(device):
  """Wipes data from device, keeping only the adb_keys for authorization.

  After wiping data on a device that has been authorized, adb can still
  communicate with the device, but after reboot the device will need to be
  re-authorized because the adb keys file is stored in /data/misc/adb/.
  Thus, adb_keys file is rewritten so the device does not need to be
  re-authorized.

  Arguments:
    device: the device to wipe
  """
  # Save the existing adb keys (if any) before wiping so they can be
  # restored afterwards.
  device_authorized = device.FileExists(constants.ADB_KEYS_FILE)
  if device_authorized:
    adb_keys = device.RunShellCommand('cat %s' % constants.ADB_KEYS_FILE,
                                      as_root=True)
  device.RunShellCommand('wipe data', as_root=True)
  if device_authorized:
    # Recreate the adb keys directory, fix its SELinux context, then rewrite
    # the saved keys line by line (first line truncates, the rest append).
    path_list = constants.ADB_KEYS_FILE.split('/')
    dir_path = '/'.join(path_list[:len(path_list)-1])
    device.RunShellCommand('mkdir -p %s' % dir_path, as_root=True)
    device.RunShellCommand('restorecon %s' % dir_path, as_root=True)
    device.RunShellCommand('echo %s > %s' %
                           (adb_keys[0], constants.ADB_KEYS_FILE), as_root=True)
    for adb_key in adb_keys[1:]:
      device.RunShellCommand(
          'echo %s >> %s' % (adb_key, constants.ADB_KEYS_FILE), as_root=True)
    device.RunShellCommand('restorecon %s' % constants.ADB_KEYS_FILE,
                           as_root=True)
def WipeDeviceIfPossible(device):
  """Best-effort wipe and reboot; failures are deliberately swallowed."""
  try:
    device.EnableRoot()
    WipeDeviceData(device)
    # TODO(jbudorick): Tune the timeout per OS version.
    device.Reboot(True, timeout=600, retries=0)
  except (errors.DeviceUnresponsiveError, device_errors.CommandFailedError):
    # Wiping is optional; provisioning continues even if it fails.
    pass
def ProvisionDevice(device, options, is_perf):
  """Provisions a single device: wipe, set props/settings, reboot.

  Devices that time out or fail are added to the device blacklist rather
  than aborting the whole run.
  """
  try:
    if not options.skip_wipe:
      WipeDeviceIfPossible(device)
    try:
      device.EnableRoot()
    except device_errors.CommandFailedError as e:
      # Some devices (e.g. user builds) cannot be rooted; continue anyway.
      logging.warning(str(e))
    _ConfigureLocalProperties(device, is_perf)
    device_settings.ConfigureContentSettings(
        device, device_settings.DETERMINISTIC_DEVICE_SETTINGS)
    if options.disable_location:
      device_settings.ConfigureContentSettings(
          device, device_settings.DISABLE_LOCATION_SETTINGS)
    else:
      device_settings.ConfigureContentSettings(
          device, device_settings.ENABLE_LOCATION_SETTINGS)
    device_settings.SetLockScreenSettings(device)
    if is_perf:
      # TODO(tonyg): We eventually want network on. However, currently radios
      # can cause perfbots to drain faster than they charge.
      device_settings.ConfigureContentSettings(
          device, device_settings.NETWORK_DISABLED_SETTINGS)
      # Some perf bots run benchmarks with USB charging disabled which leads
      # to gradual draining of the battery. We must wait for a full charge
      # before starting a run in order to keep the devices online.
      try:
        battery_info = device.old_interface.GetBatteryInfo()
      except Exception as e:
        battery_info = {}
        logging.error('Unable to obtain battery info for %s, %s',
                      str(device), e)
      # Poll once a minute until the battery reaches 95%; bail out of the
      # loop if charging cannot be enabled.
      while int(battery_info.get('level', 100)) < 95:
        if not device.old_interface.IsDeviceCharging():
          if device.old_interface.CanControlUsbCharging():
            device.old_interface.EnableUsbCharging()
          else:
            logging.error('Device is not charging')
            break
        logging.info('Waiting for device to charge. Current level=%s',
                     battery_info.get('level', 0))
        time.sleep(60)
        battery_info = device.old_interface.GetBatteryInfo()
    # Sync the device clock to the host before rebooting.
    device.RunShellCommand('date -u %f' % time.time(), as_root=True)
    # TODO(jbudorick): Tune the timeout per OS version.
    device.Reboot(True, timeout=600, retries=0)
    props = device.RunShellCommand('getprop')
    for prop in props:
      logging.info(' %s' % prop)
    if options.auto_reconnect:
      PushAndLaunchAdbReboot(device, options.target)
  except (errors.WaitForResponseTimedOutError,
          device_errors.CommandTimeoutError):
    logging.info('Timed out waiting for device %s. Adding to blacklist.',
                 str(device))
    # Device black list is reset by bb_device_status_check.py per build.
    device_blacklist.ExtendBlacklist([str(device)])
  except (device_errors.CommandFailedError):
    logging.info('Failed to provision device %s. Adding to blacklist.',
                 str(device))
    device_blacklist.ExtendBlacklist([str(device)])
def ProvisionDevices(options):
  """Provisions all attached devices (or the one given via --device) in
  parallel. Returns 0 on success; raises NoDevicesError if every device
  ended up blacklisted."""
  # Perf bots are detected from the buildbot builder name.
  is_perf = 'perf' in os.environ.get('BUILDBOT_BUILDERNAME', '').lower()
  if options.device is not None:
    devices = [options.device]
  else:
    devices = android_commands.GetAttachedDevices()
  parallel_devices = device_utils.DeviceUtils.parallel(devices)
  parallel_devices.pMap(ProvisionDevice, options, is_perf)
  if options.auto_reconnect:
    LaunchHostHeartbeat()
  blacklist = device_blacklist.ReadBlacklist()
  # If every device was blacklisted during provisioning, the run is useless.
  if all(d in blacklist for d in devices):
    raise device_errors.NoDevicesError
  return 0
def main(argv):
  """Parses command-line flags and provisions devices. Returns exit code."""
  # Route log output through stdout with the bot-friendly custom formatter.
  custom_handler = logging.StreamHandler(sys.stdout)
  custom_handler.setFormatter(run_tests_helper.CustomFormatter())
  logging.getLogger().addHandler(custom_handler)
  logging.getLogger().setLevel(logging.INFO)

  parser = optparse.OptionParser()
  parser.add_option('--skip-wipe', action='store_true', default=False,
                    help="Don't wipe device data during provisioning.")
  parser.add_option('--disable-location', action='store_true', default=False,
                    help="Disallow Google location services on devices.")
  parser.add_option('-d', '--device',
                    help='The serial number of the device to be provisioned')
  parser.add_option('-t', '--target', default='Debug', help='The build target')
  parser.add_option(
      '-r', '--auto-reconnect', action='store_true',
      help='Push binary which will reboot the device on adb disconnections.')
  options, args = parser.parse_args(argv[1:])
  constants.SetBuildType(options.target)

  if args:
    # NOTE: Python 2 print-to-stderr syntax; this script targets Python 2.
    print >> sys.stderr, 'Unused args %s' % args
    return 1

  return ProvisionDevices(options)
# Script entry point.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| bsd-3-clause |
yunity/foodsaving-backend | karrot/users/factories.py | 1 | 1082 | from django.contrib.auth import get_user_model
from factory import DjangoModelFactory, CREATE_STRATEGY, LazyAttribute, PostGeneration, Sequence
from karrot.utils.tests.fake import faker
class UserFactory(DjangoModelFactory):
    """Factory for regular (unverified) users with human-readable passwords."""

    class Meta:
        model = get_user_model()
        strategy = CREATE_STRATEGY

    is_active = True
    is_staff = False
    display_name = LazyAttribute(lambda _: faker.name())
    # Prefix a sequence number so addresses stay unique even if faker repeats.
    email = Sequence(lambda n: str(n) + faker.email())
    description = LazyAttribute(lambda _: faker.text())
    # Use display_name as password, as it is readable
    password = PostGeneration(lambda obj, *args, **kwargs: obj.set_password(obj.display_name))

    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        # Create through the custom manager's create_user() so model-level
        # setup performed by the manager is applied.
        manager = cls._get_manager(model_class)
        user = manager.create_user(*args, **kwargs)
        return user
class VerifiedUserFactory(UserFactory):
    """Same as UserFactory, but the user's email is already verified."""

    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        user = super()._create(model_class, *args, **kwargs)
        user.verify_mail()
        return user
| agpl-3.0 |
DepthDeluxe/ansible | lib/ansible/modules/network/avi/avi_stringgroup.py | 46 | 4045 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_stringgroup
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of StringGroup Avi RESTful Object
description:
- This module is used to configure StringGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
kv:
description:
- Configure key value in the string group.
name:
description:
- Name of the string group.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Type of stringgroup.
- Enum options - SG_TYPE_STRING, SG_TYPE_KEYVAL.
- Default value when not specified in API or module is interpreted by Avi Controller as SG_TYPE_STRING.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the string group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a string group configuration
avi_stringgroup:
controller: ''
password: ''
username: ''
kv:
- key: text/html
- key: text/xml
- key: text/plain
- key: text/css
- key: text/javascript
- key: application/javascript
- key: application/x-javascript
- key: application/xml
- key: application/pdf
name: System-Compressible-Content-Types
tenant_ref: admin
type: SG_TYPE_STRING
'''
RETURN = '''
obj:
description: StringGroup (api/stringgroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    # The Avi SDK is an optional dependency; record availability so main()
    # can emit a helpful error instead of failing with an ImportError here.
    from ansible.module_utils.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    HAS_AVI = False
def main():
    """Entry point: declares the module's argument spec and delegates to the
    generic Avi API helper for the 'stringgroup' object."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        description=dict(type='str',),
        kv=dict(type='list',),
        name=dict(type='str', required=True),
        tenant_ref=dict(type='str',),
        type=dict(type='str', required=True),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # No sensitive fields need obfuscation for this object, hence the empty
    # set (was `set([])`; `set()` is the idiomatic empty set).
    return avi_ansible_api(module, 'stringgroup',
                           set())
# Module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
robhudson/django | django/contrib/admin/migrations/0001_initial.py | 419 | 1958 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.admin.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for django.contrib.admin: the LogEntry audit table.

    NOTE: migrations are historical records; do not edit the operations.
    """

    dependencies = [
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL
        # resolves to rather than a concrete app's model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='LogEntry',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('action_time', models.DateTimeField(auto_now=True, verbose_name='action time')),
                ('object_id', models.TextField(null=True, verbose_name='object id', blank=True)),
                ('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
                ('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
                ('change_message', models.TextField(verbose_name='change message', blank=True)),
                # Log entries outlive the objects they reference: content_type
                # is nulled on delete, while deleting the user cascades.
                ('content_type', models.ForeignKey(
                    to_field='id',
                    on_delete=models.SET_NULL,
                    blank=True, null=True,
                    to='contenttypes.ContentType',
                    verbose_name='content type',
                )),
                ('user', models.ForeignKey(
                    to=settings.AUTH_USER_MODEL,
                    on_delete=models.CASCADE,
                    verbose_name='user',
                )),
            ],
            options={
                'ordering': ('-action_time',),
                'db_table': 'django_admin_log',
                'verbose_name': 'log entry',
                'verbose_name_plural': 'log entries',
            },
            bases=(models.Model,),
            managers=[
                ('objects', django.contrib.admin.models.LogEntryManager()),
            ],
        ),
    ]
| bsd-3-clause |
jiminliang/cuda-convnet2 | make-data/make-data.py | 179 | 7155 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
# This script makes batches suitable for training from raw ILSVRC 2012 tar files.
import tarfile
from StringIO import StringIO
from random import shuffle
import sys
from time import time
from pyext._MakeDataPyExt import resizeJPEG
import itertools
import os
import cPickle
import scipy.io
import math
import argparse as argp
# Set this to True to crop images to square. In this case each image will be
# resized such that its shortest edge is OUTPUT_IMAGE_SIZE pixels, and then the
# center OUTPUT_IMAGE_SIZE x OUTPUT_IMAGE_SIZE patch will be extracted.
#
# Set this to False to preserve image borders. In this case each image will be
# resized such that its shortest edge is OUTPUT_IMAGE_SIZE pixels. This was
# demonstrated to be superior by Andrew Howard in his very nice paper:
# http://arxiv.org/abs/1312.5402
CROP_TO_SQUARE = True
OUTPUT_IMAGE_SIZE = 256
# Number of threads to use for JPEG decompression and image resizing.
NUM_WORKER_THREADS = 8
# Don't worry about these.
OUTPUT_BATCH_SIZE = 3072
OUTPUT_SUB_BATCH_SIZE = 1024
def pickle(filename, data):
    """Serializes `data` to `filename` using the highest cPickle protocol.

    Note: intentionally shadows the stdlib `pickle` module name; this file
    uses `cPickle` directly.
    """
    # Binary mode is required: HIGHEST_PROTOCOL is a binary pickle format,
    # and text mode ("w") would corrupt the stream on Windows.
    with open(filename, "wb") as fo:
        cPickle.dump(data, fo, protocol=cPickle.HIGHEST_PROTOCOL)
def unpickle(filename):
    """Loads and returns a pickled object from `filename`."""
    # Context manager guarantees the file is closed even if loading fails;
    # binary mode matches the binary protocol written by pickle() above.
    with open(filename, 'rb') as fo:
        return cPickle.load(fo)
def partition_list(l, partition_size):
    """Splits `l` into consecutive chunks of `partition_size` elements.

    The final chunk may be shorter. An empty input yields an empty list.
    Uses range/floor-division so the helper behaves identically on
    Python 2 and 3 (was py2-only `xrange` and `/`).
    """
    divup = lambda a, b: (a + b - 1) // b
    return [l[i * partition_size:(i + 1) * partition_size]
            for i in range(divup(len(l), partition_size))]
def open_tar(path, name):
    """Opens the tar file at `path`; exits the program if it is missing.

    `name` is only used in the error message (e.g. 'training tar').
    """
    if not os.path.exists(path):
        print "ILSVRC 2012 %s not found at %s. Make sure to set ILSVRC_SRC_DIR correctly at the top of this file (%s)." % (name, path, sys.argv[0])
        sys.exit(1)
    return tarfile.open(path)
def makedir(path):
    """Creates `path` (including parents) if it does not already exist."""
    try:
        os.makedirs(path)
    except OSError:
        # Tolerate the directory already existing -- including when another
        # process created it between check and creation (the old
        # exists-then-makedirs pattern was racy). Re-raise real failures.
        if not os.path.isdir(path):
            raise
def parse_devkit_meta(ILSVRC_DEVKIT_TAR):
    """Extracts label metadata from the ILSVRC 2012 devkit tarball.

    Returns a tuple of:
      labels_dic: synset id string -> 0-based class index (presumably WNIDs
        from meta.mat -- confirm against the devkit docs)
      label_names: human-readable class names ordered by class index
      validation_ground_truth: list of [label] singletons, one per val image
    """
    tf = open_tar(ILSVRC_DEVKIT_TAR, 'devkit tar')
    fmeta = tf.extractfile(tf.getmember('ILSVRC2012_devkit_t12/data/meta.mat'))
    meta_mat = scipy.io.loadmat(StringIO(fmeta.read()))
    # meta.mat synset ids are 1-based; only ids 1..1000 are the leaf classes.
    labels_dic = dict((m[0][1][0], m[0][0][0][0]-1) for m in meta_mat['synsets'] if m[0][0][0][0] >= 1 and m[0][0][0][0] <= 1000)
    label_names_dic = dict((m[0][1][0], m[0][2][0]) for m in meta_mat['synsets'] if m[0][0][0][0] >= 1 and m[0][0][0][0] <= 1000)
    label_names = [tup[1] for tup in sorted([(v,label_names_dic[k]) for k,v in labels_dic.items()], key=lambda x:x[0])]
    fval_ground_truth = tf.extractfile(tf.getmember('ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt'))
    # The ground-truth file is 1-based; convert to 0-based labels.
    validation_ground_truth = [[int(line.strip()) - 1] for line in fval_ground_truth.readlines()]
    tf.close()
    return labels_dic, label_names, validation_ground_truth
def write_batches(target_dir, name, start_batch_num, labels, jpeg_files):
    """Resizes JPEGs and writes them as numbered pickled batch directories.

    Returns the number of batches written.
    NOTE(review): `i` is unbound if `jpeg_files` is empty -- the caller is
    assumed to always pass at least one file.
    """
    jpeg_files = partition_list(jpeg_files, OUTPUT_BATCH_SIZE)
    labels = partition_list(labels, OUTPUT_BATCH_SIZE)
    makedir(target_dir)
    print "Writing %s batches..." % name
    for i,(labels_batch, jpeg_file_batch) in enumerate(zip(labels, jpeg_files)):
        t = time()
        # Decompress/resize in native worker threads, then flatten the
        # per-thread result lists into one list of JPEG strings.
        jpeg_strings = list(itertools.chain.from_iterable(resizeJPEG([jpeg.read() for jpeg in jpeg_file_batch], OUTPUT_IMAGE_SIZE, NUM_WORKER_THREADS, CROP_TO_SQUARE)))
        batch_path = os.path.join(target_dir, 'data_batch_%d' % (start_batch_num + i))
        makedir(batch_path)
        # Each batch directory is split into fixed-size sub-batch files.
        for j in xrange(0, len(labels_batch), OUTPUT_SUB_BATCH_SIZE):
            pickle(os.path.join(batch_path, 'data_batch_%d.%d' % (start_batch_num + i, j/OUTPUT_SUB_BATCH_SIZE)),
                   {'data': jpeg_strings[j:j+OUTPUT_SUB_BATCH_SIZE],
                    'labels': labels_batch[j:j+OUTPUT_SUB_BATCH_SIZE]})
        print "Wrote %s (%s batch %d of %d) (%.2f sec)" % (batch_path, name, i+1, len(jpeg_files), time() - t)
    return i + 1
if __name__ == "__main__":
    parser = argp.ArgumentParser()
    parser.add_argument('--src-dir', help='Directory containing ILSVRC2012_img_train.tar, ILSVRC2012_img_val.tar, and ILSVRC2012_devkit_t12.tar.gz', required=True)
    parser.add_argument('--tgt-dir', help='Directory to output ILSVRC 2012 batches suitable for cuda-convnet to train on.', required=True)
    args = parser.parse_args()

    print "CROP_TO_SQUARE: %s" % CROP_TO_SQUARE
    print "OUTPUT_IMAGE_SIZE: %s" % OUTPUT_IMAGE_SIZE
    print "NUM_WORKER_THREADS: %s" % NUM_WORKER_THREADS

    ILSVRC_TRAIN_TAR = os.path.join(args.src_dir, 'ILSVRC2012_img_train.tar')
    ILSVRC_VALIDATION_TAR = os.path.join(args.src_dir, 'ILSVRC2012_img_val.tar')
    ILSVRC_DEVKIT_TAR = os.path.join(args.src_dir, 'ILSVRC2012_devkit_t12.tar.gz')

    assert OUTPUT_BATCH_SIZE % OUTPUT_SUB_BATCH_SIZE == 0
    labels_dic, label_names, validation_labels = parse_devkit_meta(ILSVRC_DEVKIT_TAR)

    # The training tar contains one nested tar per synset (class). The outer
    # tar must stay open while batches are written, because the extracted
    # file objects are read lazily inside write_batches().
    with open_tar(ILSVRC_TRAIN_TAR, 'training tar') as tf:
        synsets = tf.getmembers()
        synset_tars = [tarfile.open(fileobj=tf.extractfile(s)) for s in synsets]
        print "Loaded synset tars."
        print "Building training set image list (this can take 10-20 minutes)..."
        sys.stdout.flush()

        train_jpeg_files = []
        for i,st in enumerate(synset_tars):
            if i % 100 == 0:
                print "%d%% ..." % int(round(100.0 * float(i) / len(synset_tars))),
                sys.stdout.flush()
            train_jpeg_files += [st.extractfile(m) for m in st.getmembers()]
            st.close()

        shuffle(train_jpeg_files)
        # The first 9 characters of each member name identify the synset.
        train_labels = [[labels_dic[jpeg.name[:9]]] for jpeg in train_jpeg_files]
        print "done"

        # Write training batches
        i = write_batches(args.tgt_dir, 'training', 0, train_labels, train_jpeg_files)

    # Write validation batches
    # Validation batch numbering starts at the next multiple of 1000.
    val_batch_start = int(math.ceil((i / 1000.0))) * 1000
    with open_tar(ILSVRC_VALIDATION_TAR, 'validation tar') as tf:
        validation_jpeg_files = sorted([tf.extractfile(m) for m in tf.getmembers()], key=lambda x:x.name)
        write_batches(args.tgt_dir, 'validation', val_batch_start, validation_labels, validation_jpeg_files)

    # Write meta file
    meta = unpickle('input_meta')
    meta_file = os.path.join(args.tgt_dir, 'batches.meta')
    meta.update({'batch_size': OUTPUT_BATCH_SIZE,
                 'num_vis': OUTPUT_IMAGE_SIZE**2 * 3,
                 'label_names': label_names})
    pickle(meta_file, meta)
    print "Wrote %s" % meta_file

    print "All done! ILSVRC 2012 batches are in %s" % args.tgt_dir
| apache-2.0 |
elover/python-django-blog | myblog/pygments/lexers/foxpro.py | 335 | 26220 | # -*- coding: utf-8 -*-
"""
pygments.lexers.foxpro
~~~~~~~~~~~~~~~~~~~~~~
Simple lexer for Microsoft Visual FoxPro source code.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String
# Public API of this lexer module.
__all__ = ['FoxProLexer']
class FoxProLexer(RegexLexer):
    """Lexer for Microsoft Visual FoxPro language.

    FoxPro syntax allows to shorten all keywords and function names
    to 4 characters.  Shortened forms are not recognized by this lexer.

    *New in Pygments 1.6.*
    """

    name = 'FoxPro'
    aliases = ['Clipper', 'XBase']
    filenames = ['*.PRG', '*.prg']
    # NOTE(review): the pygments base Lexer reads the plural ``mimetypes``;
    # this singular attribute is never consulted by the framework -- confirm
    # before relying on it.
    mimetype = []

    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        # Default state: strings, operators, function/property/event names.
        'root': [
            (r';\s*\n', Punctuation),  # consume newline
            # Every physical line start passes through the 'newline' state so
            # line-leading commands and '*' comments can be recognized first.
            (r'(^|\n)\s*', Text, 'newline'),

            # Square brackets may be used for array indices
            # and for string literal. Look for arrays
            # before matching string literals.
            (r'(?<=\w)\[[0-9, ]+\]', Text),
            (r'\'[^\'\n]*\'|"[^"\n]*"|\[[^]*]\]', String),
            # NOTE(review): the alternation below lists ``&&`` twice --
            # presumably a harmless redundancy; confirm against upstream.
            (r'(^\s*\*|&&|&&).*?\n', Comment.Single),

            # Built-in function names, only when followed by an opening paren.
            (r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
             r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
             r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
             r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
             r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
             r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
             r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
             r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
             r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
             r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
             r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
             r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
             r'COM|Functions|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
             r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
             r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
             r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
             r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
             r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
             r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
             r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
             r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
             r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
             r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
             r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
             r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
             r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
             r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
             r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
             r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
             r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
             r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
             r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
             r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
             r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
             r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
             r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
             r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
             r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
             r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
             r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
             r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
             r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
             r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
             r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
             r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
             r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
             r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
             r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
             r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
             r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
             r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
             r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
             r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
             r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
             r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
             r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
             r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
             r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
             r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
             r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
             r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
             r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
             r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
             r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
             r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
             r'YEAR)(?=\s*\()', Name.Function),

            # System memory variables (always underscore-prefixed).
            (r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
             r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
             r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
             r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
             r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
             r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
             r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
             r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
             r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
             r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
             r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
             r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
             r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),

            (r'THISFORMSET|THISFORM|THIS', Name.Builtin),

            # Base class names of the Visual FoxPro object model.
            (r'Application|CheckBox|Collection|Column|ComboBox|'
             r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
             r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
             r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
             r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
             r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
             r'Project|Relation|ReportListener|Separator|Servers|Server|'
             r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
             r'XMLAdapter|XMLField|XMLTable', Name.Class),

            # Memory variables referenced with the explicit ``m.`` prefix.
            (r'm\.[a-z_]\w*', Name.Variable),
            (r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),

            # Object property names (matched after a dot).
            (r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
             r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
             r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
             r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
             r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
             r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
             r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
             r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
             r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
             r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
             r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
             r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
             r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
             r'BreakOnError|BufferModeOverride|BufferMode|'
             r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
             r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
             r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
             r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
             r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
             r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
             r'ContinuousScroll|ControlBox|ControlCount|Controls|'
             r'ControlSource|ConversionFunc|Count|CurrentControl|'
             r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
             r'CursorSchema|CursorSource|CursorStatus|Curvature|'
             r'Database|DataSessionID|DataSession|DataSourceType|'
             r'DataSource|DataType|DateFormat|DateMark|Debug|'
             r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
             r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
             r'DeleteCmd|DeleteMark|Description|Desktop|'
             r'Details|DisabledBackColor|DisabledForeColor|'
             r'DisabledItemBackColor|DisabledItemForeColor|'
             r'DisabledPicture|DisableEncode|DisplayCount|'
             r'DisplayValue|Dockable|Docked|DockPosition|'
             r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
             r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
             r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
             r'DynamicFontItalic|DynamicFontStrikethru|'
             r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
             r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
             r'DynamicLineHeight|EditorOptions|Enabled|'
             r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
             r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
             r'FetchMemoDataSource|FetchMemo|FetchSize|'
             r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
             r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
             r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
             r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
             r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
             r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
             r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
             r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
             r'HelpContextID|HideSelection|HighlightBackColor|'
             r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
             r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
             r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
             r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
             r'InsertCmdDataSource|InsertCmdRefreshCmd|'
             r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
             r'InsertCmd|Instancing|IntegralHeight|'
             r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
             r'IsDiffGram|IsLoaded|ItemBackColor,|ItemData|ItemIDData|'
             r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
             r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
             r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
             r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
             r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
             r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
             r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
             r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
             r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
             r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
             r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
             r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
             r'NumberOfElements|Object|OLEClass|OLEDragMode|'
             r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
             r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
             r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
             r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
             r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
             r'OutputPageCount|OutputType|PageCount|PageHeight|'
             r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
             r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
             r'Parent|Partition|PasswordChar|PictureMargin|'
             r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
             r'PictureVal|Picture|Prepared|'
             r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
             r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
             r'ProjectHookLibrary|ProjectHook|QuietMode|'
             r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
             r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
             r'RecordSource|RefreshAlias|'
             r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
             r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
             r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
             r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
             r'Rotation|RowColChange|RowHeight|RowSourceType|'
             r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
             r'Seconds|SelectCmd|SelectedID|'
             r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
             r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
             r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
             r'ServerClass|ServerHelpFile|ServerName|'
             r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
             r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
             r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
             r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
             r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
             r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
             r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
             r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
             r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
             r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
             r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
             r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
             r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
             r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
             r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
             r'VersionCompany|VersionCopyright|VersionDescription|'
             r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
             r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
             r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
             r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
             r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
             r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
             r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
             r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
             r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
             r'XSDtype|ZoomBox)', Name.Attribute),

            # Object method names (matched after a dot).
            (r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
             r'AddProperty|AddTableSchema|AddToSCC|Add|'
             r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
             r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
             r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
             r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
             r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
             r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
             r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
             r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
             r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
             r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
             r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
             r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
             r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
             r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
             r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
             r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
             r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
             r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
             r'WriteExpression|WriteMethod|ZOrder)', Name.Function),

            # Event names (matched after a dot).
            (r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
             r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
             r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
             r'AfterCursorUpdate|AfterDelete|AfterInsert|'
             r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
             r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
             r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
             r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
             r'BeforeInsert|BeforeDock|BeforeOpenTables|'
             r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
             r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
             r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
             r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
             r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
             r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
             r'dbc_AfterDropOffline|dbc_AfterDropTable|'
             r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
             r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
             r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
             r'dbc_AfterRenameTable|dbc_AfterRenameView|'
             r'dbc_AfterValidateData|dbc_BeforeAddTable|'
             r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
             r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
             r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
             r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
             r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
             r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
             r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
             r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
             r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
             r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
             r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
             r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
             r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
             r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
             r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
             r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
             r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
             r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
             r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
             r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
             r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
             r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
             r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
             r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),

            (r'\s+', Text),
            # everything else is not colored
            (r'.', Text),
        ],
        # State entered at the start of every line: line-leading '*'
        # comments, commands, and preprocessor directives; each rule pops
        # straight back to 'root'.
        'newline': [
            (r'\*.*?$', Comment.Single, '#pop'),
            (r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
             r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
             r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
             r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
             r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
             r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
             r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
             r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
             r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
             r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
             r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
             r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
             r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
             r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
             r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
             r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
             r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
             r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
             r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
             r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
             r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
             r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
             r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
             r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
             r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
             r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
             r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
             r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
             r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
             r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
             r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
             r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
             r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
             r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
             r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
             r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
             r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
             r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|'
             r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
             r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
             r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
             r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
             r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
             r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
             r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
             r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
             r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
             r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
             r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
             r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
             r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
             r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
             r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
             r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
             r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
             r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
             r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
             r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
             r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
             r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
             r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
             r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
             r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
             r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
             r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
             r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
             r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
             r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
             r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
             r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
             r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
             r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
             r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
             r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
             Keyword.Reserved, '#pop'),
            (r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
             Comment.Preproc, '#pop'),
            (r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
            (r'.', Text, '#pop'),
        ],
    }
| mit |
drbean/ultisnips | test/test_SnippetPriorities.py | 5 | 4205 | from test.vim_test_case import VimTestCase as _VimTest
from test.constant import EX, ESC
class SnippetPriorities_MultiWordTriggerOverwriteExisting(_VimTest):
    # A priority-1 multi-word snippet overrides two default-priority
    # snippets that share the same trigger.
    snippets = (
        ("test me", "${1:Hallo}", "Types Hallo"),
        ("test me", "${1:World}", "Types World"),
        ("test me", "We overwrite", "Overwrite the two", "", 1),
    )
    keys = "test me" + EX
    wanted = "We overwrite"
class SnippetPriorities_DoNotCareAboutNonMatchings(_VimTest):
    # A high-priority snippet on a different trigger must not shadow
    # snippets for other triggers.
    snippets = (
        ("test1", "Hallo", "Types Hallo"),
        ("test2", "We overwrite", "Overwrite the two", "", 1),
    )
    keys = "test1" + EX
    wanted = "Hallo"
class SnippetPriorities_OverwriteExisting(_VimTest):
    # Same as the multi-word case, but with a single-word trigger.
    snippets = (
        ("test", "${1:Hallo}", "Types Hallo"),
        ("test", "${1:World}", "Types World"),
        ("test", "We overwrite", "Overwrite the two", "", 1),
    )
    keys = "test" + EX
    wanted = "We overwrite"
class SnippetPriorities_OverwriteTwice_ECR(_VimTest):
    # With several priorities on the same trigger, the highest one wins.
    snippets = (
        ("test", "${1:Hallo}", "Types Hallo"),
        ("test", "${1:World}", "Types World"),
        ("test", "We overwrite", "Overwrite the two", "", 1),
        ("test", "again", "Overwrite again", "", 2),
    )
    keys = "test" + EX
    wanted = "again"
class SnippetPriorities_OverwriteThenChoose_ECR(_VimTest):
    # Two snippets at the same (highest) priority: the user is prompted
    # to choose between them; lower-priority snippets are not offered.
    snippets = (
        ("test", "${1:Hallo}", "Types Hallo"),
        ("test", "${1:World}", "Types World"),
        ("test", "We overwrite", "Overwrite the two", "", 1),
        ("test", "No overwrite", "Not overwritten", "", 1),
    )
    keys = "test" + EX + "1\n\n" + "test" + EX + "2\n"
    wanted = "We overwrite\nNo overwrite"
class SnippetPriorities_AddedHasHigherThanFile(_VimTest):
    # A programmatically added snippet with priority 1 beats a
    # file-defined snippet at the default priority.
    files = {
        "us/all.snippets": r"""
        snippet test "Test Snippet" b
        This is a test snippet
        endsnippet
        """
    }
    snippets = (("test", "We overwrite", "Overwrite the two", "", 1),)
    keys = "test" + EX
    wanted = "We overwrite"
class SnippetPriorities_FileHasHigherThanAdded(_VimTest):
    # An added snippet with priority -1 loses to a file-defined snippet
    # at the default priority.
    files = {
        "us/all.snippets": r"""
        snippet test "Test Snippet" b
        This is a test snippet
        endsnippet
        """
    }
    snippets = (("test", "We do not overwrite", "Overwrite the two", "", -1),)
    keys = "test" + EX
    wanted = "This is a test snippet"
class SnippetPriorities_FileHasHigherThanAdded_neg_prio(_VimTest):
    # Negative priorities compare normally: file priority -3 still beats
    # an added snippet at priority -5.
    files = {
        "us/all.snippets": r"""
        priority -3
        snippet test "Test Snippet" b
        This is a test snippet
        endsnippet
        """
    }
    snippets = (("test", "We overwrite", "Overwrite the two", "", -5),)
    keys = "test" + EX
    wanted = "This is a test snippet"
class SnippetPriorities_SimpleClear(_VimTest):
    # A clearsnippets at priority 1 suppresses snippets defined at the
    # lower priority -1, so the trigger does not expand.
    files = {
        "us/all.snippets": r"""
        priority 1
        clearsnippets
        priority -1
        snippet test "Test Snippet"
        Should not expand to this.
        endsnippet
        """
    }
    keys = "test" + EX
    wanted = "test" + EX
class SnippetPriorities_SimpleClear2(_VimTest):
    # clearsnippets at the default priority also clears a same-priority
    # snippet defined later in the same file, so nothing expands.
    files = {
        "us/all.snippets": r"""
        clearsnippets
        snippet test "Test snippet"
        Should not expand to this.
        endsnippet
        """
    }
    keys = "test" + EX
    wanted = "test" + EX
class SnippetPriorities_ClearedByParent(_VimTest):
    # A clearsnippets in the extended (parent) filetype also suppresses a
    # snippet defined by the extending child filetype.
    files = {
        "us/p.snippets": r"""
        clearsnippets
        """,
        "us/c.snippets": r"""
        extends p
        snippet test "Test snippets"
        Should not expand to this.
        endsnippet
        """,
    }
    keys = ESC + ":set ft=c\n" + "itest" + EX
    wanted = "test" + EX
class SnippetPriorities_ClearedByChild(_VimTest):
    # A clearsnippets in the child filetype only affects the child: the
    # parent's snippet still expands for ft=p but not for ft=c.
    files = {
        "us/p.snippets": r"""
        snippet test "Test snippets"
        Should only expand in p.
        endsnippet
        """,
        "us/c.snippets": r"""
        extends p
        clearsnippets
        """,
    }
    keys = (
        ESC
        + ":set ft=p\n"
        + "itest"
        + EX
        + "\n"
        + ESC
        + ":set ft=c\n"
        + "itest"
        + EX
        + ESC
        + ":set ft=p"
    )
    wanted = "Should only expand in p.\ntest" + EX
| gpl-3.0 |
fuhongliang/odoo | addons/hr_evaluation/__init__.py | 432 | 1084 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_evaluation
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rodorad/spark-tk | regression-tests/generatedata/naive_bayes_generator.py | 14 | 3334 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import itertools
def generate_data_set(listOfCoeffs, numDiceRolls):
    """Generate a naive bayes dataset and write it to ../datasets/naive_bayes.csv.

    A probability table with one row per 0/1 assignment of the features is
    built first, then ``numDiceRolls`` data rows are sampled for each table
    entry, so the file contains ``2 ** len(listOfCoeffs) * numDiceRolls``
    rows in total.

    :param listOfCoeffs: per-feature probabilities P(feature == 1); one
        binary feature column is produced per coefficient.
    :param numDiceRolls: number of data rows to generate per entry of the
        probability table.
    """
    numCoeffs = len(listOfCoeffs)
    coeffTable = generate_naive_bayes_table(listOfCoeffs, numCoeffs)
    dataRows = generate_random_data_from_probability_table(
        coeffTable, "", numCoeffs, numDiceRolls)
    # 'out_file' rather than 'file' so the builtin is not shadowed.
    with open("../datasets/naive_bayes.csv", "w") as out_file:
        out_file.write(dataRows)
def generate_random_data_from_probability_table(coeffTable, dataRows,
                                                numCoeffs, numDiceRolls):
    """Generate CSV data rows from a probability table.

    For each row of ``coeffTable`` (feature bits followed by a probability)
    this emits ``numDiceRolls`` CSV lines consisting of the feature bits
    plus a Bernoulli outcome drawn with that probability.

    :param coeffTable: rows of ``[bit, ..., bit, probability]``; left
        unmodified (the previous implementation overwrote the probability
        column of the caller's rows in place).
    :param dataRows: prefix string the generated lines are appended to.
    :param numCoeffs: unused; kept for backward compatibility.
    :param numDiceRolls: number of lines to generate per table row.
    :returns: ``dataRows`` followed by one ``"bit,...,bit,outcome\\n"``
        line per dice roll.
    """
    parts = [dataRows]
    for row in coeffTable:
        probability = row[-1]
        # Copy the feature bits so the caller's table stays intact.
        features = row[:-1]
        for _ in range(numDiceRolls):
            outcome = roll_dice(probability)
            parts.append(",".join(str(v) for v in features + [outcome]) + "\n")
    # Single join instead of quadratic string concatenation.
    return "".join(parts)
def generate_naive_bayes_table(listOfCoeffs, numCoeffs):
    """Compute the probability table for a naive bayes dataset.

    Enumerates every 0/1 assignment of ``numCoeffs`` binary features (in
    lexicographic order) and appends the joint probability of that
    assignment, treating ``listOfCoeffs[i]`` as P(feature_i == 1) with
    independent features.

    :param listOfCoeffs: per-feature probabilities.
    :param numCoeffs: number of features (should equal len(listOfCoeffs)).
    :returns: list of rows ``[bit_0, ..., bit_{n-1}, probability]``.
    """
    coeffTable = []
    # All permutations of 0 and 1 of length numCoeffs.
    for bits in itertools.product((0, 1), repeat=numCoeffs):
        probability = 1
        for bit, coeff in zip(bits, listOfCoeffs):
            # '==' instead of 'is': comparing ints by identity only works
            # by accident (CPython small-int caching).
            probability *= coeff if bit == 1 else (1 - coeff)
        coeffTable.append(list(bits) + [probability])
    return coeffTable
def roll_dice(probability):
    """Draw a Bernoulli sample: return 1 with the given probability, else 0."""
    return int(random.uniform(0, 1) <= probability)
if __name__ == "__main__":
    # Three features with P(1) = 0.3, 0.4, 0.3 and 500 rows per
    # probability-table entry (2**3 * 500 = 4000 rows total).
    generate_data_set([0.3, 0.4, 0.3], 500)
| apache-2.0 |
inveniosoftware/invenio-oauthclient | invenio_oauthclient/views/client.py | 1 | 8781 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Client blueprint used to handle OAuth callbacks."""
from flask import Blueprint, abort, current_app, redirect, request, url_for
from flask_oauthlib.client import OAuthException
from invenio_accounts.views import login as base_login
from invenio_db import db
from itsdangerous import BadData
from .._compat import _create_identifier
from ..errors import OAuthRemoteNotFound
from ..handlers import set_session_next_url
from ..handlers.rest import response_handler
from ..proxies import current_oauthclient
from ..utils import get_safe_redirect_target, serializer
# UI blueprint: serves the HTML login/authorized endpoints.
blueprint = Blueprint(
    'invenio_oauthclient',
    __name__,
    url_prefix='/oauth',
    static_folder='../static',
    template_folder='../templates',
)

# REST blueprint: JSON counterparts of the endpoints above.
# NOTE(review): deliberately shares the name 'invenio_oauthclient' with the
# UI blueprint -- presumably only one of the two is registered on a given
# application; confirm against the extension setup.
rest_blueprint = Blueprint(
    'invenio_oauthclient',
    __name__,
    url_prefix='/oauth',
    static_folder='../static',
    template_folder='../templates',
)
@blueprint.record_once
def post_ext_init(state):
    """Seed OAuth client configuration defaults on blueprint registration."""
    app = state.app
    # (target key, fallback config key, fallback default) triples; each
    # target is only set when not already configured.
    config_fallbacks = (
        ('OAUTHCLIENT_SITENAME',
         'THEME_SITENAME', 'Invenio'),
        ('OAUTHCLIENT_BASE_TEMPLATE',
         'BASE_TEMPLATE', 'invenio_oauthclient/base.html'),
        ('OAUTHCLIENT_COVER_TEMPLATE',
         'COVER_TEMPLATE', 'invenio_oauthclient/base_cover.html'),
        ('OAUTHCLIENT_SETTINGS_TEMPLATE',
         'SETTINGS_TEMPLATE', 'invenio_oauthclient/settings/base.html'),
    )
    for target_key, fallback_key, fallback_default in config_fallbacks:
        app.config.setdefault(
            target_key, app.config.get(fallback_key, fallback_default))
@blueprint.route("/login")
def auto_redirect_login(*args, **kwargs):
"""Handles automatic redirect to external auth service.
The login endpoint will redirect automatically to the external
auth service is the following conditions are met:
* local login is disabled
* redirect to external login is enabled
* only one external auth service is configured
This function should be set as value of the invenio-accounts
config var ``ACCOUNTS_LOGIN_VIEW_FUNCTION``. It should be defined in
the Invenio application configuration to ensure that is correctly loaded.
"""
local_login_enabled = current_app.config.get(
"ACCOUNTS_LOCAL_LOGIN_ENABLED", False
)
auto_redirect_enabled = current_app.config.get(
"OAUTHCLIENT_AUTO_REDIRECT_TO_EXTERNAL_LOGIN", False
)
would_redirect = auto_redirect_enabled and not local_login_enabled
remote_apps = list(current_oauthclient.oauth.remote_apps)
if would_redirect and len(remote_apps) == 1:
# if local login is disabled and we only have one OAuth2 remote app
# configured, we forward directly to that
url = url_for("invenio_oauthclient.login", remote_app=remote_apps[0])
return redirect(url)
else:
return base_login(*args, **kwargs)
def _login(remote_app, authorized_view_name):
    """Send the user to the remote application for authentication.

    :param remote_app: name of a configured remote application.
    :param authorized_view_name: endpoint handling the OAuth callback.
    :raises OAuthRemoteNotFound: when ``remote_app`` is not configured.
    """
    remote_apps = current_oauthclient.oauth.remote_apps
    if remote_app not in remote_apps:
        raise OAuthRemoteNotFound()

    # Validated post-login target taken from the ``next`` query argument.
    next_url = get_safe_redirect_target(arg='next')

    # Redirect URI -- must be registered in the remote service.
    callback_url = url_for(
        authorized_view_name,
        remote_app=remote_app,
        _external=True,
        _scheme="https",
    )

    # JSON Web Token carrying app/next/session id; expires after
    # OAUTHCLIENT_STATE_EXPIRES seconds.
    state_token = serializer.dumps({
        'app': remote_app,
        'next': next_url,
        'sid': _create_identifier(),
    })

    return remote_apps[remote_app].authorize(
        callback=callback_url,
        state=state_token,
    )
@blueprint.route('/login/<remote_app>/')
def login(remote_app):
    """Send user to remote application for authentication.

    :param remote_app: name of the configured remote application.
    :returns: a redirect to the remote authorization endpoint, or a 404
        response when the remote application is unknown.
    """
    try:
        return _login(remote_app, '.authorized')
    except OAuthRemoteNotFound:
        return abort(404)
@rest_blueprint.route('/login/<remote_app>/')
def rest_login(remote_app):
    """Send user to remote application for authentication (REST API)."""
    try:
        return _login(remote_app, '.rest_authorized')
    except OAuthRemoteNotFound:
        # abort() raises an HTTPException, so control never falls through,
        # but return its result for consistency with login().
        return abort(404)
def _authorized(remote_app=None):
    """Authorized handler callback.

    Validates the OAuth2 ``state`` token and dispatches to the handler
    registered for ``remote_app``.  Raises ``AssertionError`` or
    ``itsdangerous.BadData`` for missing/tampered state; the calling
    views translate those into HTTP errors.
    """
    if remote_app not in current_oauthclient.handlers:
        return abort(404)
    state_token = request.args.get('state')
    # Verify state parameter
    # NOTE(review): these asserts are the CSRF/state protection and are
    # caught by the callers -- but assert statements are stripped under
    # ``python -O``, leaving the state unverified in optimized mode.
    # Confirm deployments never run with -O.
    assert state_token
    # Checks authenticity and integrity of state and decodes the value.
    state = serializer.loads(state_token)
    # Verify that state is for this session, app and that next parameter
    # have not been modified.
    assert state['sid'] == _create_identifier()
    assert state['app'] == remote_app
    # Store next URL
    set_session_next_url(remote_app, state['next'])
    handler = current_oauthclient.handlers[remote_app]()
    return handler
@blueprint.route('/authorized/<remote_app>/')
def authorized(remote_app=None):
    """Authorized handler callback.

    UI flavour: translates validation failures from :func:`_authorized`
    into plain HTTP error responses.
    """
    try:
        return _authorized(remote_app)
    except OAuthRemoteNotFound:
        return abort(404)
    except (AssertionError, BadData):
        # Invalid or tampered state token.  Only falls through (implicitly
        # returning None) when state checking is disabled AND the app runs
        # in debug/testing mode; otherwise the request is rejected.
        if current_app.config.get('OAUTHCLIENT_STATE_ENABLED', True) or (
            not(current_app.debug or current_app.testing)):
            abort(403)
    except OAuthException as e:
        # Upstream provider returned a malformed response: log and 500.
        if e.type == 'invalid_response':
            current_app.logger.warning(
                '{message} ({data})'.format(
                    message=e.message,
                    data=e.data
                )
            )
            abort(500)
        else:
            raise
@rest_blueprint.route('/authorized/<remote_app>/')
def rest_authorized(remote_app=None):
    """Authorized handler callback.

    REST flavour: failures are reported through ``response_handler``
    redirects (with a JSON-style payload) instead of HTTP aborts.
    """
    try:
        return _authorized(remote_app)
    except OAuthRemoteNotFound:
        abort(404)
    except (AssertionError, BadData) as e:
        current_app.logger.error(str(e))
        # Invalid or tampered state token; redirect with a 403 payload
        # unless state checking is disabled in debug/testing mode.
        if current_app.config.get('OAUTHCLIENT_STATE_ENABLED', True) or (
                not(current_app.debug or current_app.testing)):
            return response_handler(
                None,
                current_app.config[
                    'OAUTHCLIENT_REST_DEFAULT_ERROR_REDIRECT_URL'],
                payload=dict(
                    message="Invalid state.",
                    code=403
                )
            )
    except OAuthException as e:
        current_app.logger.error(str(e))
        # Upstream provider returned a malformed response: report a 500
        # payload; any other OAuth error is re-raised.
        if e.type == 'invalid_response':
            return response_handler(
                None,
                current_app.config[
                    'OAUTHCLIENT_REST_DEFAULT_ERROR_REDIRECT_URL'],
                payload=dict(
                    message="Invalid response.",
                    code=500
                )
            )
        else:
            raise
def _signup(remote_app):
    """Extra signup step."""
    handlers = current_oauthclient.signup_handlers
    if remote_app in handlers:
        return handlers[remote_app]['view']()
    raise OAuthRemoteNotFound()
@blueprint.route('/signup/<remote_app>/', methods=['GET', 'POST'])
def signup(remote_app):
    """Extra signup step."""
    try:
        response = _signup(remote_app)
    except OAuthRemoteNotFound:
        return abort(404)
    # A handler returning None means there is no extra signup view.
    if response is None:
        return abort(404)
    return response
@rest_blueprint.route('/signup/<remote_app>/', methods=['GET', 'POST'])
def rest_signup(remote_app):
    """Extra signup step (REST API)."""
    try:
        res = _signup(remote_app)
        return abort(404) if res is None else res
    except OAuthRemoteNotFound:
        # abort() raises an HTTPException, but return its result for
        # consistency with the UI blueprint's signup() view.
        return abort(404)
def _disconnect(remote_app):
    """Disconnect handler.

    Calls the disconnect handler registered for ``remote_app`` (which
    removes the account link and associated information) and commits
    the database session.
    """
    # Guard using the mapping that is actually accessed below.  The
    # original checked ``signup_handlers`` instead, which would let an
    # app without a disconnect handler fall through to a bare KeyError
    # (HTTP 500) rather than OAuthRemoteNotFound (HTTP 404).
    if remote_app not in current_oauthclient.disconnect_handlers:
        raise OAuthRemoteNotFound()
    ret = current_oauthclient.disconnect_handlers[remote_app]()
    db.session.commit()
    return ret
@blueprint.route('/disconnect/<remote_app>/')
def disconnect(remote_app):
    """Disconnect user from remote application.
    Removes application as well as associated information.
    """
    try:
        response = _disconnect(remote_app)
    except OAuthRemoteNotFound:
        abort(404)
    return response
@rest_blueprint.route('/disconnect/<remote_app>/')
def rest_disconnect(remote_app):
    """Disconnect user from remote application.
    Removes application as well as associated information.
    """
    try:
        response = _disconnect(remote_app)
    except OAuthRemoteNotFound:
        abort(404)
    return response
| mit |
MSM8939-Samsung/android_kernel_samsung_a7lte | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
	"""Top-level window drawing the scheduler traces.

	Handles zooming (+/-), scrolling (arrow keys), repainting of the
	visible time window and forwarding clicks to the tracer.
	"""
	Y_OFFSET = 100
	RECT_HEIGHT = 100
	RECT_SPACE = 50
	EVENT_MARKING_WIDTH = 5
	def __init__(self, sched_tracer, title, parent = None, id = -1):
		wx.Frame.__init__(self, parent, id, title)
		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5
		self.scroll_scale = 20
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		# one drawing row per rectangle reported by the tracer, plus one
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()
		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Fit()
		self.Fit()
		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
		# summary StaticText widget (created lazily in update_summary)
		self.txt = None
		self.Show(True)
	def us_to_px(self, val):
		"""Convert microseconds to pixels at the current zoom."""
		return val / (10 ** 3) * self.zoom
	def px_to_us(self, val):
		"""Convert pixels back to microseconds at the current zoom."""
		return (val / self.zoom) * (10 ** 3)
	def scroll_start(self):
		"""Return the scroll origin in pixels."""
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)
	def scroll_start_us(self):
		"""Return the horizontal scroll origin in microseconds."""
		(x, y) = self.scroll_start()
		return self.px_to_us(x)
	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		"""Draw the [start, end] slice as a rectangle in row ``nr``.

		An optional ``top_color`` (r, g, b) strip marks events along the
		top edge of the rectangle.
		"""
		offset_px = self.us_to_px(start - self.ts_start)
		width_px = self.us_to_px(end - self.ts_start)
		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT
		dc = self.dc
		if top_color is not None:
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			# shrink the main rectangle so the marking strip stays visible
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
	def update_rectangles(self, dc, start, end):
		"""Ask the tracer to repaint the [start, end] interval."""
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)
	def on_paint(self, event):
		"""Repaint the currently visible time window."""
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc
		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)
	def rect_from_ypixel(self, y):
		"""Map a y pixel to a row index, or -1 when the pixel falls in
		the spacing between rows or outside the drawing area."""
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1
		return rect
	def update_summary(self, txt):
		"""Replace the summary text shown below the drawing area."""
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
	def on_mouse_down(self, event):
		"""Forward a click to the tracer as (row, timestamp)."""
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return
		t = self.px_to_us(x) + self.ts_start
		self.sched_tracer.mouse_down(rect, t)
	def update_width_virtual(self):
		"""Recompute the virtual canvas width from the trace interval."""
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
	def __zoom(self, x):
		# Re-anchor the scrollbars so the time ``x`` stays at the left edge.
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()
	def zoom_in(self):
		"""Double the zoom factor, keeping the left edge anchored."""
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)
	def zoom_out(self):
		"""Halve the zoom factor, keeping the left edge anchored."""
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)
	def on_key_press(self, event):
		"""Keyboard handling: + / - zoom, arrow keys scroll."""
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return
		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
128technology/yinsolidated | test/plugin_json_test.py | 1 | 5346 | # Copyright 2016 128 Technology, Inc.
"""Unit tests for the yinsolidated pyang plugin"""
from __future__ import unicode_literals
import json
import os
import subprocess
import pyang
import pytest
from yinsolidated.plugin import plugin
# Directory containing the yinsolidated pyang plugin; passed via
# --plugindir for pyang versions that do not auto-discover plugins.
YINSOLIDATED_PLUGIN_DIRECTORY = os.path.dirname(plugin.__file__)
# XML namespaces used by the test fixture modules.
YIN_NAMESPACE = "urn:ietf:params:xml:ns:yang:yin:1"
TEST_NAMESPACE = "urn:xml:ns:test"
AUGMENTING_NAMESPACE = "urn:xml:ns:test:augment"
NSMAP = {"yin": YIN_NAMESPACE, "test": TEST_NAMESPACE, "aug": AUGMENTING_NAMESPACE}
@pytest.fixture(scope="module")
def consolidated_model():
    """Run pyang with the yinsolidated plugin on the fixture modules and
    return the consolidated model parsed from its JSON output."""
    test_file_dir = os.path.dirname(os.path.realpath(__file__))
    modules_dir = os.path.join(test_file_dir, "modules")
    main_module = os.path.join(modules_dir, "test-module.yang")
    augmenting_module = os.path.join(modules_dir, "augmenting-module.yang")
    pyang_command = [
        "pyang",
        "-f",
        "yinsolidated",
        "--yinsolidated-output-format=json",
        "-p",
        modules_dir,
    ]
    # Older pyang does not discover installed plugins automatically.
    # NOTE(review): lexicographic string comparison -- e.g. "1.10" < "1.7.2"
    # is True -- so this branch also triggers for newer versions; confirm
    # whether a proper version comparison is needed here.
    if pyang.__version__ < "1.7.2":
        pyang_command.extend(["--plugindir", YINSOLIDATED_PLUGIN_DIRECTORY])
    pyang_command.extend([main_module, augmenting_module])
    consolidated_model_json = subprocess.check_output(pyang_command)
    return json.loads(consolidated_model_json.decode("utf-8"))
def get_nested(yin_element, *path):
    """Descend through nested child elements and return an attribute.

    Every path component except the last names a child element (matched
    on its "keyword" entry); the final component is looked up as a key
    of the element reached.  Raises KeyError when a keyword has no
    matching child.
    """
    keys, attribute = path[:-1], path[-1]
    for keyword in keys:
        match = next(
            (child for child in yin_element["children"]
             if child["keyword"] == keyword),
            None,
        )
        if match is None:
            raise KeyError(keyword)
        yin_element = match
    return yin_element[attribute]
class TestModule(object):
    """Checks the module-level statements of the consolidated model."""
    def test_module_root_element(self, consolidated_model):
        assert consolidated_model["keyword"] == "module"
    def test_module_name_attribute(self, consolidated_model):
        module_name = consolidated_model.get("module-name")
        assert module_name == "test-module"
    def test_prefix_attribute(self, consolidated_model):
        prefix = consolidated_model.get("module-prefix")
        assert prefix == "test"
    def test_nsmap(self, consolidated_model):
        expected_nsmap = {"yin": YIN_NAMESPACE, "test": TEST_NAMESPACE}
        assert consolidated_model["nsmap"] == expected_nsmap
    def test_yang_version(self, consolidated_model):
        yang_version = get_nested(consolidated_model, "yang-version", "value")
        assert yang_version == "1"
    def test_namespace(self, consolidated_model):
        namespace = get_nested(consolidated_model, "namespace", "uri")
        assert namespace == TEST_NAMESPACE
    def test_prefix(self, consolidated_model):
        prefix = get_nested(consolidated_model, "prefix", "value")
        assert prefix == "test"
    def test_organization(self, consolidated_model):
        organization = get_nested(consolidated_model, "organization", "text")
        # NOTE(review): the fixture's organization statement appears to be
        # the literal string "None" -- confirm against test-module.yang.
        assert organization == "None"
    def test_contact(self, consolidated_model):
        contact = get_nested(consolidated_model, "contact", "text")
        assert contact == "Somebody"
    def test_description(self, consolidated_model):
        description = get_nested(consolidated_model, "description", "text")
        assert (
            description
            == "Test module containing an exhaustive set of possible YANG statements"
        )
    def test_revision(self, consolidated_model):
        date = get_nested(consolidated_model, "revision", "date")
        assert date == "2016-04-22"
        description = get_nested(consolidated_model, "revision", "description", "text")
        assert description == "Initial revision"
    def test_simple_extension_no_arg(self, consolidated_model):
        name = get_nested(consolidated_model, "extension", "name")
        assert name == "simple-extension-no-arg"
        description = get_nested(consolidated_model, "extension", "description", "text")
        assert (
            description
            == "An extension that takes no argument and does not support substatements"
        )
    def test_feature(self, consolidated_model):
        name = get_nested(consolidated_model, "feature", "name")
        assert name == "test-feature"
        description = get_nested(consolidated_model, "feature", "description", "text")
        assert description == "A test feature"
class TestSubmodule(object):
    """Checks that submodule content is merged into the consolidated model."""
    def test_data_definitions_included(self, consolidated_model):
        name = get_nested(consolidated_model, "container", "name")
        assert name == "submodule-container"
class TestEverything(object):
    """
    Rather than having a dozen tests that each compare one chunk of the consolidated
    model at a time, why not compare the whole thing in one go!
    """
    @pytest.mark.skip(
        "enable this 'test' to automatically re-generate the expected file"
    )
    def test_generate(self, consolidated_model):
        # Regeneration helper: writes the current model to expected.json
        # and then deliberately fails so it cannot pass unnoticed.
        with open(
            os.path.join(os.path.dirname(__file__), "expected.json"), "w"
        ) as file_:
            json.dump(consolidated_model, file_, indent=2)
        assert False
    def test_everything(self, consolidated_model):
        # Golden-file comparison against the checked-in expected.json.
        with open(
            os.path.join(os.path.dirname(__file__), "expected.json"), "r"
        ) as file_:
            expected = json.load(file_)
        assert consolidated_model == expected
| mit |
hpcleuven/easybuild-easyblocks | easybuild/easyblocks/a/abaqus.py | 2 | 4177 | ##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for ABAQUS, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
from easybuild.easyblocks.generic.binary import Binary
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
from distutils.version import LooseVersion
class EB_ABAQUS(Binary):
    """Support for installing ABAQUS.

    The install is driven non-interactively through a generated
    ``installer.properties`` replay file passed to the vendor installer.
    """
    def __init__(self, *args, **kwargs):
        """Initialisation of custom class variables for ABAQUS."""
        super(EB_ABAQUS, self).__init__(*args, **kwargs)
        # Path to the generated replay/properties file; set in configure_step.
        self.replayfile = None
    def extract_step(self):
        """Use default extraction procedure instead of the one for the Binary easyblock."""
        EasyBlock.extract_step(self)
    def configure_step(self):
        """Configure ABAQUS installation: write the installer replay file."""
        self.replayfile = os.path.join(self.builddir, "installer.properties")
        txt = '\n'.join([
            "INSTALLER_UI=SILENT",
            "USER_INSTALL_DIR=%s" % self.installdir,
            "MAKE_DEF_VER=true",
            "DOC_ROOT=UNDEFINED",
            "DOC_ROOT_TYPE=false",
            "DOC_ROOT_ESCAPED=UNDEFINED",
            "ABAQUSLM_LICENSE_FILE=@abaqusfea",
            "LICENSE_SERVER_TYPE=FLEXNET",
            "PRODUCT_NAME=Abaqus %s" % self.version,
            "TMPDIR=%s" % self.builddir,
            "INSTALL_MPI=1",
        ])
        try:
            # use open() + context manager instead of the deprecated
            # Python 2-only file() builtin, so the handle is closed even
            # when the write fails ('as' syntax works on Python 2.6+/3)
            with open(self.replayfile, "w") as handle:
                handle.write(txt)
        except IOError as err:
            raise EasyBuildError("Failed to create install properties file used for replaying installation: %s", err)
    def install_step(self):
        """Install ABAQUS using 'setup'."""
        os.chdir(self.builddir)
        if self.cfg['install_cmd'] is None:
            self.cfg['install_cmd'] = "%s/%s-%s/setup" % (self.builddir, self.name, self.version.split('-')[0])
            self.cfg['install_cmd'] += " -replay %s" % self.replayfile
            # -nosystemcheck was dropped from the installer in v6.13
            if LooseVersion(self.version) < LooseVersion("6.13"):
                self.cfg['install_cmd'] += " -nosystemcheck"
        super(EB_ABAQUS, self).install_step()
    def sanity_check_step(self):
        """Custom sanity check for ABAQUS."""
        # install dir layout is <major.minor>-<patch>, e.g. 6.12-1
        verparts = self.version.split('-')[0].split('.')
        custom_paths = {
            'files': [os.path.join("Commands", "abaqus")],
            'dirs': ["%s-%s" % ('.'.join(verparts[0:2]), verparts[2])]
        }
        custom_commands = [('abaqus', 'information=all')]
        super(EB_ABAQUS, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
    def make_module_req_guess(self):
        """Update PATH guesses for ABAQUS."""
        guesses = super(EB_ABAQUS, self).make_module_req_guess()
        guesses.update({
            'PATH': ['Commands'],
        })
        return guesses
| gpl-2.0 |
martydill/url_shortener | code/venv/lib/python2.7/site-packages/IPython/nbconvert/utils/tests/test_pandoc.py | 14 | 2546 | """Test Pandoc module"""
#-----------------------------------------------------------------------------
# Copyright (C) 2014 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import warnings
from IPython.testing import decorators as dec
from IPython.nbconvert.tests.base import TestsBase
from .. import pandoc
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestPandoc(TestsBase):
    """Collection of Pandoc tests"""
    def __init__(self, *args, **kwargs):
        super(TestPandoc, self).__init__(*args, **kwargs)
        # Snapshot of the environment so tests can restore PATH after
        # clobbering it.
        self.original_env = os.environ.copy()
    @dec.onlyif_cmds_exist('pandoc')
    def test_pandoc_available(self):
        """ Test behaviour that pandoc functions raise PandocMissing as documented """
        pandoc.clean_cache()
        # With an empty PATH, pandoc cannot be found: every entry point
        # must raise PandocMissing.
        os.environ["PATH"] = ""
        with self.assertRaises(pandoc.PandocMissing):
            pandoc.get_pandoc_version()
        with self.assertRaises(pandoc.PandocMissing):
            pandoc.check_pandoc_version()
        with self.assertRaises(pandoc.PandocMissing):
            pandoc.pandoc("", "markdown", "html")
        # original_env["PATH"] should contain pandoc
        os.environ["PATH"] = self.original_env["PATH"]
        # With pandoc reachable again, the same calls must succeed and
        # emit no warnings.
        with warnings.catch_warnings(record=True) as w:
            pandoc.get_pandoc_version()
            pandoc.check_pandoc_version()
            pandoc.pandoc("", "markdown", "html")
        self.assertEqual(w, [])
    @dec.onlyif_cmds_exist('pandoc')
    def test_minimal_version(self):
        # Force an impossible minimum version to trigger exactly one warning,
        # then restore a satisfiable minimum and expect success.
        original_minversion = pandoc._minimal_version
        pandoc._minimal_version = "120.0"
        with warnings.catch_warnings(record=True) as w:
            assert not pandoc.check_pandoc_version()
        self.assertEqual(len(w), 1)
        pandoc._minimal_version = pandoc.get_pandoc_version()
        assert pandoc.check_pandoc_version()
def pandoc_function_raised_missing(f, *args, **kwargs):
    """Return True when calling ``f`` raises PandocMissing, else False."""
    try:
        f(*args, **kwargs)
    except pandoc.PandocMissing:
        return True
    return False
| mit |
40223209/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/numbers.py | 883 | 10398 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
TODO: Fill out more detailed documentation on the operators."""
from abc import ABCMeta, abstractmethod
__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
class Number(metaclass=ABCMeta):
    """All numbers inherit from this class.
    If you just want to check if an argument x is a number, without
    caring what kind, use isinstance(x, Number).
    """
    __slots__ = ()
    # Concrete numeric types must provide their own hash implementation
    # (setting __hash__ to None makes instances unhashable by default).
    __hash__ = None
## Notes on Decimal
## ----------------
## Decimal has all of the methods specified by the Real abc, but it should
## not be registered as a Real because decimals do not interoperate with
## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
## abstract reals are expected to interoperate (i.e. R1 + R2 should be
## expected to work if R1 and R2 are both Reals).
class Complex(Number):
    """Complex defines the operations that work on the builtin complex type.
    In short, those are: a conversion to complex, .real, .imag, +, -,
    *, /, abs(), .conjugate, ==, and !=.
    If it is given heterogenous arguments, and doesn't have special
    knowledge about them, it should fall back to the builtin complex
    type as described below.
    """
    __slots__ = ()
    @abstractmethod
    def __complex__(self):
        """Return a builtin complex instance. Called for complex(self)."""
    def __bool__(self):
        """True if self != 0. Called for bool(self)."""
        return self != 0
    @property
    @abstractmethod
    def real(self):
        """Retrieve the real component of this number.
        This should subclass Real.
        """
        raise NotImplementedError
    @property
    @abstractmethod
    def imag(self):
        """Retrieve the imaginary component of this number.
        This should subclass Real.
        """
        raise NotImplementedError
    @abstractmethod
    def __add__(self, other):
        """self + other"""
        raise NotImplementedError
    @abstractmethod
    def __radd__(self, other):
        """other + self"""
        raise NotImplementedError
    @abstractmethod
    def __neg__(self):
        """-self"""
        raise NotImplementedError
    @abstractmethod
    def __pos__(self):
        """+self"""
        raise NotImplementedError
    # Subtraction is derived from __add__ and __neg__, so concrete
    # subclasses only need to implement those.
    def __sub__(self, other):
        """self - other"""
        return self + -other
    def __rsub__(self, other):
        """other - self"""
        return -self + other
    @abstractmethod
    def __mul__(self, other):
        """self * other"""
        raise NotImplementedError
    @abstractmethod
    def __rmul__(self, other):
        """other * self"""
        raise NotImplementedError
    @abstractmethod
    def __truediv__(self, other):
        """self / other: Should promote to float when necessary."""
        raise NotImplementedError
    @abstractmethod
    def __rtruediv__(self, other):
        """other / self"""
        raise NotImplementedError
    @abstractmethod
    def __pow__(self, exponent):
        """self**exponent; should promote to float or complex when necessary."""
        raise NotImplementedError
    @abstractmethod
    def __rpow__(self, base):
        """base ** self"""
        raise NotImplementedError
    @abstractmethod
    def __abs__(self):
        """Returns the Real distance from 0. Called for abs(self)."""
        raise NotImplementedError
    @abstractmethod
    def conjugate(self):
        """(x+y*i).conjugate() returns (x-y*i)."""
        raise NotImplementedError
    @abstractmethod
    def __eq__(self, other):
        """self == other"""
        raise NotImplementedError
    def __ne__(self, other):
        """self != other"""
        # The default __ne__ doesn't negate __eq__ until 3.0.
        return not (self == other)
# Register the builtin complex type as a virtual subclass.
Complex.register(complex)
class Real(Complex):
    """To Complex, Real adds the operations that work on real numbers.
    In short, those are: a conversion to float, trunc(), divmod,
    %, <, <=, >, and >=.
    Real also provides defaults for the derived operations.
    """
    __slots__ = ()
    @abstractmethod
    def __float__(self):
        """Any Real can be converted to a native float object.
        Called for float(self)."""
        raise NotImplementedError
    @abstractmethod
    def __trunc__(self):
        """trunc(self): Truncates self to an Integral.
        Returns an Integral i such that:
          * i>0 iff self>0;
          * abs(i) <= abs(self);
          * for any Integral j satisfying the first two conditions,
            abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
        i.e. "truncate towards 0".
        """
        raise NotImplementedError
    @abstractmethod
    def __floor__(self):
        """Finds the greatest Integral <= self."""
        raise NotImplementedError
    @abstractmethod
    def __ceil__(self):
        """Finds the least Integral >= self."""
        raise NotImplementedError
    @abstractmethod
    def __round__(self, ndigits=None):
        """Rounds self to ndigits decimal places, defaulting to 0.
        If ndigits is omitted or None, returns an Integral, otherwise
        returns a Real. Rounds half toward even.
        """
        raise NotImplementedError
    # divmod is derived from // and %, so subclasses get it for free.
    def __divmod__(self, other):
        """divmod(self, other): The pair (self // other, self % other).
        Sometimes this can be computed faster than the pair of
        operations.
        """
        return (self // other, self % other)
    def __rdivmod__(self, other):
        """divmod(other, self): The pair (self // other, self % other).
        Sometimes this can be computed faster than the pair of
        operations.
        """
        return (other // self, other % self)
    @abstractmethod
    def __floordiv__(self, other):
        """self // other: The floor() of self/other."""
        raise NotImplementedError
    @abstractmethod
    def __rfloordiv__(self, other):
        """other // self: The floor() of other/self."""
        raise NotImplementedError
    @abstractmethod
    def __mod__(self, other):
        """self % other"""
        raise NotImplementedError
    @abstractmethod
    def __rmod__(self, other):
        """other % self"""
        raise NotImplementedError
    @abstractmethod
    def __lt__(self, other):
        """self < other
        < on Reals defines a total ordering, except perhaps for NaN."""
        raise NotImplementedError
    @abstractmethod
    def __le__(self, other):
        """self <= other"""
        raise NotImplementedError
    # Concrete implementations of Complex abstract methods.
    def __complex__(self):
        """complex(self) == complex(float(self), 0)"""
        return complex(float(self))
    @property
    def real(self):
        """Real numbers are their real component."""
        return +self
    @property
    def imag(self):
        """Real numbers have no imaginary component."""
        return 0
    def conjugate(self):
        """Conjugate is a no-op for Reals."""
        return +self
# Register the builtin float type as a virtual subclass.
Real.register(float)
class Rational(Real):
    """.numerator and .denominator should be in lowest terms."""
    __slots__ = ()
    @property
    @abstractmethod
    def numerator(self):
        raise NotImplementedError
    @property
    @abstractmethod
    def denominator(self):
        raise NotImplementedError
    # Concrete implementation of Real's conversion to float.
    def __float__(self):
        """float(self) = self.numerator / self.denominator
        It's important that this conversion use the integer's "true"
        division rather than casting one side to float before dividing
        so that ratios of huge integers convert without overflowing.
        """
        return self.numerator / self.denominator
class Integral(Rational):
    """Integral adds a conversion to int and the bit-string operations."""
    __slots__ = ()
    @abstractmethod
    def __int__(self):
        """int(self)"""
        raise NotImplementedError
    def __index__(self):
        """Called whenever an index is needed, such as in slicing"""
        return int(self)
    @abstractmethod
    def __pow__(self, exponent, modulus=None):
        """self ** exponent % modulus, but maybe faster.
        Accept the modulus argument if you want to support the
        3-argument version of pow(). Raise a TypeError if exponent < 0
        or any argument isn't Integral. Otherwise, just implement the
        2-argument version described in Complex.
        """
        raise NotImplementedError
    @abstractmethod
    def __lshift__(self, other):
        """self << other"""
        raise NotImplementedError
    @abstractmethod
    def __rlshift__(self, other):
        """other << self"""
        raise NotImplementedError
    @abstractmethod
    def __rshift__(self, other):
        """self >> other"""
        raise NotImplementedError
    @abstractmethod
    def __rrshift__(self, other):
        """other >> self"""
        raise NotImplementedError
    @abstractmethod
    def __and__(self, other):
        """self & other"""
        raise NotImplementedError
    @abstractmethod
    def __rand__(self, other):
        """other & self"""
        raise NotImplementedError
    @abstractmethod
    def __xor__(self, other):
        """self ^ other"""
        raise NotImplementedError
    @abstractmethod
    def __rxor__(self, other):
        """other ^ self"""
        raise NotImplementedError
    @abstractmethod
    def __or__(self, other):
        """self | other"""
        raise NotImplementedError
    @abstractmethod
    def __ror__(self, other):
        """other | self"""
        raise NotImplementedError
    @abstractmethod
    def __invert__(self):
        """~self"""
        raise NotImplementedError
    # Concrete implementations of Rational and Real abstract methods.
    def __float__(self):
        """float(self) == float(int(self))"""
        return float(int(self))
    @property
    def numerator(self):
        """Integers are their own numerators."""
        return +self
    @property
    def denominator(self):
        """Integers have a denominator of 1."""
        return 1
# Register the builtin int type as a virtual subclass.
Integral.register(int)
| gpl-3.0 |
Taranys/Sick-Beard | lib/hachoir_parser/audio/midi.py | 90 | 7912 | """
Musical Instrument Digital Interface (MIDI) audio file parser.
Documentation:
- Standard MIDI File Format, Dustin Caldwell (downloaded on wotsit.org)
Author: Victor Stinner
Creation: 27 december 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, Bits, ParserError,
String, UInt32, UInt24, UInt16, UInt8, Enum, RawBytes)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.tools import createDict, humanDurationNanosec
from lib.hachoir_parser.common.tracker import NOTE_NAME
MAX_FILESIZE = 10 * 1024 * 1024
class Integer(Bits):
    """MIDI variable-length quantity (used e.g. for delta times).
    Each byte stores 7 payload bits; the high bit is set on every byte
    except the last.  The value is decoded eagerly in __init__ and the
    field size grows by 8 bits per continuation byte.
    """
    def __init__(self, parent, name, description=None):
        Bits.__init__(self, parent, name, 8, description)
        stream = parent.stream
        addr = self.absolute_address
        value = 0
        while True:
            bits = stream.readBits(addr, 8, parent.endian)
            # accumulate the low 7 bits; bit 7 is the continuation flag
            value = (value << 7) + (bits & 127)
            if not(bits & 128):
                break
            addr += 8
            self._size += 8
        if 32 < self._size:
            raise ParserError("Integer size is bigger than 32-bit")
        self.createValue = lambda: value
def parseNote(parser):
    # Payload of note on/off and key after-touch events.
    yield Enum(UInt8(parser, "note", "Note number"), NOTE_NAME)
    yield UInt8(parser, "velocity")
def parseControl(parser):
    # Payload of control change events.
    yield UInt8(parser, "control", "Controller number")
    yield UInt8(parser, "value", "New value")
def parsePatch(parser):
    # Payload of program (patch) change events.
    yield UInt8(parser, "program", "New program number")
def parseChannel(parser):
    # Payload of channel after-touch events.
    yield UInt8(parser, "channel", "Channel number")
def parsePitch(parser):
    # Payload of pitch wheel change events: 14-bit value split over two bytes.
    yield UInt8(parser, "bottom", "(least sig) 7 bits of value")
    yield UInt8(parser, "top", "(most sig) 7 bits of value")
def parseText(parser, size):
    # Payload of text-style meta events (text, copyright, lyric, ...).
    yield String(parser, "text", size)
def formatTempo(field):
    # Field value is in microseconds; humanDurationNanosec() expects ns.
    return humanDurationNanosec(field.value*1000)
def parseTempo(parser, size):
    # Payload of the "set tempo" meta event.
    yield textHandler(UInt24(parser, "microsec_quarter", "Microseconds per quarter note"), formatTempo)
def parseTimeSignature(parser, size):
    # Payload of the "time signature" meta event.
    yield UInt8(parser, "numerator", "Numerator of time signature")
    yield UInt8(parser, "denominator", "denominator of time signature 2=quarter 3=eighth, etc.")
    yield UInt8(parser, "nb_tick", "Number of ticks in metronome click")
    yield UInt8(parser, "nb_32nd_note", "Number of 32nd notes to the quarter note")
class Command(FieldSet):
    """One time-stamped MIDI event: a delta time, a status byte, and a
    status-specific payload (parsed by the tables below)."""
    # Channel-voice commands: status bytes 0x80-0xEF, low nibble = channel.
    COMMAND = {}
    for channel in xrange(16):
        COMMAND[0x80+channel] = ("Note off (channel %u)" % channel, parseNote)
        COMMAND[0x90+channel] = ("Note on (channel %u)" % channel, parseNote)
        COMMAND[0xA0+channel] = ("Key after-touch (channel %u)" % channel, parseNote)
        COMMAND[0xB0+channel] = ("Control change (channel %u)" % channel, parseControl)
        COMMAND[0xC0+channel] = ("Program (patch) change (channel %u)" % channel, parsePatch)
        COMMAND[0xD0+channel] = ("Channel after-touch (channel %u)" % channel, parseChannel)
        COMMAND[0xE0+channel] = ("Pitch wheel change (channel %u)" % channel, parsePitch)
    # Split the (description, parser) pairs into two lookup tables.
    COMMAND_DESC = createDict(COMMAND, 0)
    COMMAND_PARSER = createDict(COMMAND, 1)
    META_COMMAND_TEXT = 1
    META_COMMAND_NAME = 3
    # Meta events (status byte 0xFF): type byte -> (description, parser).
    META_COMMAND = {
        0x00: ("Sets the track's sequence number", None),
        0x01: ("Text event", parseText),
        0x02: ("Copyright info", parseText),
        0x03: ("Sequence or Track name", parseText),
        0x04: ("Track instrument name", parseText),
        0x05: ("Lyric", parseText),
        0x06: ("Marker", parseText),
        0x07: ("Cue point", parseText),
        0x2F: ("End of the track", None),
        0x51: ("Set tempo", parseTempo),
        0x58: ("Time Signature", parseTimeSignature),
        0x59: ("Key signature", None),
        0x7F: ("Sequencer specific information", None),
    }
    META_COMMAND_DESC = createDict(META_COMMAND, 0)
    META_COMMAND_PARSER = createDict(META_COMMAND, 1)
    def createFields(self):
        yield Integer(self, "time", "Delta time in ticks")
        yield Enum(textHandler(UInt8(self, "command"), hexadecimal), self.COMMAND_DESC)
        command = self["command"].value
        if command == 0xFF:
            # Meta event: type byte, length byte, then typed payload.
            yield Enum(textHandler(UInt8(self, "meta_command"), hexadecimal), self.META_COMMAND_DESC)
            yield UInt8(self, "data_len")
            size = self["data_len"].value
            if size:
                command = self["meta_command"].value
                if command in self.META_COMMAND_PARSER:
                    parser = self.META_COMMAND_PARSER[command]
                else:
                    parser = None
                if parser:
                    for field in parser(self, size):
                        yield field
                else:
                    # unknown/unparsed meta event: keep the raw payload
                    yield RawBytes(self, "data", size)
        else:
            if command not in self.COMMAND_PARSER:
                raise ParserError("Unknown command: %s" % self["command"].display)
            parser = self.COMMAND_PARSER[command]
            for field in parser(self):
                yield field
    def createDescription(self):
        if "meta_command" in self:
            return self["meta_command"].display
        else:
            return self["command"].display
class Track(FieldSet):
    """A MIDI track chunk: "MTrk" marker, 32-bit payload size, then a
    stream of Command events until the end of the chunk.
    """

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Total size in bits: 8-byte chunk header (marker + size) + payload.
        self._size = (8 + self["size"].value) * 8

    def createFields(self):
        yield String(self, "marker", 4, "Track marker (MTrk)", charset="ASCII")
        yield UInt32(self, "size")
        # Parse commands until the end of the chunk. (The original code
        # wrapped this loop in a dead `if True: ... else: raw bytes` branch;
        # the unreachable fallback has been removed.)
        while not self.eof:
            yield Command(self, "command[]")

    def createDescription(self):
        # Use the first command's text as the track description when it is
        # a "Text event" or "Sequence or Track name" meta event.
        command = self["command[0]"]
        if "meta_command" in command \
        and command["meta_command"].value in (Command.META_COMMAND_TEXT, Command.META_COMMAND_NAME) \
        and "text" in command:
            return command["text"].value.strip("\r\n")
        else:
            return ""
class Header(FieldSet):
    """MIDI file header chunk body: size, format, track count, division."""
    # Fixed size: 10 bytes (size + file_format + nb_track + delta_time).
    static_size = 10*8
    FILE_FORMAT = {
        0: "Single track",
        1: "Multiple tracks, synchronous",
        2: "Multiple tracks, asynchronous",
    }
    def createFields(self):
        yield UInt32(self, "size")
        yield Enum(UInt16(self, "file_format"), self.FILE_FORMAT)
        yield UInt16(self, "nb_track")
        yield UInt16(self, "delta_time", "Delta-time ticks per quarter note")
    def createDescription(self):
        # e.g. "Multiple tracks, synchronous; 4 tracks"
        return "%s; %s tracks" % (
            self["file_format"].display, self["nb_track"].value)
class MidiFile(Parser):
    """Top-level MIDI file parser: signature, header chunk, then tracks."""
    MAGIC = "MThd"
    PARSER_TAGS = {
        "id": "midi",
        "category": "audio",
        # NOTE(review): "audio/mime" looks like a typo for "audio/midi" --
        # confirm before relying on mime detection.
        "mime": (u"audio/mime", ),
        "file_ext": ["mid", "midi"],
        "magic": ((MAGIC, 0),),
        "min_size": 64,
        "description": "MIDI audio"
    }
    # MIDI files are big-endian throughout.
    endian = BIG_ENDIAN
    def validate(self):
        # Cheap sanity checks: magic bytes and the fixed header length (6).
        if self.stream.readBytes(0, 4) != self.MAGIC:
            return "Invalid signature"
        if self["header/size"].value != 6:
            return "Invalid header size"
        return True
    def createFields(self):
        yield String(self, "signature", 4, r"MIDI signature (MThd)", charset="ASCII")
        yield Header(self, "header")
        while not self.eof:
            yield Track(self, "track[]")
    def createDescription(self):
        return "MIDI audio: %s" % self["header"].description
    def createContentSize(self):
        # Locate the last track (header gives the track count), then search
        # for its "End of track" meta event (FF 2F 00) to bound the file.
        count = self["/header/nb_track"].value - 1
        start = self["track[%u]" % count].absolute_address
        # Search "End of track" of last track
        end = self.stream.searchBytes("\xff\x2f\x00", start, MAX_FILESIZE*8)
        if end is not None:
            # Include the 3-byte end-of-track marker itself (sizes in bits).
            return end + 3*8
        return None
| gpl-3.0 |
commonwealth-of-puerto-rico/lean | paart/apps/user_management/links.py | 4 | 2691 | from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from navigation.classes import Link
from .icons import (icon_user, icon_user_add, icon_user_edit, icon_user_delete,
icon_group, icon_group_add, icon_group_edit, icon_group_delete, icon_set_password,
icon_group_members)
from .permissions import (PERMISSION_USER_CREATE, PERMISSION_USER_EDIT,
PERMISSION_USER_VIEW, PERMISSION_USER_DELETE, PERMISSION_GROUP_CREATE,
PERMISSION_GROUP_EDIT, PERMISSION_GROUP_VIEW, PERMISSION_GROUP_DELETE)
# --- User management links ---------------------------------------------
# Navigation Link objects for the user CRUD views; each link is gated by
# the corresponding permission and carries its icon.
user_list = Link(text=_(u'user list'), view='user_list', icon=icon_user, permissions=[PERMISSION_USER_VIEW])
user_setup = Link(text=_(u'users'), view='user_list', icon=icon_user, permissions=[PERMISSION_USER_VIEW], children_view_regex=[r'^user_'])
user_add = Link(text=_(u'create new user'), view='user_add', icon=icon_user_add, permissions=[PERMISSION_USER_CREATE])
user_edit = Link(text=_(u'edit'), view='user_edit', args='object.id', icon=icon_user_edit, permissions=[PERMISSION_USER_EDIT])
user_delete = Link(text=_('delete'), view='user_delete', args='object.id', icon=icon_user_delete, permissions=[PERMISSION_USER_DELETE])
user_multiple_delete = Link(text=_('delete'), view='user_multiple_delete', icon=icon_user_delete, permissions=[PERMISSION_USER_DELETE])
user_set_password = Link(text=_('reset password'), view='user_set_password', args='object.id', icon=icon_set_password, permissions=[PERMISSION_USER_EDIT])
user_multiple_set_password = Link(text=_('reset password'), view='user_multiple_set_password', icon=icon_set_password, permissions=[PERMISSION_USER_EDIT])
user_groups = Link(text=_(u'groups'), view='user_groups', args='object.id', permissions=[PERMISSION_USER_EDIT], icon=icon_group)
# --- Group management links --------------------------------------------
group_list = Link(text=_(u'group list'), view='group_list', icon=icon_group, permissions=[PERMISSION_GROUP_VIEW])
group_setup = Link(text=_(u'groups'), view='group_list', icon=icon_group, permissions=[PERMISSION_GROUP_VIEW], children_view_regex=[r'^group_'])
group_add = Link(text=_(u'create new group'), view='group_add', icon=icon_group_add, permissions=[PERMISSION_GROUP_CREATE])
group_edit = Link(text=_(u'edit'), view='group_edit', args='object.id', icon=icon_group_edit, permissions=[PERMISSION_GROUP_EDIT])
group_delete = Link(text=_('delete'), view='group_delete', args='object.id', icon=icon_group_delete, permissions=[PERMISSION_GROUP_DELETE])
group_multiple_delete = Link(text=_('delete'), view='group_multiple_delete', icon=icon_group_delete, permissions=[PERMISSION_GROUP_DELETE])
group_members = Link(text=_(u'members'), view='group_members', args='object.id', icon=icon_group_members, permissions=[PERMISSION_GROUP_EDIT])
| gpl-3.0 |
DavidLP/home-assistant | homeassistant/components/mqtt/cover.py | 6 | 20154 | """Support for MQTT cover devices."""
import logging
import voluptuous as vol
from homeassistant.components import cover, mqtt
from homeassistant.components.cover import (
ATTR_POSITION, ATTR_TILT_POSITION, DEVICE_CLASSES_SCHEMA, SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT, SUPPORT_OPEN, SUPPORT_OPEN_TILT, SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION, SUPPORT_STOP, SUPPORT_STOP_TILT, CoverDevice)
from homeassistant.const import (
CONF_DEVICE, CONF_DEVICE_CLASS, CONF_NAME, CONF_OPTIMISTIC,
CONF_VALUE_TEMPLATE, STATE_CLOSED, STATE_OPEN, STATE_UNKNOWN)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN,
CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to the MQTT cover platform.
CONF_GET_POSITION_TOPIC = 'position_topic'
CONF_SET_POSITION_TEMPLATE = 'set_position_template'
CONF_SET_POSITION_TOPIC = 'set_position_topic'
CONF_TILT_COMMAND_TOPIC = 'tilt_command_topic'
CONF_TILT_STATUS_TOPIC = 'tilt_status_topic'
CONF_PAYLOAD_CLOSE = 'payload_close'
CONF_PAYLOAD_OPEN = 'payload_open'
CONF_PAYLOAD_STOP = 'payload_stop'
CONF_POSITION_CLOSED = 'position_closed'
CONF_POSITION_OPEN = 'position_open'
CONF_STATE_CLOSED = 'state_closed'
CONF_STATE_OPEN = 'state_open'
CONF_TILT_CLOSED_POSITION = 'tilt_closed_value'
CONF_TILT_INVERT_STATE = 'tilt_invert_state'
CONF_TILT_MAX = 'tilt_max'
CONF_TILT_MIN = 'tilt_min'
CONF_TILT_OPEN_POSITION = 'tilt_opened_value'
CONF_TILT_STATE_OPTIMISTIC = 'tilt_optimistic'
# Range selectors for find_percentage_in_range/find_in_range_from_percent.
TILT_PAYLOAD = 'tilt'
COVER_PAYLOAD = 'cover'
# Defaults applied by PLATFORM_SCHEMA when a key is omitted.
DEFAULT_NAME = 'MQTT Cover'
DEFAULT_OPTIMISTIC = False
DEFAULT_PAYLOAD_CLOSE = 'CLOSE'
DEFAULT_PAYLOAD_OPEN = 'OPEN'
DEFAULT_PAYLOAD_STOP = 'STOP'
DEFAULT_POSITION_CLOSED = 0
DEFAULT_POSITION_OPEN = 100
DEFAULT_RETAIN = False
DEFAULT_TILT_CLOSED_POSITION = 0
DEFAULT_TILT_INVERT_STATE = False
DEFAULT_TILT_MAX = 100
DEFAULT_TILT_MIN = 0
DEFAULT_TILT_OPEN_POSITION = 100
DEFAULT_TILT_OPTIMISTIC = False
# Supported-feature bitmasks advertised to Home Assistant.
OPEN_CLOSE_FEATURES = (SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP)
TILT_FEATURES = (SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_STOP_TILT |
                 SUPPORT_SET_TILT_POSITION)
def validate_options(value):
    """Validate the cover platform options.

    A configuration that defines ``set_position_topic`` must also define
    ``position_topic``; every other configuration passes through unchanged.
    """
    has_setter = CONF_SET_POSITION_TOPIC in value
    has_getter = CONF_GET_POSITION_TOPIC in value
    if has_setter and not has_getter:
        raise vol.Invalid(
            "set_position_topic must be set together with position_topic.")
    return value
# Voluptuous schema for a single MQTT cover entity. Built on the shared
# MQTT platform schema, extended with availability and JSON-attributes
# schemas, then cross-validated by validate_options.
PLATFORM_SCHEMA = vol.All(mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
    vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
    vol.Optional(CONF_GET_POSITION_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
    vol.Optional(CONF_PAYLOAD_CLOSE, default=DEFAULT_PAYLOAD_CLOSE): cv.string,
    vol.Optional(CONF_PAYLOAD_OPEN, default=DEFAULT_PAYLOAD_OPEN): cv.string,
    vol.Optional(CONF_PAYLOAD_STOP, default=DEFAULT_PAYLOAD_STOP): cv.string,
    vol.Optional(CONF_POSITION_CLOSED, default=DEFAULT_POSITION_CLOSED): int,
    vol.Optional(CONF_POSITION_OPEN, default=DEFAULT_POSITION_OPEN): int,
    vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
    vol.Optional(CONF_SET_POSITION_TEMPLATE): cv.template,
    vol.Optional(CONF_SET_POSITION_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_STATE_CLOSED, default=STATE_CLOSED): cv.string,
    vol.Optional(CONF_STATE_OPEN, default=STATE_OPEN): cv.string,
    vol.Optional(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_TILT_CLOSED_POSITION,
                 default=DEFAULT_TILT_CLOSED_POSITION): int,
    vol.Optional(CONF_TILT_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_TILT_INVERT_STATE,
                 default=DEFAULT_TILT_INVERT_STATE): cv.boolean,
    vol.Optional(CONF_TILT_MAX, default=DEFAULT_TILT_MAX): int,
    vol.Optional(CONF_TILT_MIN, default=DEFAULT_TILT_MIN): int,
    vol.Optional(CONF_TILT_OPEN_POSITION,
                 default=DEFAULT_TILT_OPEN_POSITION): int,
    vol.Optional(CONF_TILT_STATE_OPTIMISTIC,
                 default=DEFAULT_TILT_OPTIMISTIC): cv.boolean,
    vol.Optional(CONF_TILT_STATUS_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_UNIQUE_ID): cv.string,
    vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
    mqtt.MQTT_JSON_ATTRS_SCHEMA.schema), validate_options)
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
                               async_add_entities, discovery_info=None):
    """Set up MQTT cover through configuration.yaml."""
    # `config` has already been validated against PLATFORM_SCHEMA here.
    await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT cover dynamically through MQTT discovery."""
    async def async_discover(discovery_payload):
        """Discover and add an MQTT cover."""
        # Initialize before the try block: if pop() itself raised, the
        # except clause below would otherwise fail with an
        # UnboundLocalError that masks the original exception.
        discovery_hash = None
        try:
            discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(config, async_add_entities, config_entry,
                                      discovery_hash)
        except Exception:
            # Clear the partially-registered discovery entry so the device
            # can be rediscovered, then re-raise for upstream logging.
            if discovery_hash:
                clear_discovery_hash(hass, discovery_hash)
            raise

    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(cover.DOMAIN, 'mqtt'),
        async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry=None,
                              discovery_hash=None):
    """Set up the MQTT Cover."""
    # Shared by YAML setup (no config_entry/discovery_hash) and discovery.
    async_add_entities([MqttCover(config, config_entry, discovery_hash)])
class MqttCover(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
                MqttEntityDeviceInfo, CoverDevice):
    """Representation of a cover that can be controlled using MQTT.

    Internally ``self._state`` is True when the cover is closed, False
    when open, and None while unknown.
    """
    def __init__(self, config, config_entry, discovery_hash):
        """Initialize the cover."""
        self._unique_id = config.get(CONF_UNIQUE_ID)
        # None until the first position/state message is processed.
        self._position = None
        self._state = None
        self._sub_state = None
        self._optimistic = None
        self._tilt_value = None
        self._tilt_optimistic = None
        # Load config
        self._setup_from_config(config)
        device_config = config.get(CONF_DEVICE)
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(
            self, discovery_hash, self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
    async def async_added_to_hass(self):
        """Subscribe MQTT events."""
        await super().async_added_to_hass()
        await self._subscribe_topics()
    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate and re-apply the full configuration, then refresh
        # the attribute/availability/device-info helpers and subscriptions.
        config = PLATFORM_SCHEMA(discovery_payload)
        self._setup_from_config(config)
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_write_ha_state()
    def _setup_from_config(self, config):
        # Store config and derive the optimistic flags. Optimistic mode is
        # forced when neither a state topic nor a position topic provides
        # feedback from the device.
        self._config = config
        self._optimistic = (config[CONF_OPTIMISTIC] or
                            (config.get(CONF_STATE_TOPIC) is None and
                             config.get(CONF_GET_POSITION_TOPIC) is None))
        self._tilt_optimistic = config[CONF_TILT_STATE_OPTIMISTIC]
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        template = self._config.get(CONF_VALUE_TEMPLATE)
        if template is not None:
            template.hass = self.hass
        set_position_template = self._config.get(CONF_SET_POSITION_TEMPLATE)
        if set_position_template is not None:
            set_position_template.hass = self.hass
        topics = {}
        @callback
        def tilt_updated(msg):
            """Handle tilt updates."""
            # Accept only numeric payloads within the configured tilt range.
            if (msg.payload.isnumeric() and
                    (self._config[CONF_TILT_MIN] <= int(msg.payload) <=
                     self._config[CONF_TILT_MAX])):
                level = self.find_percentage_in_range(float(msg.payload))
                self._tilt_value = level
                self.async_write_ha_state()
        @callback
        def state_message_received(msg):
            """Handle new MQTT state messages."""
            payload = msg.payload
            if template is not None:
                payload = template.async_render_with_possible_json_value(
                    payload)
            # _state: True == closed, False == open.
            if payload == self._config[CONF_STATE_OPEN]:
                self._state = False
            elif payload == self._config[CONF_STATE_CLOSED]:
                self._state = True
            else:
                _LOGGER.warning("Payload is not True or False: %s", payload)
                return
            self.async_write_ha_state()
        @callback
        def position_message_received(msg):
            """Handle new MQTT state messages."""
            payload = msg.payload
            if template is not None:
                payload = template.async_render_with_possible_json_value(
                    payload)
            if payload.isnumeric():
                percentage_payload = self.find_percentage_in_range(
                    float(payload), COVER_PAYLOAD)
                self._position = percentage_payload
                # NOTE(review): compares against DEFAULT_POSITION_CLOSED (0)
                # rather than the configured closed position -- confirm this
                # is intended when position_closed is customized.
                self._state = percentage_payload == DEFAULT_POSITION_CLOSED
            else:
                _LOGGER.warning(
                    "Payload is not integer within range: %s",
                    payload)
                return
            self.async_write_ha_state()
        # Position feedback is preferred over plain open/closed state;
        # with neither configured the cover runs in optimistic mode.
        if self._config.get(CONF_GET_POSITION_TOPIC):
            topics['get_position_topic'] = {
                'topic': self._config.get(CONF_GET_POSITION_TOPIC),
                'msg_callback': position_message_received,
                'qos': self._config[CONF_QOS]}
        elif self._config.get(CONF_STATE_TOPIC):
            topics['state_topic'] = {
                'topic': self._config.get(CONF_STATE_TOPIC),
                'msg_callback': state_message_received,
                'qos': self._config[CONF_QOS]}
        else:
            # Force into optimistic mode.
            self._optimistic = True
        if self._config.get(CONF_TILT_STATUS_TOPIC) is None:
            self._tilt_optimistic = True
        else:
            self._tilt_optimistic = False
            self._tilt_value = STATE_UNKNOWN
            topics['tilt_status_topic'] = {
                'topic': self._config.get(CONF_TILT_STATUS_TOPIC),
                'msg_callback': tilt_updated,
                'qos': self._config[CONF_QOS]}
        self._sub_state = await subscription.async_subscribe_topics(
            self.hass, self._sub_state,
            topics)
    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state)
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._optimistic
    @property
    def name(self):
        """Return the name of the cover."""
        return self._config[CONF_NAME]
    @property
    def is_closed(self):
        """Return if the cover is closed."""
        return self._state
    @property
    def current_cover_position(self):
        """Return current position of cover.
        None is unknown, 0 is closed, 100 is fully open.
        """
        return self._position
    @property
    def current_cover_tilt_position(self):
        """Return current position of cover tilt."""
        return self._tilt_value
    @property
    def device_class(self):
        """Return the class of this sensor."""
        return self._config.get(CONF_DEVICE_CLASS)
    @property
    def supported_features(self):
        """Flag supported features."""
        # Features depend on which command topics are configured.
        supported_features = 0
        if self._config.get(CONF_COMMAND_TOPIC) is not None:
            supported_features = OPEN_CLOSE_FEATURES
        if self._config.get(CONF_SET_POSITION_TOPIC) is not None:
            supported_features |= SUPPORT_SET_POSITION
        if self._config.get(CONF_TILT_COMMAND_TOPIC) is not None:
            supported_features |= TILT_FEATURES
        return supported_features
    async def async_open_cover(self, **kwargs):
        """Move the cover up.
        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass, self._config.get(CONF_COMMAND_TOPIC),
            self._config[CONF_PAYLOAD_OPEN], self._config[CONF_QOS],
            self._config[CONF_RETAIN])
        if self._optimistic:
            # Optimistically assume that cover has changed state.
            self._state = False
            if self._config.get(CONF_GET_POSITION_TOPIC):
                self._position = self.find_percentage_in_range(
                    self._config[CONF_POSITION_OPEN], COVER_PAYLOAD)
            self.async_write_ha_state()
    async def async_close_cover(self, **kwargs):
        """Move the cover down.
        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass, self._config.get(CONF_COMMAND_TOPIC),
            self._config[CONF_PAYLOAD_CLOSE], self._config[CONF_QOS],
            self._config[CONF_RETAIN])
        if self._optimistic:
            # Optimistically assume that cover has changed state.
            self._state = True
            if self._config.get(CONF_GET_POSITION_TOPIC):
                self._position = self.find_percentage_in_range(
                    self._config[CONF_POSITION_CLOSED], COVER_PAYLOAD)
            self.async_write_ha_state()
    async def async_stop_cover(self, **kwargs):
        """Stop the device.
        This method is a coroutine.
        """
        # Stop is fire-and-forget; no optimistic state change is possible.
        mqtt.async_publish(
            self.hass, self._config.get(CONF_COMMAND_TOPIC),
            self._config[CONF_PAYLOAD_STOP], self._config[CONF_QOS],
            self._config[CONF_RETAIN])
    async def async_open_cover_tilt(self, **kwargs):
        """Tilt the cover open."""
        mqtt.async_publish(self.hass,
                           self._config.get(CONF_TILT_COMMAND_TOPIC),
                           self._config[CONF_TILT_OPEN_POSITION],
                           self._config[CONF_QOS],
                           self._config[CONF_RETAIN])
        if self._tilt_optimistic:
            self._tilt_value = self._config[CONF_TILT_OPEN_POSITION]
            self.async_write_ha_state()
    async def async_close_cover_tilt(self, **kwargs):
        """Tilt the cover closed."""
        mqtt.async_publish(self.hass,
                           self._config.get(CONF_TILT_COMMAND_TOPIC),
                           self._config[CONF_TILT_CLOSED_POSITION],
                           self._config[CONF_QOS],
                           self._config[CONF_RETAIN])
        if self._tilt_optimistic:
            self._tilt_value = self._config[CONF_TILT_CLOSED_POSITION]
            self.async_write_ha_state()
    async def async_set_cover_tilt_position(self, **kwargs):
        """Move the cover tilt to a specific position."""
        if ATTR_TILT_POSITION not in kwargs:
            return
        position = float(kwargs[ATTR_TILT_POSITION])
        # The position needs to be between min and max
        level = self.find_in_range_from_percent(position)
        mqtt.async_publish(self.hass,
                           self._config.get(CONF_TILT_COMMAND_TOPIC),
                           level,
                           self._config[CONF_QOS],
                           self._config[CONF_RETAIN])
    async def async_set_cover_position(self, **kwargs):
        """Move the cover to a specific position."""
        set_position_template = self._config.get(CONF_SET_POSITION_TEMPLATE)
        if ATTR_POSITION in kwargs:
            position = kwargs[ATTR_POSITION]
            # Keep the original 0-100 value for optimistic bookkeeping; the
            # published value may be transformed below.
            percentage_position = position
            if set_position_template is not None:
                try:
                    position = set_position_template.async_render(
                        **kwargs)
                except TemplateError as ex:
                    _LOGGER.error(ex)
                    self._state = None
            elif (self._config[CONF_POSITION_OPEN] != 100 and
                  self._config[CONF_POSITION_CLOSED] != 0):
                # Scale the percentage into the device's custom range.
                position = self.find_in_range_from_percent(
                    position, COVER_PAYLOAD)
            mqtt.async_publish(self.hass,
                               self._config.get(CONF_SET_POSITION_TOPIC),
                               position,
                               self._config[CONF_QOS],
                               self._config[CONF_RETAIN])
            if self._optimistic:
                self._state = percentage_position == \
                    self._config[CONF_POSITION_CLOSED]
                self._position = percentage_position
                self.async_write_ha_state()
    def find_percentage_in_range(self, position, range_type=TILT_PAYLOAD):
        """Find the 0-100% value within the specified range."""
        # the range of motion as defined by the min max values
        if range_type == COVER_PAYLOAD:
            max_range = self._config[CONF_POSITION_OPEN]
            min_range = self._config[CONF_POSITION_CLOSED]
        else:
            max_range = self._config[CONF_TILT_MAX]
            min_range = self._config[CONF_TILT_MIN]
        current_range = max_range - min_range
        # offset to be zero based
        offset_position = position - min_range
        position_percentage = round(
            float(offset_position) / current_range * 100.0)
        # Clamp to [0, 100].
        max_percent = 100
        min_percent = 0
        position_percentage = min(max(position_percentage, min_percent),
                                  max_percent)
        if range_type == TILT_PAYLOAD and \
                self._config[CONF_TILT_INVERT_STATE]:
            return 100 - position_percentage
        return position_percentage
    def find_in_range_from_percent(self, percentage, range_type=TILT_PAYLOAD):
        """
        Find the adjusted value for 0-100% within the specified range.
        if the range is 80-180 and the percentage is 90
        this method would determine the value to send on the topic
        by offsetting the max and min, getting the percentage value and
        returning the offset
        """
        if range_type == COVER_PAYLOAD:
            max_range = self._config[CONF_POSITION_OPEN]
            min_range = self._config[CONF_POSITION_CLOSED]
        else:
            max_range = self._config[CONF_TILT_MAX]
            min_range = self._config[CONF_TILT_MIN]
        offset = min_range
        current_range = max_range - min_range
        position = round(current_range * (percentage / 100.0))
        position += offset
        if range_type == TILT_PAYLOAD and \
                self._config[CONF_TILT_INVERT_STATE]:
            position = max_range - position + offset
        return position
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id
| apache-2.0 |
spatialdev/onadata | onadata/apps/viewer/migrations/0002_auto__add_field_datadictionary_xls.py | 13 | 8621 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the nullable ``xls`` file field to
    ``DataDictionary``.

    The ``models`` dict below is South's frozen snapshot of the ORM at
    the time the migration was generated -- it must not be edited.
    """
    def forwards(self, orm):
        # Adding field 'DataDictionary.xls'
        db.add_column('odk_viewer_datadictionary', 'xls', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'DataDictionary.xls'
        db.delete_column('odk_viewer_datadictionary', 'xls')
    # Frozen ORM state (auto-generated by South) -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'odk_logger.instance': {
            'Meta': {'object_name': 'Instance'},
            'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
            'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
            'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_logger.surveytype': {
            'Meta': {'object_name': 'SurveyType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'odk_logger.xform': {
            'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_viewer.columnrename': {
            'Meta': {'object_name': 'ColumnRename'},
            'column_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'xpath': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'odk_viewer.datadictionary': {
            'Meta': {'object_name': 'DataDictionary'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'json': ('django.db.models.fields.TextField', [], {}),
            'xform': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'data_dictionary'", 'unique': 'True', 'to': "orm['odk_logger.XForm']"}),
            'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'})
        },
        'odk_viewer.instancemodification': {
            'Meta': {'object_name': 'InstanceModification'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'modifications'", 'to': "orm['odk_logger.Instance']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'xpath': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'odk_viewer.parsedinstance': {
            'Meta': {'object_name': 'ParsedInstance'},
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'parsed_instance'", 'unique': 'True', 'to': "orm['odk_logger.Instance']"}),
            'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'lng': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        }
    }
    complete_apps = ['viewer']
| bsd-2-clause |
alexryndin/ambari | ambari-metrics/ambari-metrics-host-monitoring/src/main/python/psutil/examples/nettop.py | 48 | 4716 | #!/usr/bin/env python
#
# $Id: iotop.py 1160 2011-10-14 18:50:36Z g.rodola@gmail.com $
#
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Shows real-time network statistics.
Author: Giampaolo Rodola' <g.rodola@gmail.com>
$ python examples/nettop.py
-----------------------------------------------------------
total bytes: sent: 1.49 G received: 4.82 G
total packets: sent: 7338724 received: 8082712
wlan0 TOTAL PER-SEC
-----------------------------------------------------------
bytes-sent 1.29 G 0.00 B/s
bytes-recv 3.48 G 0.00 B/s
pkts-sent 7221782 0
pkts-recv 6753724 0
eth1 TOTAL PER-SEC
-----------------------------------------------------------
bytes-sent 131.77 M 0.00 B/s
bytes-recv 1.28 G 0.00 B/s
pkts-sent 0 0
pkts-recv 1214470 0
"""
import sys
import os
if os.name != 'posix':
sys.exit('platform not supported')
import atexit
import curses
import time
import psutil
# --- curses stuff
def tear_down():
    # Restore the terminal to its normal state; registered with atexit
    # below so it runs even if the program exits abnormally.
    win.keypad(0)
    curses.nocbreak()
    curses.echo()
    curses.endwin()
# Enter curses mode and make sure the terminal is restored on exit.
win = curses.initscr()
atexit.register(tear_down)
curses.endwin()
# Current output row for print_line(); reset after each full refresh.
lineno = 0
def print_line(line, highlight=False):
    """A thin wrapper around curses's addstr()."""
    global lineno
    try:
        if highlight:
            # Pad to the full window width so the reverse-video bar
            # spans the whole line.
            line += " " * (win.getmaxyx()[1] - len(line))
            win.addstr(lineno, 0, line, curses.A_REVERSE)
        else:
            win.addstr(lineno, 0, line, 0)
    except curses.error:
        # Writing past the window bounds raises curses.error: reset the
        # row counter, flush what we have, and re-raise.
        lineno = 0
        win.refresh()
        raise
    else:
        lineno += 1
# --- curses stuff
def bytes2human(n):
    """Convert a byte count to a human-readable string (binary units).

    Values below 1024 are reported in bytes; larger values use the
    largest fitting unit with two decimal places.

    >>> bytes2human(10000)
    '9.77 K'
    >>> bytes2human(100001221)
    '95.37 M'
    """
    # (The previous docstring examples showed '9.8 K' / '95.4 M', which
    # did not match the '%.2f' formatting actually used below.)
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # prefix maps each symbol to its byte threshold: K -> 2**10, M -> 2**20, ...
    prefix = {}
    for i, s in enumerate(symbols):
        prefix[s] = 1 << (i + 1) * 10
    # Pick the largest unit that does not exceed n.
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return '%.2f %s' % (value, s)
    return '%.2f B' % (n)
def poll(interval):
    """Retrieve raw stats within an interval window."""
    # Snapshot counters, wait `interval` seconds, snapshot again; the
    # caller computes per-second rates from the two snapshots.
    tot_before = psutil.net_io_counters()
    pnic_before = psutil.net_io_counters(pernic=True)
    # sleep some time
    time.sleep(interval)
    tot_after = psutil.net_io_counters()
    pnic_after = psutil.net_io_counters(pernic=True)
    return (tot_before, tot_after, pnic_before, pnic_after)
def refresh_window(tot_before, tot_after, pnic_before, pnic_after):
    """Redraw the whole screen from the two counter samples."""
    global lineno

    # Grand totals across all interfaces.
    print_line("total bytes: sent: %-10s received: %s" % (
        bytes2human(tot_after.bytes_sent),
        bytes2human(tot_after.bytes_recv)))
    print_line("total packets: sent: %-10s received: %s" % (
        tot_after.packets_sent, tot_after.packets_recv))
    print_line("")

    # Show the interfaces which generated the most traffic first.
    nic_names = sorted(pnic_after,
                       key=lambda nic: sum(pnic_after[nic]),
                       reverse=True)
    templ = "%-15s %15s %15s"
    for name in nic_names:
        before = pnic_before[name]
        after = pnic_after[name]
        print_line(templ % (name, "TOTAL", "PER-SEC"), highlight=True)
        print_line(templ % (
            "bytes-sent",
            bytes2human(after.bytes_sent),
            bytes2human(after.bytes_sent - before.bytes_sent) + '/s'))
        print_line(templ % (
            "bytes-recv",
            bytes2human(after.bytes_recv),
            bytes2human(after.bytes_recv - before.bytes_recv) + '/s'))
        print_line(templ % (
            "pkts-sent",
            after.packets_sent,
            after.packets_sent - before.packets_sent))
        print_line(templ % (
            "pkts-recv",
            after.packets_recv,
            after.packets_recv - before.packets_recv))
        print_line("")
    win.refresh()
    lineno = 0
def main():
    """Run the poll/redraw loop until interrupted.

    The first poll uses a zero interval so the screen appears
    immediately; subsequent windows are one second long.
    """
    interval = 0
    try:
        while True:
            samples = poll(interval)
            refresh_window(*samples)
            interval = 1
    except (KeyboardInterrupt, SystemExit):
        pass
if __name__ == '__main__':
main()
| apache-2.0 |
Voluntarynet/BitmessageKit | BitmessageKit/Vendor/static-python/Lib/test/test_glob.py | 88 | 6941 | import glob
import os
import shutil
import sys
import unittest
from test.test_support import run_unittest, TESTFN
def fsdecode(s):
    """Decode a byte string with the filesystem encoding (Python 2)."""
    fs_encoding = sys.getfilesystemencoding()
    return unicode(s, fs_encoding)
class GlobTests(unittest.TestCase):
    """Tests for glob.glob()/glob.iglob() run against a scratch tree.

    setUp() builds a small directory hierarchy (plus symlinks where
    supported) under TESTFN + "_dir"; each test matches patterns
    against that tree.
    """

    def norm(self, *parts):
        # Absolute, normalized path of *parts* inside the scratch directory.
        return os.path.normpath(os.path.join(self.tempdir, *parts))

    def mktemp(self, *parts):
        """Create an empty file at *parts*, making parent dirs as needed."""
        filename = self.norm(*parts)
        base, file = os.path.split(filename)
        if not os.path.exists(base):
            os.makedirs(base)
        f = open(filename, 'w')
        f.close()

    def setUp(self):
        self.tempdir = TESTFN + "_dir"
        self.mktemp('a', 'D')
        self.mktemp('aab', 'F')
        self.mktemp('.aa', 'G')
        self.mktemp('.bb', 'H')
        self.mktemp('aaa', 'zzzF')
        self.mktemp('ZZZ')
        self.mktemp('a', 'bcd', 'EF')
        self.mktemp('a', 'bcd', 'efg', 'ha')
        if hasattr(os, 'symlink'):
            # sym1/sym2 are broken links; sym3 points at a real directory.
            os.symlink(self.norm('broken'), self.norm('sym1'))
            os.symlink('broken', self.norm('sym2'))
            os.symlink(os.path.join('a', 'bcd'), self.norm('sym3'))

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def glob(self, *parts):
        """Glob *parts* under the scratch dir, cross-checking variants.

        Asserts that iglob() agrees with glob() and that the unicode
        form of the pattern produces the decoded form of the results.
        """
        if len(parts) == 1:
            pattern = parts[0]
        else:
            pattern = os.path.join(*parts)
        p = os.path.join(self.tempdir, pattern)
        res = glob.glob(p)
        self.assertEqual(list(glob.iglob(p)), res)
        ures = [fsdecode(x) for x in res]
        self.assertEqual(glob.glob(fsdecode(p)), ures)
        self.assertEqual(list(glob.iglob(fsdecode(p))), ures)
        return res

    def assertSequencesEqual_noorder(self, l1, l2):
        """Assert two sequences hold the same items, ignoring order."""
        l1 = list(l1)
        l2 = list(l2)
        self.assertEqual(set(l1), set(l2))
        self.assertEqual(sorted(l1), sorted(l2))

    def test_glob_literal(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a'), [self.norm('a')])
        eq(self.glob('a', 'D'), [self.norm('a', 'D')])
        eq(self.glob('aab'), [self.norm('aab')])
        eq(self.glob('zymurgy'), [])
        res = glob.glob('*')
        self.assertEqual({type(r) for r in res}, {str})
        res = glob.glob(os.path.join(os.curdir, '*'))
        self.assertEqual({type(r) for r in res}, {str})
        # test return types are unicode, but only if os.listdir
        # returns unicode filenames
        tmp = os.listdir(fsdecode(os.curdir))
        if {type(x) for x in tmp} == {unicode}:
            res = glob.glob(u'*')
            self.assertEqual({type(r) for r in res}, {unicode})
            res = glob.glob(os.path.join(fsdecode(os.curdir), u'*'))
            self.assertEqual({type(r) for r in res}, {unicode})

    def test_glob_one_directory(self):
        # Wildcards within a single path component: *, ?, [...].
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
        eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
        eq(self.glob('.*'), map(self.norm, ['.aa', '.bb']))
        eq(self.glob('?aa'), map(self.norm, ['aaa']))
        eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('*q'), [])

    def test_glob_nested_directory(self):
        eq = self.assertSequencesEqual_noorder
        if os.path.normcase("abCD") == "abCD":
            # case-sensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')])
        else:
            # case insensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'),
                                             self.norm('a', 'bcd', 'efg')])
        eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')])

    def test_glob_directory_names(self):
        # Wildcards spanning multiple path components.
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('*', 'D'), [self.norm('a', 'D')])
        eq(self.glob('*', '*a'), [])
        eq(self.glob('a', '*', '*', '*a'),
           [self.norm('a', 'bcd', 'efg', 'ha')])
        eq(self.glob('?a?', '*F'), [self.norm('aaa', 'zzzF'),
                                    self.norm('aab', 'F')])

    def test_glob_directory_with_trailing_slash(self):
        # Patterns ending with a slash shouldn't match non-dirs
        res = glob.glob(self.norm('Z*Z') + os.sep)
        self.assertEqual(res, [])
        res = glob.glob(self.norm('ZZZ') + os.sep)
        self.assertEqual(res, [])
        # When there is a wildcard pattern which ends with os.sep, glob()
        # doesn't blow up.
        res = glob.glob(self.norm('aa*') + os.sep)
        self.assertEqual(len(res), 2)
        # either of these results is reasonable
        self.assertIn(set(res), [
                      {self.norm('aaa'), self.norm('aab')},
                      {self.norm('aaa') + os.sep, self.norm('aab') + os.sep},
                      ])

    def test_glob_unicode_directory_with_trailing_slash(self):
        # Same as test_glob_directory_with_trailing_slash, but with an
        # unicode argument.
        res = glob.glob(fsdecode(self.norm('Z*Z') + os.sep))
        self.assertEqual(res, [])
        res = glob.glob(fsdecode(self.norm('ZZZ') + os.sep))
        self.assertEqual(res, [])
        res = glob.glob(fsdecode(self.norm('aa*') + os.sep))
        self.assertEqual(len(res), 2)
        # either of these results is reasonable
        self.assertIn(set(res), [
                      {fsdecode(self.norm('aaa')), fsdecode(self.norm('aab'))},
                      {fsdecode(self.norm('aaa') + os.sep),
                       fsdecode(self.norm('aab') + os.sep)},
                      ])

    @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
    def test_glob_symlinks(self):
        # sym3 -> a/bcd, so globbing through it should see EF and efg.
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('sym3'), [self.norm('sym3')])
        eq(self.glob('sym3', '*'), [self.norm('sym3', 'EF'),
                                    self.norm('sym3', 'efg')])
        self.assertIn(self.glob('sym3' + os.sep),
                      [[self.norm('sym3')], [self.norm('sym3') + os.sep]])
        eq(self.glob('*', '*F'),
           [self.norm('aaa', 'zzzF'), self.norm('aab', 'F'),
            self.norm('sym3', 'EF')])

    @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
    def test_glob_broken_symlinks(self):
        # Broken symlinks still match by name.
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2'),
                               self.norm('sym3')])
        eq(self.glob('sym1'), [self.norm('sym1')])
        eq(self.glob('sym2'), [self.norm('sym2')])

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific test")
    def test_glob_magic_in_drive(self):
        # Wildcards in the drive letter position must not match anything.
        eq = self.assertSequencesEqual_noorder
        eq(glob.glob('*:'), [])
        eq(glob.glob(u'*:'), [])
        eq(glob.glob('?:'), [])
        eq(glob.glob(u'?:'), [])
def test_main():
    """Entry point used by the Python 2 regression-test machinery."""
    run_unittest(GlobTests)
if __name__ == "__main__":
test_main()
| mit |
jbedorf/tensorflow | tensorflow/python/tpu/tpu_embedding.py | 2 | 44069 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU embedding APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import math
import re
import six
from tensorflow.core.protobuf.tpu import optimization_parameters_pb2
from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 as elc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.tpu.ops import tpu_ops
# Re-exported mode constants from the TPU embedding configuration proto;
# passed as the `mode` argument of `TPUEmbedding.__init__`.
TRAINING = elc.TPUEmbeddingConfiguration.TRAINING
INFERENCE = elc.TPUEmbeddingConfiguration.INFERENCE
class TableConfig(
    collections.namedtuple(
        'TableConfig',
        ['vocabulary_size', 'dimension', 'initializer', 'combiner'])):
  """Configuration for one TPU embedding table."""

  def __new__(cls,
              vocabulary_size,
              dimension,
              initializer=None,
              combiner='mean'):
    """Create a validated embedding table configuration.

    Args:
      vocabulary_size: Number of rows of the embedding table; must be a
        positive integer.
      dimension: Width of each embedding vector; must be a positive integer.
      initializer: Optional callable used to initialize the embedding
        variable. When omitted, a truncated normal initializer with mean
        `0.0` and standard deviation `1/sqrt(dimension)` is used.
      combiner: How multiple entries in a single row are reduced: 'mean'
        (default), 'sum', 'sqrtn', or None (only valid for dense rather
        than sparse tensors). See `tf.nn.embedding_lookup_sparse`.

    Returns:
      A `TableConfig` instance.

    Raises:
      ValueError: if `vocabulary_size` or `dimension` is not a positive
        integer, `initializer` is not callable, or `combiner` is not one
        of the supported values.
    """
    if not (isinstance(vocabulary_size, int) and vocabulary_size >= 1):
      raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))
    if not (isinstance(dimension, int) and dimension >= 1):
      raise ValueError('Invalid dimension {}.'.format(dimension))
    if initializer is not None and not callable(initializer):
      raise ValueError('initializer must be callable if specified.')
    if initializer is None:
      # Default scale keeps embedding magnitudes roughly unit-normed.
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=1 / math.sqrt(dimension))
    if combiner not in ('mean', 'sum', 'sqrtn', None):
      raise ValueError('Invalid combiner {}'.format(combiner))
    return super(TableConfig, cls).__new__(
        cls, vocabulary_size, dimension, initializer, combiner)
# Name holders for optimizer slot variables (per table): the *Names tuples
# carry variable-name strings, the *Variables tuples carry the created
# variables themselves.
AdamSlotVariableNames = collections.namedtuple(
    'AdamSlotVariableNames', ['m', 'v'])
AdagradSlotVariableName = collections.namedtuple(
    'AdagradSlotVariableName', ['accumulator'])
AdamSlotVariables = collections.namedtuple(
    'AdamSlotVariables', ['m', 'v'])
AdagradSlotVariable = collections.namedtuple(
    'AdagradSlotVariable', ['accumulator'])
# Return type of TPUEmbedding.create_variables_and_ops(): per-table
# variables plus functions producing the CPU<->TPU load/retrieve ops.
VariablesAndOps = collections.namedtuple(
    'VariablesAndOps',
    ['embedding_variables_by_table', 'slot_variables_by_table',
     'load_ops', 'retrieve_ops']
)
class _OptimizationParameters(object):
"""Parameters common to all optimizations."""
def __init__(self, learning_rate, use_gradient_accumulation):
self.learning_rate = learning_rate
self.use_gradient_accumulation = use_gradient_accumulation
class AdagradParameters(_OptimizationParameters):
  """Optimization parameters for the Adagrad optimizer."""

  def __init__(self, learning_rate, initial_accumulator=0.1,
               use_gradient_accumulation=True):
    """Optimization parameters for Adagrad.

    Args:
      learning_rate: used for updating embedding table.
      initial_accumulator: initial accumulator for Adagrad; must be
        strictly positive.
      use_gradient_accumulation: setting this to `False` makes embedding
        gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.

    Raises:
      ValueError: if `initial_accumulator` is not positive.
    """
    # Reject bad input before storing anything.
    if initial_accumulator <= 0:
      raise ValueError('Adagrad initial_accumulator must be positive')
    super(AdagradParameters, self).__init__(
        learning_rate, use_gradient_accumulation)
    self.initial_accumulator = initial_accumulator
class AdamParameters(_OptimizationParameters):
  """Optimization parameters for the Adam optimizer."""

  def __init__(self, learning_rate,
               beta1=0.9,
               beta2=0.999,
               epsilon=1e-08,
               lazy_adam=True,
               sum_inside_sqrt=True,
               use_gradient_accumulation=True):
    """Optimization parameters for Adam.

    Args:
      learning_rate: a floating point value. The learning rate.
      beta1: A float value.
        The exponential decay rate for the 1st moment estimates.
      beta2: A float value.
        The exponential decay rate for the 2nd moment estimates.
      epsilon: A small constant for numerical stability.
      lazy_adam: Use lazy Adam instead of Adam. Lazy Adam trains faster.
        Please see `optimization_parameters.proto` for details.
      sum_inside_sqrt: This improves training speed. Please see
        `optimization_parameters.proto` for details.
      use_gradient_accumulation: setting this to `False` makes embedding
        gradients calculation less accurate but faster. Please see
        `optimization_parameters.proto` for details.

    Raises:
      ValueError: if any hyperparameter is outside its valid range.
    """
    # Validate all hyperparameters up front, in a fixed order.
    if beta1 < 0. or beta1 >= 1.:
      raise ValueError('beta1 must be between 0. and 1; got {}.'.format(beta1))
    if beta2 < 0. or beta2 >= 1.:
      raise ValueError('beta2 must be between 0. and 1; got {}.'.format(beta2))
    if epsilon <= 0.:
      raise ValueError('epsilon must be positive; got {}.'.format(epsilon))
    if not use_gradient_accumulation and not lazy_adam:
      raise ValueError(
          'When disabling Lazy Adam, gradient accumulation must be used.')
    super(AdamParameters, self).__init__(
        learning_rate, use_gradient_accumulation)
    self.beta1 = beta1
    self.beta2 = beta2
    self.epsilon = epsilon
    self.lazy_adam = lazy_adam
    self.sum_inside_sqrt = sum_inside_sqrt
class StochasticGradientDescentParameters(_OptimizationParameters):
  """Optimization parameters for stochastic gradient descent.

  Args:
    learning_rate: a floating point value. The learning rate.
  """

  def __init__(self, learning_rate):
    # SGD never uses gradient accumulation, hence the hard-coded False.
    super(StochasticGradientDescentParameters, self).__init__(
        learning_rate, False)
class TPUEmbedding(object):
"""API for using TPU for embedding.
Example:
```
table_config_user = tpu_embedding.TableConfig(
vocabulary_size=4, dimension=2,
initializer=initializer, combiner='mean')
table_to_config_dict = {'video': table_config_video,
'user': table_config_user}
feature_to_table_dict = {'watched': 'video',
'favorited': 'video',
'friends': 'user'}
batch_size = 4
num_hosts = 1
optimization_parameters = tpu_embedding.AdagradParameters(1., 1.)
mode = tpu_embedding.TRAINING
embedding = tpu_embedding.TPUEmbedding(
table_to_config_dict, feature_to_table_dict,
batch_size, num_hosts, mode, optimization_parameters)
batch_size_per_core = embedding.batch_size_per_core
sparse_features_list = []
for host in hosts:
with ops.device(host):
for _ in range(embedding.num_cores_per_host):
sparse_features = {}
sparse_features['watched'] = sparse_tensor.SparseTensor(...)
sparse_features['favorited'] = sparse_tensor.SparseTensor(...)
sparse_features['friends'] = sparse_tensor.SparseTensor(...)
sparse_features_list.append(sparse_features)
enqueue_ops = embedding.generate_enqueue_ops(sparse_features_list)
embedding_variables_and_ops = embedding.create_variables_and_ops()
def computation():
activations = embedding.get_activations()
loss = compute_loss(activations)
base_optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=1)
cross_shard_optimizer = tpu_optimizer.CrossShardOptimizer(
base_optimizer)
train_op = cross_shard_optimizer.minimize(loss)
gradients = (
tpu_embedding_gradient.get_gradients_through_compute_gradients(
cross_shard_optimizer, loss, activations)
send_gradients_op = embedding.generate_send_gradients_op(gradients)
with ops.control_dependencies([train_op, send_gradients_op]):
loss = array_ops.identity(loss)
loss = tpu.shard(computation,
num_shards=embedding.num_cores)
with self.test_session() as sess:
sess.run(tpu.initialize_system(embedding_config=
embedding.config_proto))
sess.run(variables.global_variables_initializer())
sess.run(embedding_variables_and_ops.load_ops())
sess.run(enqueue_ops)
loss_val = sess.run(loss)
```
"""
# TODO(shizhiw): Instead of `feature_to_table_dict` which maps to table
# name, consider `feature_to_config_dict` which maps to `FeatureConfig`.
# `FeatureConfig` could have fields other than table name. For example, it
# could have a field to indicate that the feature should not be used to
# update embedding table (cr/204852758, cr/204940540). Also, this can support
# different combiners for different features within the same table.
# TODO(shizhiw, b/118512626): Remove `batch_size` from `__init__` and move it
# to `FeatureConfig`?
# TODO(shizhiw): will it be cleaner to make `table_to_config_dict` and
# `feature_to_table_dict` lists of `TableSpec` and `FeatureSpec` respectively?
# TODO(shizhiw): Consider adding `input_fn` as an option to remove boilerplate
# for-loops around construction of inputs.
# `optimization_parameter` applies to all tables. If the need arises,
# we can add `optimization_parameters` to `TableConfig` to override this
# global setting.
def __init__(self,
table_to_config_dict,
feature_to_table_dict,
batch_size,
mode,
master,
optimization_parameters=None,
cluster_def=None,
pipeline_execution_with_tensor_core=True):
"""API for using TPU for embedding lookups.
Args:
table_to_config_dict: A dictionary mapping from string of table name to
`TableConfig`. Table refers to an embedding table, e.g. `params`
argument to `tf.nn.embedding_lookup_sparse()`.
feature_to_table_dict: A dictionary mapping from string of feature name
to string of table name. Feature refers to ids to lookup in embedding
table, e.g. `sp_ids` argument to `tf.nn.embedding_lookup_sparse()`.
batch_size: An `int` representing the global batch size.
mode: `TRAINING` or `INFERENCE`.
master: A `string` representing the TensorFlow master to use.
optimization_parameters: `AdagradParameters`, `AdamParameters`,
`Stochasticgradientdescentparameters`. Must be set in training and must
be `None` in inference.
cluster_def: A ClusterDef object describing the TPU cluster.
pipeline_execution_with_tensor_core: setting this to `True` makes training
faster, but trained model will be different if step N and step N+1
involve the same set of embedding ID. Please see
`tpu_embedding_configuration.proto` for details.
Raises:
ValueError: if any input is invalid.
"""
_validate_table_to_config_dict(table_to_config_dict)
# Avoid nondeterminism from `Dict` iteration order by using `OrderedDict`.
self._table_to_config_dict = _create_ordered_dict(table_to_config_dict)
_validate_feature_to_table_dict(table_to_config_dict, feature_to_table_dict)
self._feature_to_table_dict = _create_ordered_dict(feature_to_table_dict)
self._table_to_features_dict = _create_table_to_features_dict(
self._feature_to_table_dict)
self._combiners = _create_combiners(self._table_to_config_dict,
self._table_to_features_dict)
self._batch_size = batch_size
self._master = master
self._cluster_def = cluster_def
self._tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata( # pylint: disable=protected-access
self._master, cluster_def=self._cluster_def))
if self._tpu_system_metadata.num_cores == 0:
raise ValueError('TPUEmbedding needs TPUs, but master {} does not have '
'TPUs.'.format(self._master))
self._num_hosts = self._tpu_system_metadata.num_hosts
master_job_name = tpu_system_metadata_lib.master_job(self._master,
self._cluster_def)
self._hosts = sorted([
device.name for device in self._tpu_system_metadata.devices
if 'device:CPU:' in device.name and (master_job_name is None or
master_job_name in device.name)])
self._num_cores_per_host = self._tpu_system_metadata.num_of_cores_per_host
self._num_cores = self._tpu_system_metadata.num_cores
_validate_batch_size(self._batch_size, self._num_cores)
self._batch_size_per_core = self._batch_size // self._num_cores
# TODO(shizhiw): remove `mode`?
if mode == TRAINING:
_validate_optimization_parameters(optimization_parameters)
self._optimization_parameters = optimization_parameters
elif mode == INFERENCE:
if optimization_parameters is not None:
raise ValueError('`optimization_parameters` should be `None` '
'for inference mode.')
self._optimization_parameters = (
StochasticGradientDescentParameters(1.))
else:
raise ValueError('`mode` only supports {} and {}; got {}.'
.format(TRAINING, INFERENCE, mode))
self._mode = mode
# TODO(shizhiw): move `optimization_parameters` into `_optimizer_handler`
# and create special handler for inference that inherits from
# StochasticGradientDescentHandler with more user-friendly error message
# on get_slot().
self._optimizer_handler = _get_optimization_handler(
self._optimization_parameters)
self._pipeline_execution_with_tensor_core = (
pipeline_execution_with_tensor_core)
self._config_proto = self._create_config_proto()
@property
def hosts(self):
"""A list of device names for CPU hosts.
Returns:
A list of device names for CPU hosts.
"""
return copy.copy(self._hosts)
# TODO(shizhiw): change to num_tensor_cores_per_host to be more explicit and
# to be consistent with `tpu_embedding_configuration.proto`.
@property
def num_cores_per_host(self):
"""Number of TPU cores on a CPU host.
Returns:
Number of TPU cores on a CPU host.
"""
return self._num_cores_per_host
@property
def num_cores(self):
"""Total number of TPU cores on all hosts.
Returns:
Total number of TPU cores on all hosts.
"""
return self._num_cores
@property
def batch_size_per_core(self):
"""Batch size for each TPU core.
The sparse tensors in `sparse_features_list` to `generate_enqueue_ops`
must have batch dimension equal to this.
Returns:
Batch size for each TPU core.
"""
return self._batch_size_per_core
@property
def config_proto(self):
"""Create embedding config proto for `tpu.initialize_system()`.
Returns:
an `TPUEmbeddingConfiguration` proto describing the desired
configuration of the hardware embedding lookup tables, which
is passed to `tpu.initialize_system()`.
"""
return self._config_proto
@property
def table_to_config_dict(self):
return copy.copy(self._table_to_config_dict)
@property
def feature_to_table_dict(self):
return copy.copy(self._feature_to_table_dict)
@property
def table_to_features_dict(self):
return copy.copy(self._table_to_features_dict)
@property
def optimization_parameters(self):
return self._optimization_parameters
def _create_config_proto(self):
"""Create `TPUEmbeddingConfiguration`."""
config_proto = elc.TPUEmbeddingConfiguration()
for table in self._table_to_config_dict:
table_descriptor = config_proto.table_descriptor.add()
table_descriptor.name = table
table_config = self._table_to_config_dict[table]
table_descriptor.vocabulary_size = table_config.vocabulary_size
table_descriptor.dimension = table_config.dimension
features_for_table = self._table_to_features_dict[table]
table_descriptor.num_features = len(features_for_table)
table_descriptor.optimization_parameters.learning_rate.constant = (
self._optimization_parameters.learning_rate)
table_descriptor.optimization_parameters.gradient_accumulation_status = (
optimization_parameters_pb2.GradientAccumulationStatus.ENABLED
if self._optimization_parameters.use_gradient_accumulation else
optimization_parameters_pb2.GradientAccumulationStatus.DISABLED)
self._optimizer_handler.set_optimization_parameters(table_descriptor)
config_proto.mode = self._mode
config_proto.batch_size_per_tensor_core = self._batch_size_per_core
config_proto.num_hosts = self._num_hosts
config_proto.num_tensor_cores = self._num_cores
config_proto.sharding_strategy = elc.TPUEmbeddingConfiguration.DIV_DEFAULT
config_proto.pipeline_execution_with_tensor_core = (
self._pipeline_execution_with_tensor_core)
return config_proto
def create_variables_and_ops(self, embedding_variable_name_by_table=None,
slot_variable_names_by_table=None):
"""Create embedding and slot variables, with ops to load and retrieve them.
Args:
embedding_variable_name_by_table: A dictionary mapping from string of
table name to string of embedding variable name. If `None`,
defaults from `get_default_slot_variable_names()` will be used.
slot_variable_names_by_table: A dictionary mapping from string of table
name to `AdamSlotVariableNames`, `AdagradSlotVariableNames` etc. If
`None`, defaults from `get_default_slot_variable_names()` will be used.
Returns:
`tpu_embedding.VariablesAndOps` with:
A dictionary mapping from string of table name to embedding variables,
A dictionary mapping from string of table name to AdagradSlotVariable,
AdamSlotVariables etc with slot variables,
A function which returns a list of ops to load embedding and slot
variables from TPU to CPU.
A function which returns a list of ops to retrieve embedding and slot
variables from TPU to CPU.
"""
embedding_variables_by_table = {}
slot_variables_by_table = {}
load_op_fns = []
retrieve_op_fns = []
for table in self._table_to_config_dict:
if embedding_variable_name_by_table:
embedding_variable_name = embedding_variable_name_by_table[table]
else:
embedding_variable_name = table
if slot_variable_names_by_table:
slot_variable_names = slot_variable_names_by_table[table]
else:
slot_variable_names = (
self._optimizer_handler.get_default_slot_variable_names(table))
device_fn = _create_device_fn(self._hosts)
with ops.device(device_fn):
table_variables = _create_partitioned_variables(
name=embedding_variable_name,
num_hosts=self._num_hosts,
vocabulary_size=self._table_to_config_dict[table].vocabulary_size,
embedding_dimension=self._table_to_config_dict[table].dimension,
initializer=self._table_to_config_dict[table].initializer,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
embedding_variables_by_table[table] = table_variables
slot_variables_for_table, load_ops_fn, retrieve_ops_fn = (
self._optimizer_handler.create_variables_and_ops(
table, slot_variable_names, self._num_hosts,
self._table_to_config_dict[table], table_variables)
)
slot_variables_by_table[table] = slot_variables_for_table
load_op_fns.append(load_ops_fn)
retrieve_op_fns.append(retrieve_ops_fn)
def load_ops():
"""Calls and returns the load ops for each embedding table.
Returns:
A list of ops to load embedding and slot variables from CPU to TPU.
"""
load_ops_list = []
for load_op_fn in load_op_fns:
load_ops_list.extend(load_op_fn())
return load_ops_list
def retrieve_ops():
"""Calls and returns the retrieve ops for each embedding table.
Returns:
A list of ops to retrieve embedding and slot variables from TPU to CPU.
"""
retrieve_ops_list = []
for retrieve_op_fn in retrieve_op_fns:
retrieve_ops_list.extend(retrieve_op_fn())
return retrieve_ops_list
return VariablesAndOps(embedding_variables_by_table,
slot_variables_by_table,
load_ops, retrieve_ops)
def generate_enqueue_ops(self, sparse_features_list):
"""Generate enqueue ops.
Args:
sparse_features_list: a list of dictionary mapping from string
of feature names to sparse tensor. Each dictionary is for one
TPU core. Dictionaries for the same host should be contiguous
on the list.
Returns:
Ops to enqueue to TPU for embedding.
"""
self._validate_generate_enqueue_ops_sparse_features_list(
sparse_features_list)
return [
self._generate_enqueue_op(
sparse_features, device_ordinal=i % self._num_cores_per_host)
for i, sparse_features in enumerate(sparse_features_list)
]
def _validate_generate_enqueue_ops_sparse_features_list(
self, sparse_features_list):
"""Validate `sparse_features_list`."""
if len(sparse_features_list) != self._num_cores:
raise ValueError('Length of `sparse_features_list` should match the '
'number of cores; '
'`len(sparse_features_list)` is {}, '
'number of cores is {}.'.format(
len(sparse_features_list), self._num_cores))
feature_set = set(self._feature_to_table_dict.keys())
contiguous_device = None
for i, sparse_features in enumerate(sparse_features_list):
used_feature_set = set(sparse_features.keys())
# Check features are valid.
missing_feature_set = feature_set - used_feature_set
if missing_feature_set:
raise ValueError('`sparse_features_list[{}]` misses a feature that is '
'in `feature_to_config_dict`: {}.'.format(
i, missing_feature_set))
extra_feature_set = used_feature_set - feature_set
if extra_feature_set:
raise ValueError('`sparse_features_list[{}]` has a feature that is not '
'in `feature_to_config_dict`: {}.'.format(
i, extra_feature_set))
device = None
device_feature = None
for feature, tensor in six.iteritems(sparse_features):
combiner = self._table_to_config_dict[
self._feature_to_table_dict[feature]].combiner
if not isinstance(tensor, sparse_tensor.SparseTensor) and combiner:
raise ValueError('`sparse_features_list[{}]` has a feature that is '
'not mapped to `SparseTensor` and has a combiner. '
'`feature`: {}, combiner: {}'.format(
i, feature, combiner))
# Check all features are on the same device.
if device is None:
device = tensor.op.device
device_feature = feature
else:
if device != tensor.op.device:
raise ValueError('Devices are different between features in '
'`sparse_features_list[{}]`; '
'devices: {}, {}; features: {}, {}.'.format(
i, device, tensor.op.device, feature,
device_feature))
if i % self._num_cores_per_host:
if device != contiguous_device:
raise ValueError('We expect the `sparse_features` which are on the '
'same host to be contiguous in '
'`sparse_features_list`, '
'`sparse_features_list[{}]` is on device {}, '
'but is expected to be on device {}.'.format(
i, device, contiguous_device))
else:
contiguous_device = device
def _generate_enqueue_op(self, sparse_features, device_ordinal):
with ops.colocate_with(list(sparse_features.values())[0]):
sample_idcs, embedding_idcs, aggregation_weights, table_ids = (
self._format_for_tpu_embedding_sparse_tensor_batch(sparse_features))
return tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(
sample_idcs,
embedding_idcs,
aggregation_weights,
table_ids,
device_ordinal=device_ordinal,
combiners=self._combiners)
def _format_for_tpu_embedding_sparse_tensor_batch(self, sparse_features):
"""Format sparse features for `enqueue_tpu_embedding_sparse_tensor_batch()`.
Args:
sparse_features: a `Dict` of tensors for embedding. Can be sparse or
dense.
Returns:
Arguments for `enqueue_tpu_embedding_sparse_tensor_batch()`.
"""
sample_idcs, embedding_idcs, aggregation_weights, table_ids = (
list(), list(), list(), list())
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
for feature in features:
tensor = sparse_features[feature]
if not isinstance(tensor, sparse_tensor.SparseTensor):
sample_idcs.append(array_ops.zeros([0], dtype=dtypes.int32))
embedding_idcs.append(tensor)
else:
sample_idcs.append(tensor.indices)
embedding_idcs.append(tensor.values)
aggregation_weights.append(array_ops.zeros([0]))
table_ids.append(table_id)
return sample_idcs, embedding_idcs, aggregation_weights, table_ids
def get_activations(self):
"""Get activations for features.
This should be called within `computation` that is passed to
`tpu.replicate` and friends.
Returns:
A dictionary mapping from `String` of feature name to `Tensor`
of activation.
"""
recv_activations = tpu_ops.recv_tpu_embedding_activations(
num_outputs=len(self._table_to_config_dict),
config=self._config_proto.SerializeToString())
activations = collections.OrderedDict()
for table_id, table in enumerate(self._table_to_features_dict):
features = self._table_to_features_dict[table]
for lookup_id, feature in enumerate(features):
stride = len(self._table_to_features_dict[table])
activations[feature] = recv_activations[table_id][lookup_id::stride, :]
return activations
  def generate_send_gradients_op(self, feature_to_gradient_dict):
    """Send gradient to TPU embedding.

    Args:
      feature_to_gradient_dict: dict mapping feature names to gradient wrt
        activations.

    Returns:
      SendTPUEmbeddingGradients Op.

    Raises:
      RuntimeError: If `mode` is not `TRAINING`.
    """
    if self._mode != TRAINING:
      raise RuntimeError('Only in training mode gradients need to '
                         'be sent to TPU embedding; got mode {}.'
                         .format(self._mode))
    gradients = []
    for table in self._table_to_features_dict:
      features = self._table_to_features_dict[table]
      # Gradients for this table's features, in feature order.
      table_gradients = [
          feature_to_gradient_dict[feature] for feature in features
      ]
      # Interleave per-feature gradients row-wise so the row layout matches
      # the interleaved activations produced by `get_activations()`.
      interleaved_table_grads = array_ops.reshape(
          array_ops.stack(table_gradients, axis=1),
          [-1, table_gradients[0].shape[1]])
      gradients.append(interleaved_table_grads)
    # NOTE(review): uses `self.config_proto` while other methods in this class
    # use `self._config_proto` -- presumably a property alias exists elsewhere
    # in the class; confirm before refactoring.
    return tpu_ops.send_tpu_embedding_gradients(
        inputs=gradients, config=self.config_proto.SerializeToString())
def _validate_table_to_config_dict(table_to_config_dict):
  """Validate `table_to_config_dict`.

  Raises:
    ValueError: if any value is not a `TableConfig`.
  """
  for key, value in table_to_config_dict.items():
    if isinstance(value, TableConfig):
      continue
    raise ValueError('Value of `table_to_config_dict` must be of type '
                     '`TableConfig`, got {} for {}.'.format(type(value), key))
def _validate_feature_to_table_dict(table_to_config_dict,
feature_to_table_dict):
"""Validate `feature_to_table_dict`."""
used_table_set = set(feature_to_table_dict.values())
table_set = set(table_to_config_dict.keys())
unused_table_set = table_set - used_table_set
if unused_table_set:
raise ValueError('`table_to_config_dict` specifies table that is not '
'used in `feature_to_table_dict`: {}.'
.format(unused_table_set))
extra_table_set = used_table_set - table_set
if extra_table_set:
raise ValueError('`feature_to_table_dict` refers to a table that is not '
'specified in `table_to_config_dict`: {}.'
.format(extra_table_set))
def _validate_batch_size(batch_size, num_cores):
if batch_size % num_cores:
raise ValueError('`batch_size` is not a multiple of number of '
'cores. `batch_size`={}, `_num_cores`={}.'.format(
batch_size, num_cores))
def _validate_optimization_parameters(optimization_parameters):
  """Check that `optimization_parameters` is an `_OptimizationParameters`.

  Raises:
    ValueError: if `optimization_parameters` does not inherit from
      `_OptimizationParameters`.
  """
  if not isinstance(optimization_parameters, _OptimizationParameters):
    # Bug fix: the error message previously misspelled the class name as
    # '_OptimizationPramaters'.
    raise ValueError('`optimization_parameters` must inherit from '
                     '`_OptimizationParameters`. '
                     '`type(optimization_parameters)`={}'.format(
                         type(optimization_parameters)))
class _OptimizerHandler(object):
"""Interface class for handling optimizer specific logic."""
def __init__(self, optimization_parameters):
self._optimization_parameters = optimization_parameters
def set_optimization_parameters(self, table_descriptor):
raise NotImplementedError()
def get_default_slot_variable_names(self, table):
raise NotImplementedError()
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
raise NotImplementedError()
class _AdagradHandler(_OptimizerHandler):
  """Handles Adagrad specific logic."""
  def __init__(self, optimization_parameters):
    super(_AdagradHandler, self).__init__(optimization_parameters)
    # Populated lazily; maps table name -> accumulator slot variables.
    self._table_to_accumulator_variables_dict = {}
  def set_optimization_parameters(self, table_descriptor):
    # Adagrad carries no extra scalar fields; just mark the oneof as set.
    table_descriptor.optimization_parameters.adagrad.SetInParent()
  def get_default_slot_variable_names(self, table):
    return AdagradSlotVariableName('{}/{}'.format(table, 'Adagrad'))
  def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
                               table_config, table_variables):
    """Create accumulator slot variables plus load/retrieve op factories."""
    accumulator_initializer = init_ops.constant_initializer(
        self._optimization_parameters.initial_accumulator)
    # One accumulator shard per host, partitioned like the embedding table.
    accumulator_variables = _create_partitioned_variables(
        name=slot_variable_names.accumulator,
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=accumulator_initializer)
    slot_variables = AdagradSlotVariable(accumulator_variables)
    def load_ops_fn():
      """Returns the load ops for AdaGrad embedding tables.
      Returns:
        A list of ops to load embedding and slot variables from CPU to TPU.
      """
      load_op_list = []
      for host_id, table_variable, accumulator_variable in (zip(
          range(num_hosts), table_variables, accumulator_variables)):
        # Colocate the load with the host that owns this shard.
        with ops.colocate_with(table_variable):
          load_parameters_op = (
              tpu_ops.load_tpu_embedding_adagrad_parameters(
                  parameters=table_variable,
                  accumulators=accumulator_variable,
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
        load_op_list.append(load_parameters_op)
      return load_op_list
    def retrieve_ops_fn():
      """Returns the retrieve ops for AdaGrad embedding tables.
      Returns:
        A list of ops to retrieve embedding and slot variables from TPU to CPU.
      """
      retrieve_op_list = []
      for host_id, table_variable, accumulator_variable in (zip(
          range(num_hosts), table_variables, accumulator_variables)):
        with ops.colocate_with(table_variable):
          retrieved_table, retrieved_accumulator = (
              tpu_ops.retrieve_tpu_embedding_adagrad_parameters(
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
          # Assign both tensors back to their CPU-side variables as one op.
          retrieve_parameters_op = control_flow_ops.group(
              state_ops.assign(table_variable, retrieved_table),
              state_ops.assign(accumulator_variable, retrieved_accumulator))
        retrieve_op_list.append(retrieve_parameters_op)
      return retrieve_op_list
    return slot_variables, load_ops_fn, retrieve_ops_fn
class _AdamHandler(_OptimizerHandler):
  """Handles Adam specific logic."""
  def __init__(self, optimization_parameters):
    super(_AdamHandler, self).__init__(optimization_parameters)
    # Populated lazily; map table name -> first/second moment slot variables.
    self._table_to_m_variables_dict = {}
    self._table_to_v_variables_dict = {}
  def set_optimization_parameters(self, table_descriptor):
    """Copy the Adam hyper-parameters into `table_descriptor`."""
    table_descriptor.optimization_parameters.adam.beta1 = (
        self._optimization_parameters.beta1)
    table_descriptor.optimization_parameters.adam.beta2 = (
        self._optimization_parameters.beta2)
    table_descriptor.optimization_parameters.adam.epsilon = (
        self._optimization_parameters.epsilon)
    # The proto flags are inverted relative to the user-facing options.
    table_descriptor.optimization_parameters.adam.use_non_lazy_adam = (
        not self._optimization_parameters.lazy_adam)
    table_descriptor.optimization_parameters.adam.use_sum_inside_sqrt = (
        self._optimization_parameters.sum_inside_sqrt)
  def get_default_slot_variable_names(self, table):
    return AdamSlotVariableNames('{}/{}/m'.format(table, 'Adam'),
                                 '{}/{}/v'.format(table, 'Adam'))
  def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
                               table_config, table_variables):
    """Create m/v slot variables plus load/retrieve op factories."""
    m_initializer = init_ops.zeros_initializer()
    m_variables = _create_partitioned_variables(
        name=slot_variable_names.m,
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=m_initializer)
    v_initializer = init_ops.zeros_initializer()
    v_variables = _create_partitioned_variables(
        name=slot_variable_names.v,
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=v_initializer)
    slot_variables = AdamSlotVariables(m_variables, v_variables)
    def load_ops_fn():
      """Returns the load ops for Adam embedding tables.
      Returns:
        A list of ops to load embedding and slot variables from CPU to TPU.
      """
      load_op_list = []
      for host_id, table_variable, m_variable, v_variable in (zip(
          range(num_hosts), table_variables,
          m_variables, v_variables)):
        # Colocate the load with the host that owns this shard.
        with ops.colocate_with(table_variable):
          load_parameters_op = (
              tpu_ops.load_tpu_embedding_adam_parameters(
                  parameters=table_variable,
                  momenta=m_variable,
                  velocities=v_variable,
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
        load_op_list.append(load_parameters_op)
      return load_op_list
    def retrieve_ops_fn():
      """Returns the retrieve ops for Adam embedding tables.
      Returns:
        A list of ops to retrieve embedding and slot variables from TPU to CPU.
      """
      retrieve_op_list = []
      for host_id, table_variable, m_variable, v_variable in (zip(
          range(num_hosts), table_variables,
          m_variables, v_variables)):
        with ops.colocate_with(table_variable):
          retrieved_table, retrieved_m, retrieved_v = (
              tpu_ops.retrieve_tpu_embedding_adam_parameters(
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
          # Assign all three tensors back to their CPU-side variables at once.
          retrieve_parameters_op = control_flow_ops.group(
              state_ops.assign(table_variable, retrieved_table),
              state_ops.assign(m_variable, retrieved_m),
              state_ops.assign(v_variable, retrieved_v))
        retrieve_op_list.append(retrieve_parameters_op)
      return retrieve_op_list
    return slot_variables, load_ops_fn, retrieve_ops_fn
class _StochasticGradientDescentHandler(_OptimizerHandler):
  """Handles stochastic gradient descent specific logic."""
  def set_optimization_parameters(self, table_descriptor):
    # SGD carries no extra fields; just mark the oneof as set.
    (table_descriptor.optimization_parameters.stochastic_gradient_descent
     .SetInParent())
  def get_default_slot_variable_names(self, table):
    # SGD keeps no slot variables.
    return None
  def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
                               table_config, table_variables):
    """Create load/retrieve op factories; SGD has no slot variables."""
    del table_config
    def load_ops_fn():
      """Returns the load ops for SGD embedding tables.
      Returns:
        A list of ops to load embedding and slot variables from CPU to TPU.
      """
      load_op_list = []
      for host_id, table_variable in (zip(
          range(num_hosts), table_variables)):
        # Colocate the load with the host that owns this shard.
        with ops.colocate_with(table_variable):
          load_parameters_op = (
              tpu_ops
              .load_tpu_embedding_stochastic_gradient_descent_parameters(
                  parameters=table_variable,
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
        load_op_list.append(load_parameters_op)
      return load_op_list
    def retrieve_ops_fn():
      """Returns the retrieve ops for SGD embedding tables.
      Returns:
        A list of ops to retrieve embedding and slot variables from TPU to CPU.
      """
      retrieve_op_list = []
      for host_id, table_variable in (zip(
          range(num_hosts), table_variables)):
        with ops.colocate_with(table_variable):
          retrieved_table = (
              tpu_ops
              .retrieve_tpu_embedding_stochastic_gradient_descent_parameters(
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
          retrieve_parameters_op = control_flow_ops.group(
              state_ops.assign(table_variable, retrieved_table))
        retrieve_op_list.append(retrieve_parameters_op)
      return retrieve_op_list
    # No slot variables for SGD, hence the leading None.
    return None, load_ops_fn, retrieve_ops_fn
def _get_optimization_handler(optimization_parameters):
  """Return the `_OptimizerHandler` matching `optimization_parameters`.

  Raises:
    NotImplementedError: if the optimizer type is not supported.
  """
  if isinstance(optimization_parameters, AdagradParameters):
    return _AdagradHandler(optimization_parameters)
  elif isinstance(optimization_parameters, AdamParameters):
    return _AdamHandler(optimization_parameters)
  elif isinstance(optimization_parameters, StochasticGradientDescentParameters):
    return _StochasticGradientDescentHandler(optimization_parameters)
  else:
    # Bug fix: this previously *returned* a NotImplementedError instance
    # instead of raising it, silently handing callers an exception object.
    raise NotImplementedError()
def _create_ordered_dict(d):
"""Create an OrderedDict from Dict."""
return collections.OrderedDict((k, d[k]) for k in sorted(d))
def _create_combiners(table_to_config_dict, table_to_features_dict):
"""Create a per feature list of combiners, ordered by table."""
combiners = []
for table in table_to_config_dict:
combiner = table_to_config_dict[table].combiner or 'sum'
combiners.extend([combiner] * len(table_to_features_dict[table]))
return combiners
def _create_table_to_features_dict(feature_to_table_dict):
"""Create mapping from table to a list of its features."""
table_to_features_dict_tmp = {}
for feature, table in six.iteritems(feature_to_table_dict):
if table in table_to_features_dict_tmp:
table_to_features_dict_tmp[table].append(feature)
else:
table_to_features_dict_tmp[table] = [feature]
table_to_features_dict = collections.OrderedDict()
for table in sorted(table_to_features_dict_tmp):
table_to_features_dict[table] = sorted(table_to_features_dict_tmp[table])
return table_to_features_dict
def _create_device_fn(hosts):
"""Create device_fn() to use with _create_partitioned_variables()."""
def device_fn(op):
"""Returns the `device` for `op`."""
part_match = re.match(r'.*/part_(\d+)(/|$)', op.name)
if part_match:
idx = int(part_match.group(1))
else:
raise RuntimeError('Internal Error: '
'Expected %s to contain /part_*.' % op.name)
device = hosts[idx]
return device
return device_fn
def _create_partitioned_variables(name,
num_hosts,
vocabulary_size,
embedding_dimension,
initializer,
collections=None): # pylint: disable=redefined-outer-name
"""Creates ParitionedVariables based on `num_hosts` for `table`."""
# TODO(shizhiw): automatically place embedding lookup elsewhere?
if vocabulary_size < num_hosts:
raise ValueError('`vocabulary_size`({}) is smaller than `num_hosts`({}). '
'As TPU embedding is not optimized for small tables, '
'please consider other ways for this embedding lookup.')
return list(variable_scope.get_variable(
name,
shape=(vocabulary_size, embedding_dimension),
partitioner=partitioned_variables.fixed_size_partitioner(num_hosts),
dtype=dtypes.float32,
initializer=initializer,
collections=collections,
trainable=False))
| apache-2.0 |
BigxMac/firefox-ios | scripts/xliff-cleanup.py | 2 | 2807 | #!/usr/bin/env python
#
# xliff-cleanup.py <files>
#
# 1. Remove all <file> sections that we do not care about. We only care about the
# the one for our main app and those for our extensions.
#
# 2. Look at all remaining <file> sections and remove those strings that should not
# be localized. Currently that means: CFBundleDisplayName, CFBundleName and
# CFBundleShortVersionString.
#
# 3. Remove all remaining <file> sections that are now have no <trans-unit> nodes
# in their <body> anymore.
#
# Modifies files in place. Makes no backup.
#
import sys
from lxml import etree
# XLIFF 1.2 namespace map used by every XPath query below.
NS = {'x':'urn:oasis:names:tc:xliff:document:1.2'}
# Only the <file> sections for the main app and these extensions are kept.
FILES_TO_KEEP = ('Client/Info.plist',
                 'Extensions/ShareTo/Info.plist',
                 'Extensions/SendTo/Info.plist')
# Info.plist keys that must never be localized.
STRINGS_TO_REMOVE = ('CFBundleDisplayName',
                     'CFBundleName',
                     'CFBundleShortVersionString')
if __name__ == "__main__":
    # Each argument is an XLIFF file path; files are modified in place.
    for path in sys.argv[1:]:
        # Read it in and modify it in memory
        with open(path) as fp:
            tree = etree.parse(fp)
            root = tree.getroot()
        # 1. Remove sections we do not care about
        for file_node in root.xpath("//x:file", namespaces=NS):
            original = file_node.get('original')
            if original and original.endswith('Info.plist'):
                if file_node.get('original') not in FILES_TO_KEEP:
                    file_node.getparent().remove(file_node)
        # 2. Remove strings we don't want to be translated
        for file_node in root.xpath("//x:file", namespaces=NS):
            original = file_node.get('original')
            if original and original.endswith('Info.plist'):
                for trans_unit_node in file_node.xpath("./x:body/x:trans-unit", namespaces=NS):
                    id = trans_unit_node.get('id')
                    # TODO we should probably do the exception for SendTo in a nicer way with some kind of whitelist
                    if id and id in STRINGS_TO_REMOVE and not (original == "Extensions/SendTo/Info.plist" and id == "CFBundleDisplayName"):
                        trans_unit_node.getparent().remove(trans_unit_node)
        # 3. Remove empty file sections
        for file_node in root.xpath("//x:file", namespaces=NS):
            original = file_node.get('original')
            if original and original.endswith('Info.plist'):
                trans_unit_nodes = file_node.xpath("x:body/x:trans-unit", namespaces=NS)
                if len(trans_unit_nodes) == 0:
                    file_node.getparent().remove(file_node)
        # Write it back to the same file
        # NOTE(review): etree.tostring() returns bytes while the file is opened
        # in text mode -- presumably a Python 2 script; confirm before running
        # under Python 3.
        with open(path, "w") as fp:
            fp.write(etree.tostring(tree))
| mpl-2.0 |
eaas-framework/virtualbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Xml/ModuleSurfaceAreaXml.py | 11 | 36459 | ## @file
# This file is used to parse a Module file of .PKG file
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
ModuleSurfaceAreaXml
'''
from xml.dom import minidom
from Library.String import ConvertNEToNOTEQ
from Library.String import ConvertNOTEQToNE
from Library.String import GetStringOfList
from Library.Xml.XmlRoutines import XmlElement
from Library.Xml.XmlRoutines import XmlAttribute
from Library.Xml.XmlRoutines import XmlNode
from Library.Xml.XmlRoutines import XmlList
from Library.Xml.XmlRoutines import CreateXmlElement
from Object.POM.CommonObject import GuidVersionObject
from Object.POM.ModuleObject import BootModeObject
from Object.POM.ModuleObject import DepexObject
from Object.POM.ModuleObject import ModuleObject
from Object.POM.ModuleObject import EventObject
from Object.POM.ModuleObject import HobObject
from Object.POM.ModuleObject import SourceFileObject
from Object.POM.ModuleObject import PackageDependencyObject
from Object.POM.ModuleObject import ExternObject
from Object.POM.ModuleObject import BinaryFileObject
from Object.POM.ModuleObject import AsBuiltObject
from Object.POM.ModuleObject import BinaryBuildFlagObject
from Xml.CommonXml import ClonedFromXml
from Xml.CommonXml import HeaderXml
from Xml.CommonXml import HelpTextXml
from Xml.CommonXml import CommonDefinesXml
from Xml.CommonXml import LibraryClassXml
from Xml.CommonXml import UserExtensionsXml
from Xml.CommonXml import MiscellaneousFileXml
from Xml.CommonXml import FilenameXml
from Xml.GuidProtocolPpiXml import GuidXml
from Xml.GuidProtocolPpiXml import ProtocolXml
from Xml.GuidProtocolPpiXml import PpiXml
from Xml.PcdXml import PcdEntryXml
from Xml.XmlParserMisc import GetHelpTextList
from Library import GlobalData
from Library.Misc import GetSplitValueList
## BinaryFileXml
#
# represent the following XML item
#
# <BinaryFile>
# <Filename
# FileType=" FileType " {1}
# SupArchList=" ArchListType " {0,1}
# FeatureFlag=" FeatureFlagExpression " {0,1} >
# xs:anyURI
# </Filename> {1,}
# <AsBuilt> ... </AsBuilt> {0,}
# </BinaryFile> {1,}
#
class BinaryFileXml(object):
    """XML <-> POM adapter for <BinaryFile> sections of a module surface area.

    Parses <Filename> entries and, for binary INFs only, the <AsBuilt>
    payload (patchable PCDs, PCD ex values, library GUIDs, build flags).
    """
    def __init__(self):
        # Parsed <Filename> entries.
        self.FileNames = []
        # Parsed <AsBuilt> entries (binary INFs only).
        self.AsBuiltList = []
        self.PatchPcdValues = ''
        self.PcdExValues = ''
        self.LibraryInstances = ''
        self.BuildFlags = ''
    def FromXml(self, Item, Key):
        """Parse a <BinaryFile> node under `Key` into a BinaryFileObject."""
        if self.FileNames:
            pass
        BinaryFile = BinaryFileObject()
        FilenameList = []
        for SubItem in XmlList(Item, '%s/Filename' % Key):
            Axml = FilenameXml()
            Bxml = Axml.FromXml(SubItem, 'Filename')
            FilenameList.append(Bxml)
        BinaryFile.SetFileNameList(FilenameList)
        # <AsBuilt> data only exists when distributing a binary INF.
        if GlobalData.gIS_BINARY_INF:
            AsBuiltList = []
            for AsBuiltItem in XmlList(Item, '%s/AsBuilt' % Key):
                AsBuilt = AsBuiltObject()
                PatchPcdValueList = []
                for SubItem in XmlList(AsBuiltItem, 'AsBuilt/PatchPcdValue'):
                    Axml = PcdEntryXml()
                    Bxml = Axml.FromXml(SubItem, 'PatchPcdValue')
                    PatchPcdValueList.append(Bxml)
                AsBuilt.SetPatchPcdList(PatchPcdValueList)
                PcdExValueList = []
                for SubItem in XmlList(AsBuiltItem, 'AsBuilt/PcdExValue'):
                    Axml = PcdEntryXml()
                    Bxml = Axml.FromXml(SubItem, 'PcdExValue')
                    PcdExValueList.append(Bxml)
                AsBuilt.SetPcdExList(PcdExValueList)
                LibraryList = []
                for SubItem in XmlList(Item, '%s/AsBuilt/LibraryInstances/GUID' % Key):
                    GuidVerObj = GuidVersionObject()
                    GUID = XmlElement(SubItem, 'GUID')
                    Version = XmlAttribute(XmlNode(SubItem, 'GUID'), 'Version')
                    GuidVerObj.SetGuid(GUID)
                    GuidVerObj.SetVersion(Version)
                    LibraryList.append(GuidVerObj)
                # A present-but-empty <LibraryInstances> is modeled as [None].
                if XmlList(Item, '%s/AsBuilt/LibraryInstances' % Key) and not LibraryList:
                    LibraryList = [None]
                AsBuilt.SetLibraryInstancesList(LibraryList)
                BuildFlagList = []
                for SubItem in XmlList(Item, '%s/AsBuilt/BuildFlags' % Key):
                    BuildFlag = BuildFlagXml()
                    BuildFlagList.append(BuildFlag.FromXml2(SubItem, 'BuildFlags'))
                AsBuilt.SetBuildFlagsList(BuildFlagList)
                AsBuiltList.append(AsBuilt)
            BinaryFile.SetAsBuiltList(AsBuiltList)
        return BinaryFile
    def ToXml(self, BinaryFile, Key):
        """Serialize a BinaryFileObject back into a <BinaryFile> element."""
        if self.FileNames:
            pass
        NodeList = []
        FilenameList = BinaryFile.GetFileNameList()
        for Filename in FilenameList:
            Tmp = FilenameXml()
            NodeList.append(Tmp.ToXml(Filename, 'Filename'))
        if GlobalData.gIS_BINARY_INF:
            # NOTE(review): FromXml stores a *list* of AsBuiltObject via
            # SetAsBuiltList, yet the accessors below are called on the result
            # directly -- confirm GetAsBuiltList() returns a single object here.
            AsBuildList = BinaryFile.GetAsBuiltList()
            PatchPcdValueList = AsBuildList.GetPatchPcdList()
            PcdExList = AsBuildList.GetPcdExList()
            LibGuidVerList = AsBuildList.GetLibraryInstancesList()
            BuildFlagList = AsBuildList.GetBuildFlagsList()
            AsBuiltNodeList = []
            for Pcd in PatchPcdValueList:
                Tmp = PcdEntryXml()
                AsBuiltNodeList.append(Tmp.ToXml4(Pcd, 'PatchPcdValue'))
            for Pcd in PcdExList:
                Tmp = PcdEntryXml()
                AsBuiltNodeList.append(Tmp.ToXml4(Pcd, 'PcdExValue'))
            GuiVerElemList = []
            for LibGuidVer in LibGuidVerList:
                GuiVerElem = \
                CreateXmlElement('GUID', LibGuidVer.GetLibGuid(), [], [['Version', LibGuidVer.GetLibVersion()]])
                GuiVerElemList.append(GuiVerElem)
            if len(GuiVerElemList) > 0:
                LibGuidVerElem = CreateXmlElement('LibraryInstances', '', GuiVerElemList, [])
                AsBuiltNodeList.append(LibGuidVerElem)
            for BuildFlag in BuildFlagList:
                Tmp = BuildFlagXml()
                Elem = CreateXmlElement('BuildFlags', ''.join(BuildFlag), [], [])
                AsBuiltNodeList.append(Elem)
            if len(AsBuiltNodeList) > 0:
                Element = CreateXmlElement('AsBuilt', '', AsBuiltNodeList, [])
                NodeList.append(Element)
        Root = CreateXmlElement('%s' % Key, '', NodeList, [])
        return Root
    def __str__(self):
        Str = "BinaryFiles:"
        for Item in self.FileNames:
            Str = Str + '\n\t' + str(Item)
        for Item in self.PatchPcdValues:
            Str = Str + '\n\t' + str(Item)
        for Item in self.PcdExValues:
            Str = Str + '\n\t' + str(Item)
        for Item in self.LibraryInstances:
            Str = Str + '\n\t' + str(Item)
        for Item in self.BuildFlags:
            Str = Str + '\n\t' + str(Item)
        return Str
##
# PackageXml
#
class PackageXml(object):
    """XML <-> POM adapter for package-dependency sections."""
    def __init__(self):
        self.Description = ''
        self.Guid = ''
        self.Version = ''
        self.CommonDefines = CommonDefinesXml()
    def FromXml(self, Item, Key):
        """Parse a package-dependency node into a PackageDependencyObject."""
        self.Description = XmlElement(Item, '%s/Description' % Key)
        self.Guid = XmlElement(Item, '%s/GUID' % Key)
        self.Version = XmlAttribute(XmlNode(Item, '%s/GUID' % Key), 'Version')
        self.CommonDefines.FromXml(XmlNode(Item, '%s' % Key), Key)
        Dependency = PackageDependencyObject()
        Dependency.SetPackage(self.Description)
        Dependency.SetGuid(self.Guid)
        Dependency.SetVersion(self.Version)
        # Feature flags are stored with NOT/EQ spelled out; normalize here.
        Dependency.SetFeatureFlag(ConvertNOTEQToNE(self.CommonDefines.FeatureFlag))
        Dependency.SetSupArchList(self.CommonDefines.SupArchList)
        return Dependency
    def ToXml(self, PackageDependency, Key):
        """Serialize a PackageDependencyObject back to XML."""
        if self.Guid:
            pass
        AttributeList = [
            ['SupArchList', GetStringOfList(PackageDependency.GetSupArchList())],
            ['FeatureFlag', ConvertNEToNOTEQ(PackageDependency.GetFeatureFlag())],
        ]
        GuidElement = CreateXmlElement('GUID', PackageDependency.GetGuid(), [],
                                       [['Version', PackageDependency.GetVersion()]])
        NodeList = [['Description', PackageDependency.GetPackage()], GuidElement]
        return CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
    def __str__(self):
        return "Description = %s Guid = %s Version = %s %s" \
               % (self.Description, self.Guid, self.Version, self.CommonDefines)
##
# ExternXml
#
class ExternXml(object):
    """XML <-> POM adapter for <Extern> sections (entry points, ctors/dtors)."""
    def __init__(self):
        self.CommonDefines = CommonDefinesXml()
        self.EntryPoint = ''
        self.UnloadImage = ''
        self.Constructor = ''
        self.Destructor = ''
        self.SupModList = ''
        self.SupArchList = ''
        self.HelpText = []
    def FromXml(self, Item, Key):
        """Parse an <Extern> node under `Key` into an ExternObject."""
        self.CommonDefines.FromXml(Item, Key)
        self.EntryPoint = XmlElement(Item, '%s/EntryPoint' % Key)
        self.UnloadImage = XmlElement(Item, '%s/UnloadImage' % Key)
        self.Constructor = XmlElement(Item, '%s/Constructor' % Key)
        self.Destructor = XmlElement(Item, '%s/Destructor' % Key)
        Extern = ExternObject()
        Extern.SetEntryPoint(self.EntryPoint)
        Extern.SetUnloadImage(self.UnloadImage)
        Extern.SetConstructor(self.Constructor)
        Extern.SetDestructor(self.Destructor)
        # Only override the object's defaults when the XML carried the lists.
        if self.CommonDefines.SupModList:
            Extern.SetSupModList(self.CommonDefines.SupModList)
        if self.CommonDefines.SupArchList:
            Extern.SetSupArchList(self.CommonDefines.SupArchList)
        return Extern
    def ToXml(self, Extern, Key):
        """Serialize an ExternObject back to XML; empty fields are omitted."""
        if self.HelpText:
            pass
        NodeList = []
        if Extern.GetEntryPoint():
            NodeList.append(['EntryPoint', Extern.GetEntryPoint()])
        if Extern.GetUnloadImage():
            NodeList.append(['UnloadImage', Extern.GetUnloadImage()])
        if Extern.GetConstructor():
            NodeList.append(['Constructor', Extern.GetConstructor()])
        if Extern.GetDestructor():
            NodeList.append(['Destructor', Extern.GetDestructor()])
        Root = CreateXmlElement('%s' % Key, '', NodeList, [])
        return Root
    def __str__(self):
        Str = "EntryPoint = %s UnloadImage = %s Constructor = %s Destructor = %s %s" \
              % (self.EntryPoint, self.UnloadImage, self.Constructor, self.Destructor, self.CommonDefines)
        for Item in self.HelpText:
            Str = Str + '\n\t' + str(Item)
        return Str
##
# DepexXml
#
class DepexXml(object):
    """XML <-> POM adapter for dependency-expression (depex) sections."""
    def __init__(self):
        self.CommonDefines = CommonDefinesXml()
        self.Expression = None
        self.HelpText = []
    def FromXml(self, Item, Key):
        """Parse a depex node into a DepexObject; returns None for no node."""
        if not Item:
            return None
        self.CommonDefines.FromXml(Item, Key)
        self.Expression = XmlElement(Item, '%s/Expression' % Key)
        for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
            HelpTextObj = HelpTextXml()
            HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
            self.HelpText.append(HelpTextObj)
        Depex = DepexObject()
        Depex.SetDepex(self.Expression)
        Depex.SetModuleType(self.CommonDefines.SupModList)
        Depex.SetSupArchList(self.CommonDefines.SupArchList)
        Depex.SetFeatureFlag(self.CommonDefines.FeatureFlag)
        Depex.SetHelpTextList(GetHelpTextList(self.HelpText))
        return Depex
    def ToXml(self, Depex, Key):
        """Serialize a DepexObject back to XML."""
        if self.HelpText:
            pass
        AttributeList = [['SupArchList', GetStringOfList(Depex.GetSupArchList())],
                         ['SupModList', Depex.GetModuleType()]]
        NodeList = [['Expression', Depex.GetDepex()]]
        # NOTE(review): FromXml collects a HelpText *list*, but only a single
        # GetHelpText() result is serialized here -- confirm this asymmetry.
        if Depex.GetHelpText():
            Tmp = HelpTextXml()
            NodeList.append(Tmp.ToXml(Depex.GetHelpText(), 'HelpText'))
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
    def __str__(self):
        Str = "Expression = %s" % (self.Expression)
        for Item in self.HelpText:
            Str = Str + '\n\t' + str(Item)
        return Str
##
# BootModeXml
#
class BootModeXml(object):
    """XML <-> POM adapter for <BootMode> usage declarations."""
    def __init__(self):
        self.SupportedBootModes = ''
        self.CommonDefines = CommonDefinesXml()
        self.HelpText = []
    def FromXml(self, Item, Key):
        """Parse a <BootMode> node under `Key` into a BootModeObject."""
        self.SupportedBootModes = \
            XmlElement(Item, '%s/SupportedBootModes' % Key)
        self.CommonDefines.FromXml(Item, Key)
        for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
            HelpTextObj = HelpTextXml()
            HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
            self.HelpText.append(HelpTextObj)
        BootMode = BootModeObject()
        BootMode.SetSupportedBootModes(self.SupportedBootModes)
        BootMode.SetUsage(self.CommonDefines.Usage)
        BootMode.SetHelpTextList(GetHelpTextList(self.HelpText))
        return BootMode
    def ToXml(self, BootMode, Key):
        """Serialize a BootModeObject back to XML."""
        if self.HelpText:
            pass
        AttributeList = [['Usage', BootMode.GetUsage()], ]
        NodeList = [['SupportedBootModes', BootMode.GetSupportedBootModes()]]
        for Item in BootMode.GetHelpTextList():
            Tmp = HelpTextXml()
            NodeList.append(Tmp.ToXml(Item, 'HelpText'))
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
    def __str__(self):
        Str = "SupportedBootModes = %s %s" % (self.SupportedBootModes, self.CommonDefines)
        for Item in self.HelpText:
            Str = Str + '\n\t' + str(Item)
        return Str
##
# EventXml
#
class EventXml(object):
    """XML <-> POM adapter for <Event> usage declarations."""
    def __init__(self):
        self.EventType = ''
        self.Name = ''
        self.CommonDefines = CommonDefinesXml()
        self.HelpText = []
    def FromXml(self, Item, Key):
        """Parse an <Event> node under `Key` into an EventObject."""
        self.EventType = XmlAttribute(XmlNode(Item, '%s' % Key), 'EventType')
        self.Name = XmlElement(Item, '%s' % Key)
        self.CommonDefines.FromXml(Item, Key)
        for HelpTextNode in XmlList(Item, '%s/HelpText' % Key):
            Parsed = HelpTextXml()
            Parsed.FromXml(HelpTextNode, '%s/HelpText' % Key)
            self.HelpText.append(Parsed)
        Event = EventObject()
        Event.SetEventType(self.EventType)
        Event.SetUsage(self.CommonDefines.Usage)
        Event.SetHelpTextList(GetHelpTextList(self.HelpText))
        return Event
    def ToXml(self, Event, Key):
        """Serialize an EventObject back to XML."""
        if self.HelpText:
            pass
        AttributeList = [
            ['EventType', Event.GetEventType()],
            ['Usage', Event.GetUsage()],
        ]
        NodeList = [HelpTextXml().ToXml(HelpItem, 'HelpText')
                    for HelpItem in Event.GetHelpTextList()]
        return CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
    def __str__(self):
        Lines = ["EventType = %s %s" % (self.EventType, self.CommonDefines)]
        Lines.extend(str(Item) for Item in self.HelpText)
        return '\n\t'.join(Lines)
##
# HobXml
#
class HobXml(object):
    """XML <-> POM adapter for <HOB> usage declarations."""
    def __init__(self):
        self.HobType = ''
        self.Name = ''
        self.CommonDefines = CommonDefinesXml()
        self.HelpText = []
    def FromXml(self, Item, Key):
        """Parse a <HOB> node under `Key` into a HobObject."""
        self.HobType = XmlAttribute(XmlNode(Item, '%s' % Key), 'HobType')
        self.Name = XmlElement(Item, '%s' % Key)
        self.CommonDefines.FromXml(Item, Key)
        for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
            HelpTextObj = HelpTextXml()
            HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
            self.HelpText.append(HelpTextObj)
        Hob = HobObject()
        Hob.SetHobType(self.HobType)
        Hob.SetSupArchList(self.CommonDefines.SupArchList)
        Hob.SetUsage(self.CommonDefines.Usage)
        Hob.SetHelpTextList(GetHelpTextList(self.HelpText))
        return Hob
    def ToXml(self, Hob, Key):
        """Serialize a HobObject back to XML."""
        if self.Name:
            pass
        AttributeList = [['HobType', Hob.GetHobType()],
                         ['Usage', Hob.GetUsage()],
                         ['SupArchList', GetStringOfList(Hob.GetSupArchList())], ]
        NodeList = []
        for Item in Hob.GetHelpTextList():
            Tmp = HelpTextXml()
            NodeList.append(Tmp.ToXml(Item, 'HelpText'))
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
    def __str__(self):
        Str = "HobType = %s %s" % (self.HobType, self.CommonDefines)
        for Item in self.HelpText:
            Str = Str + '\n\t' + str(Item)
        return Str
##
# SourceFileXml
#
class SourceFileXml(object):
    """XML <-> POM adapter for module source-file entries."""
    def __init__(self):
        self.SourceFile = ''
        self.ToolChainFamily = ''
        self.FileType = ''
        self.CommonDefines = CommonDefinesXml()
    def FromXml(self, Item, Key):
        """Parse a source-file node into a SourceFileObject."""
        self.ToolChainFamily = XmlAttribute(Item, 'Family')
        self.SourceFile = XmlElement(Item, 'Filename')
        self.CommonDefines.FromXml(Item, Key)
        # Feature flags are stored with NOT/EQ spelled out; normalize here.
        self.CommonDefines.FeatureFlag = ConvertNOTEQToNE(self.CommonDefines.FeatureFlag)
        SourceFile = SourceFileObject()
        SourceFile.SetSourceFile(self.SourceFile)
        SourceFile.SetFamily(self.ToolChainFamily)
        SourceFile.SetSupArchList(self.CommonDefines.SupArchList)
        SourceFile.SetFeatureFlag(self.CommonDefines.FeatureFlag)
        return SourceFile
    def ToXml(self, SourceFile, Key):
        """Serialize a SourceFileObject back to XML."""
        if self.SourceFile:
            pass
        AttributeList = [
            ['SupArchList', GetStringOfList(SourceFile.GetSupArchList())],
            ['Family', SourceFile.GetFamily()],
            ['FeatureFlag', ConvertNEToNOTEQ(SourceFile.GetFeatureFlag())],
        ]
        return CreateXmlElement('%s' % Key, SourceFile.GetSourceFile(), [], AttributeList)
##
# ModulePropertyXml
#
class ModulePropertyXml(object):
    """XML <-> POM adapter for the <ModuleProperties> section.

    Carries the module type, path, specification versions and the
    BootMode/Event/HOB usage declarations.
    """
    def __init__(self):
        self.CommonDefines = CommonDefinesXml()
        self.ModuleType = ''
        self.Path = ''
        self.PcdIsDriver = ''
        self.UefiSpecificationVersion = ''
        self.PiSpecificationVersion = ''
        self.SpecificationList = []
        self.SpecificationVersion = ''
        self.BootModes = []
        self.Events = []
        self.HOBs = []
    def FromXml(self, Item, Key, Header=None):
        """Parse module properties into `Header` (a ModuleObject, created when
        omitted) and return (Header, BootModes, Events, HOBs)."""
        self.CommonDefines.FromXml(Item, Key)
        self.ModuleType = XmlElement(Item, '%s/ModuleType' % Key)
        self.Path = XmlElement(Item, '%s/Path' % Key)
        self.PcdIsDriver = XmlElement(Item, '%s/PcdIsDriver' % Key)
        self.UefiSpecificationVersion = XmlElement(Item, '%s/UefiSpecificationVersion' % Key)
        self.PiSpecificationVersion = XmlElement(Item, '%s/PiSpecificationVersion' % Key)
        for SubItem in XmlList(Item, '%s/Specification' % Key):
            Specification = XmlElement(SubItem, '/Specification')
            Version = XmlAttribute(XmlNode(SubItem, '/Specification'), 'Version')
            self.SpecificationList.append((Specification, Version))
        for SubItem in XmlList(Item, '%s/BootMode' % Key):
            Axml = BootModeXml()
            BootMode = Axml.FromXml(SubItem, 'BootMode')
            self.BootModes.append(BootMode)
        for SubItem in XmlList(Item, '%s/Event' % Key):
            Axml = EventXml()
            Event = Axml.FromXml(SubItem, 'Event')
            self.Events.append(Event)
        for SubItem in XmlList(Item, '%s/HOB' % Key):
            Axml = HobXml()
            Hob = Axml.FromXml(SubItem, 'HOB')
            self.HOBs.append(Hob)
        # Idiom fix: compare with `is None` rather than `== None` (avoids
        # invoking any custom __eq__ on caller-supplied objects).
        if Header is None:
            Header = ModuleObject()
        Header.SetModuleType(self.ModuleType)
        Header.SetSupArchList(self.CommonDefines.SupArchList)
        Header.SetModulePath(self.Path)
        Header.SetPcdIsDriver(self.PcdIsDriver)
        Header.SetUefiSpecificationVersion(self.UefiSpecificationVersion)
        Header.SetPiSpecificationVersion(self.PiSpecificationVersion)
        Header.SetSpecList(self.SpecificationList)
        return Header, self.BootModes, self.Events, self.HOBs
    def ToXml(self, Header, BootModes, Events, Hobs, Key):
        """Serialize module properties back into a `Key` XML element."""
        if self.ModuleType:
            pass
        AttributeList = [['SupArchList', GetStringOfList(Header.GetSupArchList())], ]
        NodeList = [['ModuleType', Header.GetModuleType()],
                    ['Path', Header.GetModulePath()],
                    ['PcdIsDriver', Header.GetPcdIsDriver()],
                    ['UefiSpecificationVersion', Header.GetUefiSpecificationVersion()],
                    ['PiSpecificationVersion', Header.GetPiSpecificationVersion()],
                   ]
        for Item in Header.GetSpecList():
            Spec, Version = Item
            SpecElem = CreateXmlElement('Specification', Spec, [], [['Version', Version]])
            NodeList.append(SpecElem)
        for Item in BootModes:
            Tmp = BootModeXml()
            NodeList.append(Tmp.ToXml(Item, 'BootMode'))
        for Item in Events:
            Tmp = EventXml()
            NodeList.append(Tmp.ToXml(Item, 'Event'))
        for Item in Hobs:
            Tmp = HobXml()
            NodeList.append(Tmp.ToXml(Item, 'HOB'))
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
    def __str__(self):
        Str = "ModuleType = %s Path = %s PcdIsDriver = %s UefiSpecificationVersion = %s PiSpecificationVersion = %s \
        Specification = %s SpecificationVersion = %s %s" % \
        (self.ModuleType, self.Path, self.PcdIsDriver, \
         self.UefiSpecificationVersion, self.PiSpecificationVersion, \
         self.SpecificationList, self.SpecificationVersion, self.CommonDefines)
        for Item in self.BootModes:
            Str = Str + '\n\t' + str(Item)
        for Item in self.Events:
            Str = Str + '\n\t' + str(Item)
        for Item in self.HOBs:
            Str = Str + '\n\t' + str(Item)
        return Str
##
# ModuleXml
#
class ModuleSurfaceAreaXml(object):
    """XML <-> object mapping for an entire <ModuleSurfaceArea> document.

    FromXml() (with its second pass FromXml2()) deserializes the DOM tree
    into a module object -- header, properties, library classes, source and
    binary files, package dependencies, GUIDs/protocols/PPIs, externs,
    PCDs, depex sections, misc files and user extensions.  ToXml()
    performs the reverse serialization.
    """
    def __init__(self, Package=''):
        # Deserialized module object; set by FromXml().
        self.Module = None
        #
        # indicate the package that this module resides in
        #
        self.Package = Package
    def FromXml2(self, Item, Module):
        """Second deserialization pass: fill depex sections, misc files and
        user extensions into 'Module'.  Called from FromXml() with the same
        root DOM node; returns the updated Module."""
        # No-op touch of self; presumably only to avoid a
        # method-could-be-a-function style warning -- TODO confirm.
        if self.Module:
            pass
        #
        # PeiDepex
        #
        PeiDepexList = []
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/PeiDepex'):
            Tmp = DepexXml()
            Depex = Tmp.FromXml(XmlNode(SubItem, 'PeiDepex'), 'PeiDepex')
            PeiDepexList.append(Depex)
        Module.SetPeiDepex(PeiDepexList)
        #
        # DxeDepex
        #
        DxeDepexList = []
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/DxeDepex'):
            Tmp = DepexXml()
            Depex = Tmp.FromXml(XmlNode(SubItem, 'DxeDepex'), 'DxeDepex')
            DxeDepexList.append(Depex)
        Module.SetDxeDepex(DxeDepexList)
        #
        # SmmDepex
        #
        SmmDepexList = []
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/SmmDepex'):
            Tmp = DepexXml()
            Depex = Tmp.FromXml(XmlNode(SubItem, 'SmmDepex'), 'SmmDepex')
            SmmDepexList.append(Depex)
        Module.SetSmmDepex(SmmDepexList)
        #
        # MiscellaneousFile
        Tmp = MiscellaneousFileXml()
        MiscFileList = Tmp.FromXml(XmlNode(Item, '/ModuleSurfaceArea/MiscellaneousFiles'), 'MiscellaneousFiles')
        # Misc file list is stored as a one-element list (or empty).
        if MiscFileList:
            Module.SetMiscFileList([MiscFileList])
        else:
            Module.SetMiscFileList([])
        #
        # UserExtensions
        #
        # NOTE(review): the loop variable shadows the 'Item' parameter.
        # Harmless here because this is the parameter's last use, but worth
        # renaming.
        for Item in XmlList(Item, '/ModuleSurfaceArea/UserExtensions'):
            Tmp = UserExtensionsXml()
            UserExtension = Tmp.FromXml(Item, 'UserExtensions')
            Module.SetUserExtensionList(Module.GetUserExtensionList() + [UserExtension])
        return Module
    def FromXml(self, Item, Key, IsStandAlongModule=False):
        """Deserialize the <ModuleSurfaceArea> DOM node 'Item' into a module
        object and cache it on self.Module.

        For each optional list section (library classes, sources, binaries,
        package dependencies, GUIDs, protocols, PPIs, externs, PCDs) a
        present-but-empty container element is recorded as [None] to
        distinguish it from an absent section.
        """
        IsBinaryModule = XmlAttribute(Item, 'BinaryModule')
        #
        # Header
        #
        Tmp = HeaderXml()
        Module = Tmp.FromXml(XmlNode(Item, '/%s/Header' % Key), 'Header', True, IsStandAlongModule)
        Module.SetBinaryModule(IsBinaryModule)
        if IsBinaryModule:
            GlobalData.gIS_BINARY_INF = True
        #
        # ModuleProperties
        #
        Tmp = ModulePropertyXml()
        (Module, BootModes, Events, HOBs) = \
            Tmp.FromXml(XmlNode(Item, '/ModuleSurfaceArea/ModuleProperties'), 'ModuleProperties', Module)
        Module.SetBootModeList(BootModes)
        Module.SetEventList(Events)
        Module.SetHobList(HOBs)
        #
        # ClonedFrom
        #
        Tmp = ClonedFromXml()
        ClonedFrom = Tmp.FromXml(XmlNode(Item, '/ModuleSurfaceArea/ClonedFrom'), 'ClonedFrom')
        if ClonedFrom:
            Module.SetClonedFrom(ClonedFrom)
        #
        # LibraryClass
        #
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/LibraryClassDefinitions/LibraryClass'):
            Tmp = LibraryClassXml()
            LibraryClass = Tmp.FromXml(SubItem, 'LibraryClass')
            Module.SetLibraryClassList(Module.GetLibraryClassList() + [LibraryClass])
        if XmlList(Item, '/ModuleSurfaceArea/LibraryClassDefinitions') and \
           not XmlList(Item, '/ModuleSurfaceArea/LibraryClassDefinitions/LibraryClass'):
            Module.SetLibraryClassList([None])
        #
        # SourceFiles
        #
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/SourceFiles/Filename'):
            Tmp = SourceFileXml()
            SourceFile = Tmp.FromXml(SubItem, 'Filename')
            Module.SetSourceFileList(Module.GetSourceFileList() + [SourceFile])
        if XmlList(Item, '/ModuleSurfaceArea/SourceFiles') and \
           not XmlList(Item, '/ModuleSurfaceArea/SourceFiles/Filename') :
            Module.SetSourceFileList([None])
        #
        # BinaryFile
        #
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/BinaryFiles/BinaryFile'):
            Tmp = BinaryFileXml()
            BinaryFile = Tmp.FromXml(SubItem, 'BinaryFile')
            Module.SetBinaryFileList(Module.GetBinaryFileList() + [BinaryFile])
        if XmlList(Item, '/ModuleSurfaceArea/BinaryFiles') and \
           not XmlList(Item, '/ModuleSurfaceArea/BinaryFiles/BinaryFile') :
            Module.SetBinaryFileList([None])
        #
        # PackageDependencies
        #
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/PackageDependencies/Package'):
            Tmp = PackageXml()
            PackageDependency = Tmp.FromXml(SubItem, 'Package')
            Module.SetPackageDependencyList(Module.GetPackageDependencyList() + [PackageDependency])
        if XmlList(Item, '/ModuleSurfaceArea/PackageDependencies') and \
           not XmlList(Item, '/ModuleSurfaceArea/PackageDependencies/Package'):
            Module.SetPackageDependencyList([None])
        #
        # Guid
        #
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/Guids/GuidCName'):
            Tmp = GuidXml('Module')
            GuidProtocolPpi = Tmp.FromXml(SubItem, 'GuidCName')
            Module.SetGuidList(Module.GetGuidList() + [GuidProtocolPpi])
        if XmlList(Item, '/ModuleSurfaceArea/Guids') and not XmlList(Item, '/ModuleSurfaceArea/Guids/GuidCName'):
            Module.SetGuidList([None])
        #
        # Protocol
        #
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/Protocols/Protocol'):
            Tmp = ProtocolXml('Module')
            GuidProtocolPpi = Tmp.FromXml(SubItem, 'Protocol')
            Module.SetProtocolList(Module.GetProtocolList() + [GuidProtocolPpi])
        if XmlList(Item, '/ModuleSurfaceArea/Protocols') and not XmlList(Item, '/ModuleSurfaceArea/Protocols/Protocol'):
            Module.SetProtocolList([None])
        #
        # Ppi
        #
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/PPIs/Ppi'):
            Tmp = PpiXml('Module')
            GuidProtocolPpi = Tmp.FromXml(SubItem, 'Ppi')
            Module.SetPpiList(Module.GetPpiList() + [GuidProtocolPpi])
        if XmlList(Item, '/ModuleSurfaceArea/PPIs') and not XmlList(Item, '/ModuleSurfaceArea/PPIs/Ppi'):
            Module.SetPpiList([None])
        #
        # Extern
        #
        for SubItem in XmlList(Item, '/ModuleSurfaceArea/Externs/Extern'):
            Tmp = ExternXml()
            Extern = Tmp.FromXml(SubItem, 'Extern')
            Module.SetExternList(Module.GetExternList() + [Extern])
        if XmlList(Item, '/ModuleSurfaceArea/Externs') and not XmlList(Item, '/ModuleSurfaceArea/Externs/Extern'):
            Module.SetExternList([None])
        # PCDs are only meaningful for source modules.
        if not Module.GetBinaryModule():
            #
            # PcdCoded
            #
            for SubItem in XmlList(Item, '/ModuleSurfaceArea/PcdCoded/PcdEntry'):
                Tmp = PcdEntryXml()
                PcdEntry = Tmp.FromXml3(SubItem, 'PcdEntry')
                Module.SetPcdList(Module.GetPcdList() + [PcdEntry])
            if XmlList(Item, '/ModuleSurfaceArea/PcdCoded') and \
               not XmlList(Item, '/ModuleSurfaceArea/PcdCoded/PcdEntry'):
                Module.SetPcdList([None])
        Module = self.FromXml2(Item, Module)
        #
        # return the module object
        #
        self.Module = Module
        return self.Module
    def ToXml(self, Module):
        """Serialize 'Module' back into a <ModuleSurfaceArea> DOM element,
        mirroring the section order used by FromXml()."""
        # No-op touch of self; presumably only to avoid a
        # method-could-be-a-function style warning -- TODO confirm.
        if self.Package:
            pass
        #
        # Create root node of module surface area
        #
        DomModule = minidom.Document().createElement('ModuleSurfaceArea')
        if Module.GetBinaryModule():
            DomModule.setAttribute('BinaryModule', 'true')
        #
        # Header
        #
        Tmp = HeaderXml()
        DomModule.appendChild(Tmp.ToXml(Module, 'Header'))
        #
        # ModuleProperties
        #
        Tmp = ModulePropertyXml()
        DomModule.appendChild(Tmp.ToXml(Module, Module.GetBootModeList(), Module.GetEventList(), Module.GetHobList(), \
                                        'ModuleProperties'))
        #
        # ClonedFrom
        #
        Tmp = ClonedFromXml()
        if Module.GetClonedFrom():
            DomModule.appendChild(Tmp.ToXml(Module.GetClonedFrom(), 'ClonedFrom'))
        #
        # LibraryClass
        #
        LibraryClassNode = CreateXmlElement('LibraryClassDefinitions', '', [], [])
        for LibraryClass in Module.GetLibraryClassList():
            Tmp = LibraryClassXml()
            LibraryClassNode.appendChild(Tmp.ToXml2(LibraryClass, 'LibraryClass'))
        DomModule.appendChild(LibraryClassNode)
        #
        # SourceFile
        #
        SourceFileNode = CreateXmlElement('SourceFiles', '', [], [])
        for SourceFile in Module.GetSourceFileList():
            Tmp = SourceFileXml()
            SourceFileNode.appendChild(Tmp.ToXml(SourceFile, 'Filename'))
        DomModule.appendChild(SourceFileNode)
        #
        # BinaryFile
        #
        BinaryFileNode = CreateXmlElement('BinaryFiles', '', [], [])
        for BinaryFile in Module.GetBinaryFileList():
            Tmp = BinaryFileXml()
            BinaryFileNode.appendChild(Tmp.ToXml(BinaryFile, 'BinaryFile'))
        DomModule.appendChild(BinaryFileNode)
        #
        # PackageDependencies
        #
        PackageDependencyNode = CreateXmlElement('PackageDependencies', '', [], [])
        for PackageDependency in Module.GetPackageDependencyList():
            Tmp = PackageXml()
            PackageDependencyNode.appendChild(Tmp.ToXml(PackageDependency, 'Package'))
        DomModule.appendChild(PackageDependencyNode)
        #
        # Guid
        #
        GuidProtocolPpiNode = CreateXmlElement('Guids', '', [], [])
        for GuidProtocolPpi in Module.GetGuidList():
            Tmp = GuidXml('Module')
            GuidProtocolPpiNode.appendChild(Tmp.ToXml(GuidProtocolPpi, 'GuidCName'))
        DomModule.appendChild(GuidProtocolPpiNode)
        #
        # Protocol
        #
        GuidProtocolPpiNode = CreateXmlElement('Protocols', '', [], [])
        for GuidProtocolPpi in Module.GetProtocolList():
            Tmp = ProtocolXml('Module')
            GuidProtocolPpiNode.appendChild(Tmp.ToXml(GuidProtocolPpi, 'Protocol'))
        DomModule.appendChild(GuidProtocolPpiNode)
        #
        # Ppi
        #
        GuidProtocolPpiNode = CreateXmlElement('PPIs', '', [], [])
        for GuidProtocolPpi in Module.GetPpiList():
            Tmp = PpiXml('Module')
            GuidProtocolPpiNode.appendChild(Tmp.ToXml(GuidProtocolPpi, 'Ppi'))
        DomModule.appendChild(GuidProtocolPpiNode)
        #
        # Extern
        #
        ExternNode = CreateXmlElement('Externs', '', [], [])
        for Extern in Module.GetExternList():
            Tmp = ExternXml()
            ExternNode.appendChild(Tmp.ToXml(Extern, 'Extern'))
        DomModule.appendChild(ExternNode)
        #
        # PcdCoded
        #
        PcdEntryNode = CreateXmlElement('PcdCoded', '', [], [])
        for PcdEntry in Module.GetPcdList():
            Tmp = PcdEntryXml()
            PcdEntryNode.appendChild(Tmp.ToXml3(PcdEntry, 'PcdEntry'))
        DomModule.appendChild(PcdEntryNode)
        #
        # PeiDepex
        #
        if Module.GetPeiDepex():
            for Item in Module.GetPeiDepex():
                Tmp = DepexXml()
                DomModule.appendChild(Tmp.ToXml(Item, 'PeiDepex'))
        #
        # DxeDepex
        #
        if Module.GetDxeDepex():
            for Item in Module.GetDxeDepex():
                Tmp = DepexXml()
                DomModule.appendChild(Tmp.ToXml(Item, 'DxeDepex'))
        #
        # SmmDepex
        #
        if Module.GetSmmDepex():
            for Item in Module.GetSmmDepex():
                Tmp = DepexXml()
                DomModule.appendChild(Tmp.ToXml(Item, 'SmmDepex'))
        #
        # MiscellaneousFile
        #
        if Module.GetMiscFileList():
            Tmp = MiscellaneousFileXml()
            DomModule.appendChild(Tmp.ToXml(Module.GetMiscFileList()[0], 'MiscellaneousFiles'))
        #
        # UserExtensions
        #
        if Module.GetUserExtensionList():
            for UserExtension in Module.GetUserExtensionList():
                Tmp = UserExtensionsXml()
                DomModule.appendChild(Tmp.ToXml(UserExtension, 'UserExtensions'))
        return DomModule
##
# BuildFlagXml used to generate BuildFlag for <AsBuilt>
#
class BuildFlagXml(object):
    """XML <-> object mapping for build flags in an <AsBuilt> section."""

    def __init__(self):
        self.Target = ''
        self.TagName = ''
        self.Family = ''
        self.AsBuiltFlags = ''

    def FromXml(self, Item, Key):
        """Build a BinaryBuildFlagObject from the structured XML form
        (separate Target/TagName/Family child elements)."""
        self.Target = XmlElement(Item, '%s/Target' % Key)
        self.TagName = XmlElement(Item, '%s/TagName' % Key)
        self.Family = XmlElement(Item, '%s/Family' % Key)
        BuildFlag = BinaryBuildFlagObject()
        BuildFlag.SetTarget(self.Target)
        BuildFlag.SetTagName(self.TagName)
        BuildFlag.SetFamily(self.Family)
        return BuildFlag

    #
    # For AsBuild INF usage
    #
    def FromXml2(self, Item, Key):
        """Build a BinaryBuildFlagObject from free-form flag text.

        Each line of the element text is prefixed with '# ' so the flags
        can be emitted verbatim as comments in an as-built INF.
        """
        self.AsBuiltFlags = XmlElement(Item, '%s' % Key)
        LineList = GetSplitValueList(self.AsBuiltFlags, '\n')
        # join() replaces the original counter-driven concatenation loop:
        # same output, no quadratic string building.
        ReturnLine = '\n'.join('# ' + Line for Line in LineList)
        BuildFlag = BinaryBuildFlagObject()
        BuildFlag.SetAsBuiltOptionFlags(ReturnLine)
        return BuildFlag

    def ToXml(self, BuildFlag, Key):
        """Serialize 'BuildFlag' as a single <BuildFlags> child of 'Key'."""
        # No-op touch of self; presumably only to avoid a
        # method-could-be-a-function style warning -- TODO confirm.
        if self.Target:
            pass
        AttributeList = []
        NodeList = []
        NodeList.append(['BuildFlags', BuildFlag])
        Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
        return Root
| gpl-2.0 |
ChameleonCloud/horizon | openstack_dashboard/test/integration_tests/tests/test_login.py | 6 | 1213 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.pages import loginpage
class TestLogin(helpers.BaseTestCase):
    """Basic login smoke test.

    Verifies that the login page is reachable, that a regular user can
    authenticate, and that the resulting home page reports a logged-in
    session before logging back out.
    """
    def test_login(self):
        login_page = loginpage.LoginPage(self.driver, self.CONFIG)
        login_page.go_to_login_page()
        home_page = login_page.login()
        if not home_page.is_logged_in:
            self.fail("Could not determine if logged in")
        home_page.log_out()
| apache-2.0 |
lepy/phuzzy | phuzzy/contrib/pydoe/doe_composite.py | 1 | 6172 | """
This code was originally published by the following individuals for use with
Scilab:
Copyright (C) 2012 - 2013 - Michael Baudin
Copyright (C) 2012 - Maria Christopoulou
Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
Copyright (C) 2009 - Yann Collette
Copyright (C) 2009 - CEA - Jean-Marc Martinez
website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
Abraham Lee.
"""
# import numpy as np
from phuzzy.contrib.pydoe.doe_factorial import ff2n
from phuzzy.contrib.pydoe.doe_star import star
from phuzzy.contrib.pydoe.doe_union import union
from phuzzy.contrib.pydoe.doe_repeat_center import repeat_center
__all__ = ['ccdesign']
def ccdesign(n, center=(4, 4), alpha='orthogonal', face='circumscribed'):
    """
    Central composite design

    Parameters
    ----------
    n : int
        The number of factors in the design (must be an integer > 1).

    Optional
    --------
    center : int array
        A 1-by-2 array of integers, the number of center points in each block
        of the design. (Default: (4, 4)).
    alpha : str
        A string describing the effect of alpha has on the variance. ``alpha``
        can take on the following values:

        1. 'orthogonal' or 'o' (Default)
        2. 'rotatable' or 'r'

    face : str
        The relation between the start points and the corner (factorial) points.
        There are three options for this input:

        1. 'circumscribed' or 'ccc': This is the original form of the central
           composite design. The star points are at some distance ``alpha``
           from the center, based on the properties desired for the design.
           The start points establish new extremes for the low and high
           settings for all factors. These designs have circular, spherical,
           or hyperspherical symmetry and require 5 levels for each factor.
           Augmenting an existing factorial or resolution V fractional
           factorial design with star points can produce this design.
        2. 'inscribed' or 'cci': For those situations in which the limits
           specified for factor settings are truly limits, the CCI design
           uses the factors settings as the star points and creates a factorial
           or fractional factorial design within those limits (in other words,
           a CCI design is a scaled down CCC design with each factor level of
           the CCC design divided by ``alpha`` to generate the CCI design).
           This design also requires 5 levels of each factor.
        3. 'faced' or 'ccf': In this design, the star points are at the center
           of each face of the factorial space, so ``alpha`` = 1. This
           variety requires 3 levels of each factor. Augmenting an existing
           factorial or resolution V design with appropriate star points can
           also produce this design.

    Notes
    -----
    - Fractional factorial designs are not (yet) available here.
    - 'ccc' and 'cci' can be rotatable design, but 'ccf' cannot.
    - If ``face`` is specified, while ``alpha`` is not, then the default value
      of ``alpha`` is 'orthogonal'.

    Raises
    ------
    Exception
        If ``n`` is not an integer greater than 1, or ``alpha``/``face`` is
        not one of the recognized strings.
    TypeError / ValueError
        If ``center`` is not a length-2 sequence.

    Returns
    -------
    mat : 2d-array
        The design matrix with coded levels -1 and 1

    Example
    -------
    ::

        >>> ccdesign(3)
        array([[-1.        , -1.        , -1.        ],
               [ 1.        , -1.        , -1.        ],
               [-1.        ,  1.        , -1.        ],
               [ 1.        ,  1.        , -1.        ],
               [-1.        , -1.        ,  1.        ],
               [ 1.        , -1.        ,  1.        ],
               [-1.        ,  1.        ,  1.        ],
               [ 1.        ,  1.        ,  1.        ],
               [ 0.        ,  0.        ,  0.        ],
               [ 0.        ,  0.        ,  0.        ],
               [ 0.        ,  0.        ,  0.        ],
               [ 0.        ,  0.        ,  0.        ],
               [-1.82574186,  0.        ,  0.        ],
               [ 1.82574186,  0.        ,  0.        ],
               [ 0.        , -1.82574186,  0.        ],
               [ 0.        ,  1.82574186,  0.        ],
               [ 0.        ,  0.        , -1.82574186],
               [ 0.        ,  0.        ,  1.82574186],
               [ 0.        ,  0.        ,  0.        ],
               [ 0.        ,  0.        ,  0.        ],
               [ 0.        ,  0.        ,  0.        ],
               [ 0.        ,  0.        ,  0.        ]])

    """
    # Check inputs.
    # BUG FIX: the original condition 'not isinstance(n, int) and n > 1'
    # only rejected non-integers greater than 1, silently accepting n = 0,
    # n = 1 and any float <= 1.  Reject everything that is not an int > 1.
    if not isinstance(n, int) or n < 2:
        raise Exception('"n" must be an integer greater than 1.')

    if alpha.lower() not in ('orthogonal', 'o', 'rotatable', 'r'):
        raise Exception('Invalid value for "alpha": {:}'.format(alpha))

    if face.lower() not in ('circumscribed', 'ccc', 'inscribed', 'cci',
                            'faced', 'ccf'):
        raise Exception('Invalid value for "face": {:}'.format(face))

    try:
        nc = len(center)
    except TypeError:
        # Narrowed from a bare 'except:' -- only len() failures belong here.
        raise TypeError('Invalid value for "center": {:}. Expected a 1-by-2 array.'.format(center))
    else:
        if nc != 2:
            raise ValueError('Invalid number of values for "center" (expected 2, but got {:})'.format(nc))

    # Star (axial) points; this also fixes the scaling factor 'a' (alpha)
    # used by the inscribed variant below.
    # Orthogonal Design
    if alpha.lower() in ('orthogonal', 'o'):
        H2, a = star(n, alpha='orthogonal', center=center)

    # Rotatable Design
    if alpha.lower() in ('rotatable', 'r'):
        H2, a = star(n, alpha='rotatable')

    # Inscribed CCD: factorial points scaled down by alpha, star points
    # regenerated at the +/-1 limits.
    if face.lower() in ('inscribed', 'cci'):
        H1 = ff2n(n)
        H1 = H1 / a  # Scale down the factorial points
        H2, a = star(n)

    # Faced CCD
    if face.lower() in ('faced', 'ccf'):
        H2, a = star(n)  # Value of alpha is always 1 in Faced CCD
        H1 = ff2n(n)

    # Circumscribed CCD
    if face.lower() in ('circumscribed', 'ccc'):
        H1 = ff2n(n)

    # Append the center points to each block, then stack the factorial and
    # star blocks into the final design matrix.
    C1 = repeat_center(n, center[0])
    C2 = repeat_center(n, center[1])

    H1 = union(H1, C1)
    H2 = union(H2, C2)
    H = union(H1, H2)

    return H
| mit |
brendanator/atari-rl | agents/training.py | 1 | 3130 | import os
import tensorflow as tf
from threading import Thread
from networks.factory import NetworkFactory
import util
class Trainer(object):
  """Drives training of one or more agents sharing a single TF graph.

  The network factory builds the train ops, the (optional) target-network
  reset op, the per-agent objects and the summary writer; train() then
  runs each agent's interaction/training loop, one thread per agent when
  there is more than one.
  """

  def __init__(self, config):
    util.log('Creating network and training operations')
    self.config = config
    # Creating networks
    factory = NetworkFactory(config)
    self.global_step, self.train_op = factory.create_train_ops()
    self.reset_op = factory.create_reset_target_network_op()
    self.agents = factory.create_agents()
    self.summary = factory.create_summary()

  def train(self):
    """Create a monitored session (restoring any checkpoint in run_dir)
    and train until num_steps is reached or stop_training() is called."""
    self.training = True
    util.log('Creating session and loading checkpoint')
    session = tf.train.MonitoredTrainingSession(
        checkpoint_dir=self.config.run_dir,
        save_summaries_steps=0,  # Summaries will be saved with train_op only
        config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
    with session:
      # Single agent runs inline; multiple agents each get a thread.
      if len(self.agents) == 1:
        self.train_agent(session, self.agents[0])
      else:
        self.train_threaded(session)
    util.log('Training complete')

  def train_threaded(self, session):
    """Run train_agent() for every agent on its own named thread and wait
    for all of them to finish."""
    threads = []
    for i, agent in enumerate(self.agents):
      thread = Thread(target=self.train_agent, args=(session, agent))
      thread.name = 'Agent-%d' % (i + 1)
      thread.start()
      threads.append(thread)
    for thread in threads:
      thread.join()

  def train_agent(self, session, agent):
    """Interaction/training loop for one agent.

    Plays episodes, training a batch every config.train_period environment
    steps (and at episode end), until the shared step budget is exhausted
    or training is stopped.
    """
    # Populate replay memory
    if self.config.load_replay_memory:
      util.log('Loading replay memory')
      agent.replay_memory.load()
    else:
      util.log('Populating replay memory')
      agent.populate_replay_memory()
    # Initialize step counters
    step, steps_until_train = 0, self.config.train_period
    util.log('Starting training')
    while self.training and step < self.config.num_steps:
      # Start new episode
      observation, _, done = agent.new_game()
      # Play until losing
      while not done:
        self.reset_target_network(session, step)
        action = agent.action(session, step, observation)
        observation, _, done = agent.take_action(action)
        step += 1
        steps_until_train -= 1
        if done or (steps_until_train == 0):
          # train_batch returns the (possibly updated) global step count.
          step = self.train_batch(session, agent.replay_memory, step)
          steps_until_train = self.config.train_period
      # Log episode
      agent.log_episode(step)
    if self.config.save_replay_memory:
      agent.replay_memory.save()

  def reset_target_network(self, session, step):
    """Copy weights to the target network every
    target_network_update_period steps (if a reset op exists)."""
    if self.reset_op:
      if step > 0 and step % self.config.target_network_update_period == 0:
        session.run(self.reset_op)

  def train_batch(self, session, replay_memory, step):
    """Sample a batch, run one training step, feed back the new priorities
    and record summaries.  Returns the global step (unchanged when the
    memory yields no batch)."""
    fetches = [self.global_step, self.train_op] + self.summary.operation(step)
    batch = replay_memory.sample_batch(fetches, self.config.batch_size)
    if batch:
      step, priorities, summary = session.run(fetches, batch.feed_dict())
      batch.update_priorities(priorities)
      self.summary.add_summary(summary, step)
    return step

  def stop_training(self):
    """Ask all training loops to exit after their current episode step."""
    util.log('Stopping training')
    self.training = False
bowlofstew/Impala | tests/query_test/test_hdfs_caching.py | 13 | 8247 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Validates limit on scan nodes
#
import logging
import os
import pytest
from copy import copy
from subprocess import call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import *
from tests.common.test_vector import *
from tests.common.impala_cluster import ImpalaCluster
from tests.common.test_dimensions import create_exec_option_dimension
from tests.common.skip import SkipIfS3, SkipIfIsilon
from tests.util.shell_util import exec_process
# End to end test that hdfs caching is working.
@SkipIfS3.caching # S3: missing coverage: verify SET CACHED gives error
@SkipIfIsilon.caching
class TestHdfsCaching(ImpalaTestSuite):
  """End-to-end checks that scans of HDFS-cached tables are served from
  the cache (via the cached-bytes-read metric) and that queries mixing
  cached/uncached tables cancel cleanly."""

  @classmethod
  def get_workload(self):
    return 'tpch'

  @classmethod
  def add_test_dimensions(cls):
    super(TestHdfsCaching, cls).add_test_dimensions()
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('exec_option')['batch_size'] == 0)
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == "text")

  # The tpch nation table is cached as part of data loading. We'll issue a query
  # against this table and verify the metric is updated correctly.
  @pytest.mark.execute_serially
  def test_table_is_cached(self, vector):
    """Scans tpch.nation and asserts that exactly one impalad's
    cached-bytes-read metric grows by the table's size in bytes."""
    cached_read_metric = "impala-server.io-mgr.cached-bytes-read"
    query_string = "select count(*) from tpch.nation"
    # Size in bytes of the nation table's single cached file.
    expected_bytes_delta = 2199
    impala_cluster = ImpalaCluster()

    # Collect the cached read metric on all the impalads before running the query
    cached_bytes_before = list()
    for impalad in impala_cluster.impalads:
      cached_bytes_before.append(impalad.service.get_metric_value(cached_read_metric))

    # Execute the query.
    result = self.execute_query(query_string)
    assert(len(result.data) == 1)
    assert(result.data[0] == '25')

    # Read the metrics again.
    cached_bytes_after = list()
    for impalad in impala_cluster.impalads:
      cached_bytes_after.append(impalad.service.get_metric_value(cached_read_metric))

    # Verify that the cached bytes increased by the expected number on exactly one of
    # the impalads.
    num_metrics_increased = 0
    assert(len(cached_bytes_before) == len(cached_bytes_after))
    for i in range(0, len(cached_bytes_before)):
      assert(cached_bytes_before[i] == cached_bytes_after[i] or\
             cached_bytes_before[i] + expected_bytes_delta == cached_bytes_after[i])
      if cached_bytes_after[i] > cached_bytes_before[i]:
        num_metrics_increased = num_metrics_increased + 1

    if num_metrics_increased != 1:
      # Test failed, print the metrics
      for i in range(0, len(cached_bytes_before)):
        print "%d %d" % (cached_bytes_before[i], cached_bytes_after[i])
      assert(False)

  def test_cache_cancellation(self, vector):
    """ This query runs on some mix of cached and not cached tables. The query has
    a limit so it exercises the cancellation paths. Regression test for
    IMPALA-1019. """
    num_iters = 100
    query_string = """
      with t1 as (select int_col x, bigint_col y from functional.alltypes limit 2),
           t2 as (select int_col x, bigint_col y from functional.alltypestiny limit 2),
           t3 as (select int_col x, bigint_col y from functional.alltypessmall limit 2)
      select * from t1, t2, t3 where t1.x = t2.x and t2.x = t3.x """

    # Run this query for some iterations since it is timing dependent.
    for x in xrange(1, num_iters):
      result = self.execute_query(query_string)
      assert(len(result.data) == 2)
@SkipIfS3.caching
@SkipIfIsilon.caching
class TestHdfsCachingDdl(ImpalaTestSuite):
  """DDL-level tests for HDFS caching: cache directives are created and
  dropped with tables/partitions, and externally-made directive changes
  are picked up on reload."""

  @classmethod
  def get_workload(self):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestHdfsCachingDdl, cls).add_test_dimensions()
    cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == 'text' and \
        v.get_value('table_format').compression_codec == 'none')

  def setup_method(self, method):
    # Each test starts from a fresh, empty 'cachedb' database.
    self.cleanup_db("cachedb")
    self.client.execute("create database cachedb")

  def teardown_method(self, method):
    self.cleanup_db("cachedb")

  @pytest.mark.execute_serially
  def test_caching_ddl(self, vector):
    """Runs the hdfs-caching workload and checks that cache directives are
    added for each cached table/partition and removed again on drop."""
    # Get the number of cache requests before starting the test
    num_entries_pre = get_num_cache_requests()
    self.run_test_case('QueryTest/hdfs-caching', vector)
    # After running this test case we should be left with 8 cache requests.
    # In this case, 1 for each table + 7 more for each cached partition.
    assert num_entries_pre == get_num_cache_requests() - 8

    self.client.execute("drop table cachedb.cached_tbl_part")
    self.client.execute("drop table cachedb.cached_tbl_nopart")

    # Dropping the tables should cleanup cache entries leaving us with the same
    # total number of entries
    assert num_entries_pre == get_num_cache_requests()

  @pytest.mark.execute_serially
  def test_cache_reload_validation(self, vector):
    """This is a set of tests asserting that cache directives modified
       outside of Impala are picked up after reload, cf IMPALA-1645"""

    num_entries_pre = get_num_cache_requests()
    create_table = ("create table cachedb.cached_tbl_reload "
        "(id int) cached in 'testPool' with replication = 8")
    self.client.execute(create_table)

    # Access the table once to load the metadata
    self.client.execute("select count(*) from cachedb.cached_tbl_reload")

    create_table = ("create table cachedb.cached_tbl_reload_part (i int) "
        "partitioned by (j int) cached in 'testPool' with replication = 8")
    self.client.execute(create_table)

    # Add two partitions
    self.client.execute("alter table cachedb.cached_tbl_reload_part add partition (j=1)")
    self.client.execute("alter table cachedb.cached_tbl_reload_part add partition (j=2)")

    assert num_entries_pre + 4 == get_num_cache_requests(), \
      "Adding the tables should be reflected by the number of cache directives."

    # Modify the cache directive outside of Impala and reload the table to verify
    # that changes are visible
    drop_cache_directives_for_path("/test-warehouse/cachedb.db/cached_tbl_reload")
    drop_cache_directives_for_path("/test-warehouse/cachedb.db/cached_tbl_reload_part")
    drop_cache_directives_for_path(
        "/test-warehouse/cachedb.db/cached_tbl_reload_part/j=1")
    change_cache_directive_repl_for_path(
        "/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2", 3)

    # Create a bogus cached table abusing an existing cache directive ID, IMPALA-1750
    dirid = get_cache_directive_for_path("/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2")
    self.client.execute(("create table cachedb.no_replication_factor (id int) " \
        "tblproperties(\"cache_directive_id\"=\"%s\")" % dirid))
    self.run_test_case('QueryTest/hdfs-caching-validation', vector)
def drop_cache_directives_for_path(path):
  """Remove every HDFS cache directive that covers the given path."""
  cmd = "hdfs cacheadmin -removeDirectives -path %s" % path
  returncode, out, err = exec_process(cmd)
  assert returncode == 0, \
      "Error removing cache directive for path %s (%s, %s)" % (path, out, err)
def get_cache_directive_for_path(path):
  """Return the id of the 'testPool' cache directive covering 'path'."""
  returncode, out, _ = exec_process("hdfs cacheadmin -listDirectives -path %s" % path)
  assert returncode == 0
  match = re.search('^\s+?(\d+)\s+?testPool\s+?.*?$', out, re.MULTILINE)
  return match.group(1)
def change_cache_directive_repl_for_path(path, repl):
  """Set the replication factor of the cache directive covering 'path'."""
  directive_id = get_cache_directive_for_path(path)
  returncode, out, err = exec_process(
      "hdfs cacheadmin -modifyDirective -id %s -replication %s" % (directive_id, repl))
  assert returncode == 0, \
      "Error modifying cache directive for path %s (%s, %s)" % (path, out, err)
def get_num_cache_requests():
  """Return the number of outstanding cache requests (counted as output
  lines of 'hdfs cacheadmin -listDirectives -stats')."""
  returncode, out, err = exec_process("hdfs cacheadmin -listDirectives -stats")
  assert returncode == 0, 'Error executing hdfs cacheadmin: %s %s' % (out, err)
  # Equivalent to len(out.split('\n')): one more than the newline count.
  return out.count('\n') + 1
| apache-2.0 |
TeamExodus/external_chromium_org | build/android/gyp/aidl.py | 81 | 1552 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Invokes Android's aidl
"""
import optparse
import os
import sys
from util import build_utils
def main(argv):
  """Run Android's aidl over each input file and zip the generated .java
  sources into a srcjar, optionally emitting a depfile."""
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)

  parser.add_option('--aidl-path', help='Path to the aidl binary.')
  parser.add_option('--imports', help='Files to import.')
  parser.add_option('--includes',
                    help='Directories to add as import search paths.')
  parser.add_option('--srcjar', help='Path for srcjar output.')
  options, args = parser.parse_args(argv[1:])

  with build_utils.TempDir() as temp_dir:
    for aidl_file in args:
      classname = os.path.splitext(os.path.basename(aidl_file))[0]
      output_path = os.path.join(temp_dir, classname + '.java')
      # Preprocessed imports (-p) are always passed; include dirs (-I)
      # only when provided.
      cmd = [options.aidl_path]
      cmd.extend('-p' + s for s in build_utils.ParseGypList(options.imports))
      if options.includes is not None:
        cmd.extend('-I' + s for s in build_utils.ParseGypList(options.includes))
      cmd.extend([aidl_file, output_path])
      build_utils.CheckOutput(cmd)

    build_utils.ZipDir(options.srcjar, temp_dir)

  if options.depfile:
    build_utils.WriteDepfile(options.depfile,
                             build_utils.GetPythonDependencies())


if __name__ == '__main__':
  sys.exit(main(sys.argv))
LukeMurphey/splunk-google-drive | src/bin/google_drive_app/pyasn1/codec/native/decoder.py | 15 | 7671 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
from pyasn1 import debug
from pyasn1 import error
from pyasn1.type import base
from pyasn1.type import char
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
__all__ = ['decode']
LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER)
class AbstractScalarDecoder(object):
    """Decode a plain Python scalar by cloning the ASN.1 spec with it."""

    def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
        decoded = asn1Spec.clone(pyObject)
        return decoded
class BitStringDecoder(AbstractScalarDecoder):
    """Decode a string of binary digits into a BIT STRING clone of the spec."""

    def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
        bitString = univ.BitString.fromBinaryString(pyObject)
        return asn1Spec.clone(bitString)
class SequenceOrSetDecoder(object):
    """Decode a Python mapping into a SEQUENCE/SET value.

    Components present in the mapping are decoded recursively via
    *decodeFun*; components missing from it keep whatever the cloned
    spec provides.
    """

    def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
        decoded = asn1Spec.clone()
        componentTypes = asn1Spec.componentType
        for componentName in decoded:
            if componentName not in pyObject:
                continue
            componentSpec = componentTypes[componentName].asn1Object
            decoded[componentName] = decodeFun(
                pyObject[componentName], componentSpec, **options)
        return decoded
class SequenceOfOrSetOfDecoder(object):
    """Decode a Python iterable into a SEQUENCE OF / SET OF value.

    Each element of *pyObject* is decoded against the spec's component
    type and appended to a clone of *asn1Spec*.
    """

    def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
        asn1Value = asn1Spec.clone()
        for pyValue in pyObject:
            # BUG FIX: the original code forwarded **options to .append()
            # instead of to the recursive decodeFun call, which raised a
            # TypeError as soon as any decoder option was supplied.
            asn1Value.append(decodeFun(pyValue, asn1Spec.componentType, **options))
        return asn1Value
class ChoiceDecoder(object):
    """Decode a Python mapping into a CHOICE value.

    The first key of *pyObject* (in its iteration order) that names a
    known alternative is decoded; all remaining keys are ignored.
    """

    def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
        decoded = asn1Spec.clone()
        componentTypes = asn1Spec.componentType
        for alternative in pyObject:
            if alternative not in componentTypes:
                continue
            decoded[alternative] = decodeFun(
                pyObject[alternative], componentTypes[alternative].asn1Object, **options)
            break
        return decoded
# Decoder dispatch table keyed on ASN.1 tag sets.  Several constructed types
# share a tag (Sequence/SequenceOf, Set/SetOf, Choice/Any), so lookups by tag
# alone are ambiguous for them -- typeMap below disambiguates by typeId.
tagMap = {
    univ.Integer.tagSet: AbstractScalarDecoder(),
    univ.Boolean.tagSet: AbstractScalarDecoder(),
    univ.BitString.tagSet: BitStringDecoder(),
    univ.OctetString.tagSet: AbstractScalarDecoder(),
    univ.Null.tagSet: AbstractScalarDecoder(),
    univ.ObjectIdentifier.tagSet: AbstractScalarDecoder(),
    univ.Enumerated.tagSet: AbstractScalarDecoder(),
    univ.Real.tagSet: AbstractScalarDecoder(),
    univ.Sequence.tagSet: SequenceOrSetDecoder(),  # conflicts with SequenceOf
    univ.Set.tagSet: SequenceOrSetDecoder(),  # conflicts with SetOf
    univ.Choice.tagSet: ChoiceDecoder(),  # conflicts with Any
    # character string types
    char.UTF8String.tagSet: AbstractScalarDecoder(),
    char.NumericString.tagSet: AbstractScalarDecoder(),
    char.PrintableString.tagSet: AbstractScalarDecoder(),
    char.TeletexString.tagSet: AbstractScalarDecoder(),
    char.VideotexString.tagSet: AbstractScalarDecoder(),
    char.IA5String.tagSet: AbstractScalarDecoder(),
    char.GraphicString.tagSet: AbstractScalarDecoder(),
    char.VisibleString.tagSet: AbstractScalarDecoder(),
    char.GeneralString.tagSet: AbstractScalarDecoder(),
    char.UniversalString.tagSet: AbstractScalarDecoder(),
    char.BMPString.tagSet: AbstractScalarDecoder(),
    # useful types
    useful.ObjectDescriptor.tagSet: AbstractScalarDecoder(),
    useful.GeneralizedTime.tagSet: AbstractScalarDecoder(),
    useful.UTCTime.tagSet: AbstractScalarDecoder()
}
# Put in ambiguous & non-ambiguous types for faster codec lookup
# (typeId is unique per concrete type, so this table is consulted first).
typeMap = {
    univ.Integer.typeId: AbstractScalarDecoder(),
    univ.Boolean.typeId: AbstractScalarDecoder(),
    univ.BitString.typeId: BitStringDecoder(),
    univ.OctetString.typeId: AbstractScalarDecoder(),
    univ.Null.typeId: AbstractScalarDecoder(),
    univ.ObjectIdentifier.typeId: AbstractScalarDecoder(),
    univ.Enumerated.typeId: AbstractScalarDecoder(),
    univ.Real.typeId: AbstractScalarDecoder(),
    # ambiguous base types
    univ.Set.typeId: SequenceOrSetDecoder(),
    univ.SetOf.typeId: SequenceOfOrSetOfDecoder(),
    univ.Sequence.typeId: SequenceOrSetDecoder(),
    univ.SequenceOf.typeId: SequenceOfOrSetOfDecoder(),
    univ.Choice.typeId: ChoiceDecoder(),
    univ.Any.typeId: AbstractScalarDecoder(),
    # character string types
    char.UTF8String.typeId: AbstractScalarDecoder(),
    char.NumericString.typeId: AbstractScalarDecoder(),
    char.PrintableString.typeId: AbstractScalarDecoder(),
    char.TeletexString.typeId: AbstractScalarDecoder(),
    char.VideotexString.typeId: AbstractScalarDecoder(),
    char.IA5String.typeId: AbstractScalarDecoder(),
    char.GraphicString.typeId: AbstractScalarDecoder(),
    char.VisibleString.typeId: AbstractScalarDecoder(),
    char.GeneralString.typeId: AbstractScalarDecoder(),
    char.UniversalString.typeId: AbstractScalarDecoder(),
    char.BMPString.typeId: AbstractScalarDecoder(),
    # useful types
    useful.ObjectDescriptor.typeId: AbstractScalarDecoder(),
    useful.GeneralizedTime.typeId: AbstractScalarDecoder(),
    useful.UTCTime.typeId: AbstractScalarDecoder()
}
class Decoder(object):
    """Decode a native Python object into ASN.1 objects guided by *asn1Spec*.

    Dispatch order: first by the spec's concrete ``typeId`` (which
    distinguishes ambiguous tags such as Sequence vs. SequenceOf), then by
    its base tag set for plain tagged types.
    """
    # noinspection PyDefaultArgument
    def __init__(self, tagMap, typeMap):
        # Private dispatch tables: tagMap keys on tag sets, typeMap on type ids.
        self.__tagMap = tagMap
        self.__typeMap = typeMap
    def __call__(self, pyObject, asn1Spec, **options):
        if LOG:
            debug.scope.push(type(pyObject).__name__)
            LOG('decoder called at scope %s, working with type %s' % (debug.scope, type(pyObject).__name__))
        # A concrete spec is mandatory: the Python value alone carries no
        # ASN.1 type information.
        if asn1Spec is None or not isinstance(asn1Spec, base.Asn1Item):
            raise error.PyAsn1Error('asn1Spec is not valid (should be an instance of an ASN.1 Item, not %s)' % asn1Spec.__class__.__name__)
        try:
            valueDecoder = self.__typeMap[asn1Spec.typeId]
        except KeyError:
            # use base type for codec lookup to recover untagged types
            baseTagSet = tag.TagSet(asn1Spec.tagSet.baseTag, asn1Spec.tagSet.baseTag)
            try:
                valueDecoder = self.__tagMap[baseTagSet]
            except KeyError:
                raise error.PyAsn1Error('Unknown ASN.1 tag %s' % asn1Spec.tagSet)
        if LOG:
            LOG('calling decoder %s on Python type %s <%s>' % (type(valueDecoder).__name__, type(pyObject).__name__, repr(pyObject)))
        # Pass ourselves as decodeFun so constructed-type decoders can recurse.
        value = valueDecoder(pyObject, asn1Spec, self, **options)
        if LOG:
            LOG('decoder %s produced ASN.1 type %s <%s>' % (type(valueDecoder).__name__, type(value).__name__, repr(value)))
            debug.scope.pop()
        return value
#: Turns Python objects of built-in types into ASN.1 objects.
#:
#: Takes Python objects of built-in types and turns them into a tree of
#: ASN.1 objects (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
#: may be a scalar or an arbitrary nested structure.
#:
#: Parameters
#: ----------
#: pyObject: :py:class:`object`
#: A scalar or nested Python objects
#:
#: Keyword Args
#: ------------
#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#: A pyasn1 type object to act as a template guiding the decoder. It is required
#: for successful interpretation of Python objects mapping into their ASN.1
#: representations.
#:
#: Returns
#: -------
#: : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
#: A scalar or constructed pyasn1 object
#:
#: Raises
#: ------
#: ~pyasn1.error.PyAsn1Error
#: On decoding errors
#:
#: Examples
#: --------
#: Decode native Python object into ASN.1 objects with ASN.1 schema
#:
#: .. code-block:: pycon
#:
#: >>> seq = SequenceOf(componentType=Integer())
#: >>> s, _ = decode([1, 2, 3], asn1Spec=seq)
#: >>> str(s)
#: SequenceOf:
#: 1 2 3
#:
# Module-level convenience instance; usage is documented in the comment block above.
decode = Decoder(tagMap, typeMap)
| apache-2.0 |
mairin/anaconda | pyanaconda/anaconda.py | 2 | 7956 | #!/usr/bin/python
#
# anaconda: The Red Hat Linux Installation program
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Brent Fox <bfox@redhat.com>
# Mike Fulbright <msf@redhat.com>
# Jakub Jelinek <jakub@redhat.com>
# Jeremy Katz <katzj@redhat.com>
# Chris Lumens <clumens@redhat.com>
# Paul Nasrat <pnasrat@redhat.com>
# Erik Troan <ewt@rpath.com>
# Matt Wilson <msw@rpath.com>
#
import os
import sys
from pyanaconda.constants import ROOT_PATH
from tempfile import mkstemp
from pyanaconda.bootloader import get_bootloader
from pyanaconda import constants
from pyanaconda import addons
import logging
log = logging.getLogger("anaconda")
stdoutLog = logging.getLogger("anaconda.stdout")
class Anaconda(object):
    """Central installer state object shared across the Anaconda UI,
    storage, payload and bootloader subsystems.

    Heavyweight members (bootloader, install class, payload, storage,
    interface) are created lazily through properties so construction stays
    cheap.  NOTE(review): this file targets Python 2 -- see the
    ``iteritems()`` call in dumpState() and the ``0755`` octal literal in
    writeXdriver().
    """
    def __init__(self):
        from pyanaconda import desktop
        self._bootloader = None
        self.canReIPL = False
        self.desktop = desktop.Desktop()
        self.dir = None
        self.displayMode = None            # 'g' (GUI), 't' (text) or 'c' (cmdline)
        self.extraModules = []
        self.id = None
        self._instClass = None
        self._intf = None
        self.isHeadless = False
        self.ksdata = None                 # kickstart data, set by the caller
        self.mediaDevice = None
        self.methodstr = None              # install method spec, e.g. "hd:..."
        self.opts = None
        self._payload = None
        self.proxy = None
        self.proxyUsername = None
        self.proxyPassword = None
        self.reIPLMessage = None
        self.rescue = False
        self.rescue_mount = True
        self.rootParts = None
        self.stage2 = None
        self._storage = None
        self.updateSrc = None
        self.mehConfig = None
        # *sigh* we still need to be able to write this out
        self.xdriver = None
    @property
    def bootloader(self):
        # Lazily construct the platform-appropriate bootloader.
        if not self._bootloader:
            self._bootloader = get_bootloader()
        return self._bootloader
    @property
    def instClass(self):
        # Lazily fall back to the default install class if none was set.
        if not self._instClass:
            from pyanaconda.installclass import DefaultInstall
            self._instClass = DefaultInstall()
        return self._instClass
    def _getInterface(self):
        return self._intf
    def _setInterface(self, v):
        # "lambda cannot contain assignment"
        self._intf = v
    def _delInterface(self):
        del self._intf
    intf = property(_getInterface, _setInterface, _delInterface)
    @property
    def payload(self):
        # Try to find the packaging payload class. First try the install
        # class. If it doesn't give us one, fall back to the default.
        if not self._payload:
            klass = self.instClass.getBackend()
            if not klass:
                from pyanaconda.flags import flags
                if flags.livecdInstall:
                    from pyanaconda.packaging.livepayload import LiveImagePayload
                    klass = LiveImagePayload
                elif self.ksdata.method.method == "liveimg":
                    from pyanaconda.packaging.livepayload import LiveImageKSPayload
                    klass = LiveImageKSPayload
                elif flags.dnf:
                    from pyanaconda.packaging.dnfpayload import DNFPayload as klass
                else:
                    from pyanaconda.packaging.yumpayload import YumPayload
                    klass = YumPayload
            self._payload = klass(self.ksdata)
        return self._payload
    @property
    def protected(self):
        """Device specs that must not be touched: the live-boot device and
        any hard-disk hosts of the install method or stage2 image."""
        import stat
        specs = []
        if os.path.exists("/run/initramfs/livedev") and \
           stat.S_ISBLK(os.stat("/run/initramfs/livedev")[stat.ST_MODE]):
            specs.append(os.readlink("/run/initramfs/livedev"))
        if self.methodstr and self.methodstr.startswith("hd:"):
            # "hd:<device>:..." -- keep only the device portion
            specs.append(self.methodstr[3:].split(":", 3)[0])
        if self.stage2 and self.stage2.startswith("hd:"):
            specs.append(self.stage2[3:].split(":", 3)[0])
        return specs
    @property
    def storage(self):
        # Lazily build the blivet storage model from kickstart data.
        if not self._storage:
            import blivet
            self._storage = blivet.Blivet(ksdata=self.ksdata)
            if self.instClass.defaultFS:
                self._storage.setDefaultFSType(self.instClass.defaultFS)
        return self._storage
    def dumpState(self):
        """Write a python-meh traceback dump of the current process state to a
        unique /tmp/anaconda-tb-* file and append it to the combined log."""
        from meh import ExceptionInfo
        from meh.dump import ReverseExceptionDump
        from inspect import stack as _stack
        from traceback import format_stack
        # Skip the frames for dumpState and the signal handler.
        stack = _stack()[2:]
        stack.reverse()
        exn = ReverseExceptionDump(ExceptionInfo(None, None, stack),
                                   self.mehConfig)
        # gather up info on the running threads
        threads = "\nThreads\n-------\n"
        for thread_id, frame in sys._current_frames().iteritems():
            threads += "\nThread %s\n" % (thread_id,)
            threads += "".join(format_stack(frame))
        # dump to a unique file
        (fd, filename) = mkstemp(prefix="anaconda-tb-", dir="/tmp")
        dump_text = exn.traceback_and_object_dump(self)
        dump_text += threads
        dump_text = dump_text.encode("utf-8")
        os.write(fd, dump_text)
        os.close(fd)
        # append to a given file
        with open("/tmp/anaconda-tb-all.log", "a+") as f:
            f.write("--- traceback: %s ---\n" % filename)
            f.write(dump_text + "\n")
    def initInterface(self, addon_paths=None):
        """Create the GUI or TUI interface per displayMode; may be called once.

        Raises RuntimeError if an interface already exists or displayMode
        is not one of 'g', 't', 'c'.
        """
        if self._intf:
            raise RuntimeError("Second attempt to initialize the InstallInterface")
        if self.displayMode == 'g':
            from pyanaconda.ui.gui import GraphicalUserInterface
            self._intf = GraphicalUserInterface(self.storage, self.payload,
                                                self.instClass)
            # needs to be refreshed now we know if gui or tui will take place
            addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
                                                     ui_subdir="gui")
        elif self.displayMode in ['t', 'c']:  # text and command line are the same
            from pyanaconda.ui.tui import TextUserInterface
            self._intf = TextUserInterface(self.storage, self.payload,
                                           self.instClass)
            # needs to be refreshed now we know if gui or tui will take place
            addon_paths = addons.collect_addon_paths(constants.ADDON_PATHS,
                                                     ui_subdir="tui")
        else:
            raise RuntimeError("Unsupported displayMode: %s" % self.displayMode)
        if addon_paths:
            self._intf.update_paths(addon_paths)
    def writeXdriver(self, root = None):
        # this should go away at some point, but until it does, we
        # need to keep it around.
        if self.xdriver is None:
            return
        if root is None:
            root = ROOT_PATH
        if not os.path.isdir("%s/etc/X11" %(root,)):
            os.makedirs("%s/etc/X11" %(root,), mode=0755)
        f = open("%s/etc/X11/xorg.conf" %(root,), 'w')
        f.write('Section "Device"\n\tIdentifier "Videocard0"\n\tDriver "%s"\nEndSection\n' % self.xdriver)
        f.close()
| gpl-2.0 |
ibrahimsh/final-project-software-programming | HashTable.py | 1 | 22420 | __author__ = 'MatrixRev'
import codecs
import json
import re
from collections import Mapping
from collections import defaultdict
from collections import Counter
import Levenshtein
import nltk
from nltk.corpus import stopwords
import fileinput
import string
import os
import collections
import rdflib
from rdflib.namespace import OWL, RDF, RDFS,DC,FOAF
from rdflib import Graph, Literal, Namespace, URIRef,BNode
from rdflib.plugins.memory import IOMemory
from rdflib.graph import Graph, ConjunctiveGraph
from rdflib import plugin
from tempfile import mkdtemp
from rdflib.store import Store, NO_STORE, VALID_STORE
filename='C:\\Users\\MatrixRev\\Desktop\\books\\output\\subjects.txt' # the input file
output='C:\\Users\\MatrixRev\\Desktop\\books\\output\\dictionnary.json'
filePath='C:\\Users\\MatrixRev\\Desktop\\books\\output\\foaf.rdf'
termfile='C:\\Users\\MatrixRev\\Desktop\\books\\output\\allTerms.txt'
out2='C:\\Users\\MatrixRev\\Desktop\\books\\output\\newprint.txt'
class HashTable():
    """Parses MARC-style subject strings (with '**x' subfield markers and
    '***title'/'***link' delimiters) into a term dictionary, and can emit
    the result as an RDF/OWL ontology via rdflib.

    NOTE(review): several methods test ``s.find(x)`` for truthiness; since
    str.find returns -1 (truthy) when absent, those branches behave as
    "marker not at position 0" rather than "marker present" -- likely all
    were meant to be ``!= -1``.  Flagged inline below.
    """
    def __init__(self):
        # term -> Relation() dict accumulated by fill_Terms()
        self.allTerms={}
        # MARC subfield marker -> human-readable meaning
        self.symbols={ '**a':'Personal name','**sub':'subString',
                       '**period':'period',
                       '**location':'location','**date':'date',
                       '**b':'Numeration',
                       '**c':'Titles and other words associated with a name',
                       '**e':'Relator term',
                       '**f':'Date of a work',
                       '**g':'Miscellaneous information',
                       '**h':'Medium A media qualifier.',
                       '**j':'Attribution qualifier',
                       #Attribution information for names when the responsibility is unknown, uncertain, fictitious, or pseudonymous.
                       '**k':'Form subheading',
                       #'**l':'Language of a work (NR)',
                       '**m':'Medium of performance for music',
                       '**n':'Number of part/section of a work',
                       '**o':'Arranged statement for music',
                       #'**p':'Name of part/section of a work (R)',
                       '**q':'Fuller form of name',
                       '**r':'Key for music',
                       #'**s':'Version (NR)',
                       '**t':'Title of a work',
                       '**u':'Affiliation',
                       '**v':'Form subdivision',
                       '**x':'General subdivision',
                       '**y':'Chronological subdivision',
                       '**z':'Geographic subdivision',
                       '**0':'Authority record control number or standard number',
                       #See description of this subfield in Appendix A: Control Subfields.
                       '**2':'Source of heading or term',
                       #Code from: Subject Heading and Term Source Codes.
                       '**3':'Materials specified',
                       '**4':'Relator code',
                       '**6':'Linkage',
                       #See description of this subfield in Appendix A: Control Subfields.
                       '**8':'Field link and sequence number',
                       '=':'isA'}
        # NOTE(review): the two locals below are never used.
        sym_keys=self.symbols.keys()
        value=self.symbols.values()
    def Relation(self):
        """Return a fresh, empty per-term record."""
        return ({"@bookTitle":[],
                 "relation":[],
                 "objects":[],
                 "subClass":[],
                 "semilarTerms":[],
                 "@link":[],
                 "listOFWords":[]})
    def fill_Terms(self,sub):
        """Parse one subject line *sub* and merge its terms into self.allTerms.

        Expected layout: "<subject>[**subfields...]***title<title>***link<url>".
        Returns the (mutated) self.allTerms dict.
        """
        terms=self.Relation()
        allterm=self.allTerms
        subStr=""
        # NOTE(review): find() truthiness -- branch taken even when marker absent.
        if sub.find("***link"):
            terms['@link'].append((self.setLink(sub).strip(" ")))
            newIndex=sub.index("***link")
            sub=sub[:newIndex]
        # NOTE(review): same find() truthiness issue; also uses rfind here.
        if sub.rfind("***title"):
            terms['@bookTitle'].append('@'+self.setTitle(sub).strip(" "))
            newIndex=sub.index("***title")
            subStr=sub[:newIndex]
        #print("all sub str:",subStr)
        if self.find_relation(subStr)==True:
            # Subfield markers present: record them as relations, then work on
            # the text before the first marker.
            for k,v in self.termValue(subStr).items():
                terms.setdefault("relation",[]).append((k,v))
            minStr=self.CompTermKey(subStr)
            #print("the sub string is:",minStr)
            if minStr.find('(')!=-1 and minStr.find(')')!=-1:
                print("yest find para",self.if_FindParanthesis(minStr))
                terms["relation"].append(self.getParanthises(minStr))
                insidePara=self.inside_Paranthises(minStr)
                #print("insidePrara",insidePara)
                if self.if_compoTerm(insidePara)==True:
                    terms['listOFWords'].append(self.splitTerms(insidePara))
                if self.check_If_Comma(insidePara)==True:
                    # Comma-separated list: last item is the object/class, the
                    # rest become subclasses.
                    listOfobjects= self.findComma(insidePara)
                    i=len(listOfobjects)-1
                    print("i",i)
                    cls=listOfobjects[i]
                    terms["objects"].append(cls)
                    for c in range(0,i):
                        subClass=listOfobjects[c]
                        terms["subClass"].append(subClass)
                    for k in range(0,len(listOfobjects)):
                        minStr=listOfobjects[k]
                        minStr=minStr.strip(" ")
                        if self.check_terms(minStr)==True:
                            allterm[minStr]=self.updateTerm(minStr,terms)
                        elif self.check_terms(minStr)==False:
                            allterm[minStr]=terms
                else:
                    minStr=insidePara
                    minStr=minStr.strip(" ")
                    if self.check_terms(minStr)==True:
                        allterm[minStr]=self.updateTerm(minStr,terms)
                    # NOTE(review): duplicate ==True condition -- dead branch,
                    # presumably meant ==False as in the sibling branches.
                    elif self.check_terms(minStr)==True:
                        allterm[minStr]=terms
            elif self.check_If_Comma(minStr)==True:
                listOfobjects= self.findComma(minStr)
                i= len(listOfobjects)-1
                print("listofbjects is :",listOfobjects[i])
                cls=listOfobjects[i]
                terms['objects'].append(cls)
                for c in range(0,i):
                    subClass=listOfobjects[c]
                    terms["subClass"].append(subClass)
                for t in range(0,len(listOfobjects)):
                    minStr=listOfobjects[t]
                    minStr=minStr.strip(" ")
                    if self.check_terms(minStr)==True:
                        #allterm[minStr].update(terms)
                        allterm[minStr]=self.updateTerm(minStr,terms)
                    elif self.check_terms(minStr)==False:
                        allterm[minStr]=terms
            else:
                if self.if_compoTerm(minStr)==True:
                    terms['listOFWords'].append(self.splitTerms(minStr))
                minStr=minStr.strip(" ")
                #minStr=minStr.lower()
                if self.check_terms(minStr)==True:
                    allterm[minStr]=self.updateTerm(minStr,terms)
                elif self.check_terms(minStr)==False:
                    allterm[minStr]=terms
        elif self.find_relation(subStr)==False:
            # No subfield markers: classify the raw subject text directly.
            #subStr=subStr.lower()
            if subStr.find('(')!=-1 and subStr.find(')')!=-1:
                terms["relation"].append(self.getParanthises(subStr))
                insidePara=self.inside_Paranthises(subStr)
                #print("insidePrara",insidePara)
                if self.if_compoTerm(insidePara)==True:
                    terms['listOFWords'].append(self.splitTerms(insidePara))
                if self.check_If_Comma(insidePara)==True:
                    listOfobjects= self.findComma(insidePara)
                    i= len(listOfobjects)-1
                    cls=listOfobjects[i]
                    terms["objects"].append(cls)
                    for c in range(0,i):
                        subClass=listOfobjects[c]
                        terms["subClass"].append(subClass)
                    for t in range(0,len(listOfobjects)):
                        mterm=listOfobjects[t]
                        if self.check_terms(mterm)==True:
                            allterm[mterm]=self.updateTerm(mterm,terms)
                        elif self.check_terms(mterm)==False:
                            allterm[mterm]=terms
                else:
                    insidePara=insidePara.strip('')
                    #insidePara=insidePara.lower()
                    if self.check_terms(insidePara)==True:
                        allterm[insidePara]=self.updateTerm(insidePara,terms)
                        #print("after update",terms)
                    elif self.check_terms(insidePara)==False:
                        allterm[insidePara]=terms
                    if self.if_compoTerm(insidePara)==True:
                        terms['listOFWords'].append(self.splitTerms(insidePara))
            elif self.check_If_Comma(subStr)==True:
                #subStr=subStr.strip(" ")
                #subStr=subStr.lower()
                listOfobjects= self.findComma(subStr)
                i= len(listOfobjects)-1
                cls=listOfobjects[i]
                terms["objects"].append(cls)
                for c in range(0,i):
                    subClass=listOfobjects[c]
                    terms["subClass"].append(subClass)
                for e in range(0,len(listOfobjects)):
                    tterm=listOfobjects[e]
                    #tterm=tterm.strip(" ")
                    # tterm=tterm.lower()
                    if self.check_terms(tterm)==True:
                        allterm[tterm]=self.updateTerm(tterm,terms)
                    elif self.check_terms(tterm)==False:
                        allterm[tterm]=terms
            else:
                #subStr=subStr.strip(" ")
                #subStr=subStr.lower()
                if self.if_compoTerm(subStr)==True:
                    terms['listOFWords'].append(self.splitTerms(subStr))
                if self.check_terms(subStr)==True:
                    allterm[subStr]=self.updateTerm(subStr,terms)
                elif self.check_terms(subStr)==False:
                    allterm[subStr]=terms
        else:
            # NOTE(review): unreachable -- find_relation() always returns a bool,
            # so the if/elif above cover every case.
            #subStr=subStr.strip('')
            #subStr=subStr.lower()
            if self.if_compoTerm(subStr)==True:
                terms['listOFWords'].append(self.splitTerms(subStr))
            if self.check_terms(subStr)==True:
                allterm[subStr]=self.updateTerm(subStr,terms)
            elif self.check_terms(subStr)==False:
                allterm[subStr]=terms
        return allterm
    def updateTerm(self,term,rvalue):
        """Merge the record *rvalue* into the existing record for *term*,
        appending only values not already present (listOFWords is skipped)."""
        tvalue=self.allTerms[term]
        for key in tvalue.keys():
            for k in rvalue.keys():
                if key == k:
                    # NOTE(review): 'is' compares identity, not equality; works
                    # only because of CPython small-string interning.
                    if key is 'listOFWords':
                        break
                    else:
                        # for/if/else here stops at the first duplicate value.
                        for i in rvalue[k]:
                            if i in tvalue[key]:
                                break
                            else:
                                tvalue[key].append(i)
        return tvalue
    def similarTerms(self,target):
        """Scan the terms file for entries resembling *target* (substring hits
        plus a Levenshtein-ratio check) and return them as a list."""
        the_same=[]
        counter=0
        # NOTE(review): mode 'rb' combined with encoding -- codecs.open decodes
        # the byte stream; confirm the file is really UTF-8.
        with codecs.open(termfile,'rb',encoding='utf-8')as tf:
            list_of_t=tf.readlines()
            for item in list_of_t:
                item=item.strip('\n')
                if item!=target:
                    if self.if_compoTerm(target):
                        List_target=self.splitTerms(target)
                        for t in List_target:
                            if item.find(t)!=-1:
                                if item not in the_same:
                                    dist=Levenshtein.distance(item,target)
                                    print("the dist:",dist)
                                    if item!=target:
                                        the_same.append(item)
                            # NOTE(review): exact ==0.8 float comparison is
                            # almost never true; probably meant >= 0.8.
                            if Levenshtein.ratio(t,item)==0.8:
                                if item not in the_same:
                                    if re.fullmatch(item,target):
                                        the_same.append(item)
        #print("the ratio is ",the_ratio)
        #print("is",the_same)
        return the_same
    def setLink(self,subject):
        """Return the text after the '***link' marker (implicit None if the
        find() truthiness check fails, i.e. marker at index 0)."""
        if subject.find("***link"):
            linkIndex=subject.index("***link")
            newIndex=linkIndex+len("***link")+1
            link=subject[newIndex:]
            return link
    def setTitle(self,sub):
        """Return the text after the '***title' marker, stripped of ':'."""
        if sub.find("***title"):
            titleIndex=sub.index("***title")
            newIndex=len("***title")+titleIndex
            title=sub[newIndex:].strip(":")
            return title
    def find_relation(self, subjects):
        """True if *subjects* contains any known subfield marker from self.symbols."""
        #print("relation is check:",subjects)
        counter=0
        is_relation=True
        sub_len = len(subjects)
        for key in self.symbols.keys():
            if subjects.rfind(key)!=-1:
                counter=counter+1
        if counter>0:
            print(counter)
            is_relation=True
        else:
            is_relation=False
        print("if the relation",is_relation)
        return is_relation
    def termValue(self,subject):
        """Split the '**'-marked subfields of *subject* into a dict mapping
        each marker's human-readable meaning to its value strings.

        NOTE(review): returns None (implicitly) when *subject* has no '*'.
        """
        #longVal={}
        if subject.find('etc')!=-1or subject.rfind(',etc.')!=-1 or subject.rfind(',etc')!=-1:
            subject=subject.replace('etc.',' ')
            subject=subject.replace(',etc.',' ')
            subject=subject.strip('etc')
        if subject.find('*')!=-1:
            sindex=subject.index('*')
            nsubject=subject[sindex:]
            list_of_values=list(nsubject.split('*'))
            longVal={}
            # NOTE(review): removing from a list while iterating it skips
            # consecutive empty entries.
            for i in list_of_values:
                if i=='':
                    list_of_values.remove(i)
            for key in self.symbols.keys():
                for sym in list_of_values:
                    sym='**'+sym
                    if sym.find(key)!=-1:
                        sym_index=sym.index(key)
                        tofsym=len(key)+sym_index
                        nsym=sym[tofsym:].strip(':').strip(' ')
                        longVal.setdefault(self.symbols[key],[]).append(nsym)
            return longVal
    def findComma(self,sub):
        """Split *sub* on commas; returns [] when there is no comma."""
        newSub=[]
        if sub.find(',')!=-1:
            for s in sub.split(','):
                newSub.append(s)
        return newSub
    def getParanthises(self,sub):
        """Extract the text outside a parenthesised qualifier as an
        'aspect_of' relation; returns a defaultdict(list)."""
        # regex = re.compile('('+')')
        longVal=collections.defaultdict(list)
        if sub.find('(')!=-1 and sub.find(')')!=-1:
            termValue=sub.strip('-')
            termValue=termValue.strip(' ')
            # print("the last char ",termValue[len(termValue)-1])
            #print("term length",len(termValue))
            #print(termValue)
            tindex=termValue.index('(')
            print("the (",tindex)
            eindex=termValue.index(')')
            print("the )",eindex)
            if eindex>tindex and eindex<len(termValue)-1:
                nValue=termValue[eindex+1:]
                longVal.setdefault('aspect_of',[]).append(nValue)
            elif tindex>eindex and tindex<len(termValue)-1:
                nValue=termValue[tindex+1:]
                #longVal.append({"aspect_of":nValue})
                longVal.setdefault('aspect_of',[]).append(nValue)
            elif eindex==len(termValue)-1and tindex<eindex:
                nValue=termValue[:tindex-1]
                #longVal.append({"aspect_of":nValue})
                longVal.setdefault('aspect_of',[]).append(nValue)
            elif tindex==len(termValue)-1and tindex>eindex:
                nValue=termValue[:eindex-1]
                # longVal.append({"aspect_of ":nValue})
                longVal.setdefault('aspect_of',[]).append(nValue)
        return longVal
    def inside_Paranthises(self,sub):
        """Return the text between the first '(' and ')' of *sub* ('' if none)."""
        term=""
        if sub.rfind('(')!=-1 and sub.rfind(')')!=-1:
            s=sub.index('(')
            f=sub.index(')')
            print("start",s,"end with",f)
            if s>f:
                term=sub[f:s]
                term=term.strip(")/(")
            elif s<f:
                term=sub[s:f]
                term=term.strip(")/(")
        return term
    '''
    def CompTermKey(self,sub):
        NumOfKeys=0
        term=""
        counter=0
        #regex = re.compile('('+')')
        kindex=[]
        if sub.find('***Title')!=-1:
            title_index=sub.index('***Title')
            sub=sub[:title_index]
        for key in self.symbols.keys():
            if sub.find(key)!=-1:
                key_length=len(key)
                key_index=sub.index(key)
                kindex.append(key_index)
                counter=counter+1
                print('the key',key_index)
        if len(kindex)>0:
            for i in range(len(kindex)):
                min=kindex[0]
                if kindex[i]<min:
                    min=kindex[i]
        if min >0:
            term=sub[:min]
        return term
    '''
    def CompTermKey(self,sub):
        """Return the text of *sub* before its first '*' ('' if no '*')."""
        t=''
        tindex=0
        if sub.find('*')!=-1:
            tindex=sub.index('*')
            t=sub[:tindex]
        return t
    def if_FindParanthesis(self,sub):
        """True unless '(' or ')' occurs at index 0.

        NOTE(review): find() truthiness again -- -1 (absent) is truthy, so
        this does NOT test for presence of parentheses.
        """
        #regex = re.compile('('+')')
        yes=True
        if sub.find('(')and sub.find(')'):
            yes= True
        else:
            yes=False
        return yes
    def check_If_Comma(self,subject):
        """True if *subject* contains a comma."""
        if subject.find(",")!=-1:
            return True
        else:
            return False
    def check_terms(self,term):
        """True if *term* is already a key of self.allTerms."""
        existTerm= True
        if term in self.allTerms.keys():
            existTerm=True
        else:
            existTerm=False
        return existTerm
    def check_value(self,val):
        """NOTE(review): self.terms is never defined anywhere in this class
        (only self.allTerms is) -- calling this raises AttributeError."""
        if val in self.terms.values():
            return False
        else:
            return True
    def if_compoTerm(self,sub):
        """True if *sub* consists of more than one whitespace-separated word."""
        arr=sub.split()
        counter=0
        for a in arr:
            counter=counter+1
        if counter>1:
            return True
        else:
            return False
    def splitTerms(self,subString):
        """Split *subString* on single spaces into a list of words."""
        allWords=[]
        for world in subString.split(' '):
            allWords.append(world)
        return (allWords)
    def printTerms(self,output):
        """Dump every (term, record) pair of self.allTerms as JSON to *output*."""
        with codecs.open(output,'w',encoding='utf-8')as outfile:
            for item in self.allTerms.items():
                #print("term:",k,outfile)
                json.dump(item,outfile,ensure_ascii=False,indent=4)
    # return self.terms
    #def Find_similarity(self,source,target):
    def num_there(self,txt):
        """True if *txt* contains at least one decimal digit."""
        counter=0
        for c in txt:
            if c.isdigit():
                counter=counter+1
        if counter>=1:
            return True
        else:
            return False
    def create_ontology(self,tr,predicate,subClass,address,booktitle):
        """Build two rdflib graphs -- a property schema ('trtst.rdf') and a
        subject graph ('test2.owl'), both serialised as Turtle -- describing
        term *tr* related to *subClass* via *predicate*.

        NOTE(review): returns None, and the 'dc' namespace URI below contains
        a doubled 'http://http://' -- confirm intended.
        """
        LDT= Namespace("http://www.JceFinalProjectOntology.com/")
        ut=Namespace("http://www.JceFinalProjectOntology.com/subject/#")
        usubClass=URIRef("http://www.JceFinalProjectOntology.com/subject/"+subClass.strip()+'#')
        #LDT.subClass=LDT[subClass]
        print(ut)
        print(usubClass)
        store=IOMemory()
        sty=LDT[predicate]
        g = rdflib.Graph(store=store,identifier=LDT)
        t = ConjunctiveGraph(store=store,identifier=ut)
        print ('Triples in graph before add: ', len(t))
        #g.add((LDT,RDF.type,RDFS.Class))
        g.add((URIRef(LDT),RDF.type,RDFS.Class))
        g.add((URIRef(LDT),RDFS.label,Literal("JFPO")))
        g.add((URIRef(LDT),RDFS.comment,Literal('class of all properties')))
        # One RDF property per known subfield meaning; multi-word meanings are
        # reduced to their first word for the property local name.
        for v in self.symbols.values():
            if self.if_compoTerm(v)==True:
                vs=self.splitTerms(v)[0]
            else:
                vs =v
            g.add((LDT[vs],RDF.type,RDF.Property))
            g.add((LDT[vs],RDFS.label,Literal('has'+vs)))
            g.add((LDT[vs],RDFS.comment,Literal(v)))
            g.add((LDT[vs],RDFS.range,OWL.Class))
            g.add((LDT[vs],RDFS.domain,Literal(vs)))
        g.bind('JFPO',LDT)
        #g.commit()
        g.serialize('trtst.rdf',format='turtle')
        t.add( (ut[tr], RDF.type,OWL.Class) )
        t.add((ut[tr],RDFS.subClassOf,OWL.Thing))
        t.add((ut[tr],RDFS.label,Literal(tr)))
        t.add((ut[tr],DC.title,Literal(booktitle)))
        t.add((ut[tr],DC.source,Literal(address)))
        t.add((ut[tr],DC[predicate],URIRef(usubClass)))
        t.add((ut[tr],LDT[predicate],RDF.Property))
        # NOTE(review): the next two triples duplicate the DC[predicate]
        # statement already added above (set semantics make them no-ops).
        t.add((ut[tr],DC[predicate],URIRef(usubClass)))
        t.add((ut[tr],DC[predicate],URIRef(usubClass)))
        # NOTE(review): 'relation' is assigned but never used.
        relation='has'+predicate
        t.add((ut[tr],LDT.term(predicate),URIRef(usubClass)))
        t.add( (usubClass,RDF.type,OWL.Class))
        t.add((usubClass,RDFS.subClassOf,OWL.Thing))
        t.add((usubClass,RDFS.subClassOf,URIRef(sty)))
        t.add((usubClass,RDFS.label,Literal(subClass)))
        #tc=Graph(store=store,identifier=usubClass)
        t.bind("dc", "http://http://purl.org/dc/elements/1.1/")
        t.bind('JFPO',LDT)
        t.commit()
        #print(t.serialize(format='pretty-xml'))
        t.serialize('test2.owl',format='turtle')
'''
ald=Levenshtein.editops("science computer","computer science")
print(ald)
edit=Levenshtein.distance("computer science","computer science")
dist=Levenshtein.apply_edit(ald,"science computer","computer science")
print(dist)
print(edit)
'''
# Module-level smoke test: builds the table and writes a sample ontology to
# ./trtst.rdf and ./test2.owl as a side effect.
# NOTE(review): create_ontology() has no return statement, so 'rdf' is None
# and the print below always prints None.
table=HashTable()
rdf=table.create_ontology('naturalTresure','location','amman','bokAddr','/t/welcm.json') # function to create ontology
print(rdf)
'''
with codecs.open(output,'w',encoding='utf-8')as outfile:
for k,v in table.allTerms.items():
#if len(k) !=0:
#melon["item"]=list(k)
#melon["Relation"]=list(v)
json.dump({"item":k,"subClasses":v},outfile,ensure_ascii=False,indent=4)
#json.dump(melon,outfile,ensure_ascii=False,indent=4)
''''''
with codecs.open(os.path.join(filePath,"allTerms.txt"),'w',encoding='utf-8')as termFile:
list_of_terms= table.allTerms.keys()
for k in list_of_terms:
print(k,'\n',file=termFile)
with codecs.open(os.path.join(filePath,"value.json"),'w',encoding='utf-8')as valueFile:
list_of_values=table.allTerms.values()
for v in list_of_values:
json.dump(v,valueFile,ensure_ascii=False,indent=4)
'''
| apache-2.0 |
dscorbett/pygments | pygments/lexers/inferno.py | 4 | 3117 | # -*- coding: utf-8 -*-
"""
pygments.lexers.inferno
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Inferno os and all the related stuff.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number
__all__ = ['LimboLexer']
class LimboLexer(RegexLexer):
    """
    Lexer for `Limbo programming language <http://www.vitanuova.com/inferno/limbo.html>`_

    TODO:
        - maybe implement better var declaration highlighting
        - some simple syntax error highlighting

    .. versionadded:: 2.0
    """
    name = 'Limbo'
    aliases = ['limbo']
    filenames = ['*.b']
    mimetypes = ['text/limbo']

    tokens = {
        'whitespace': [
            (r'^(\s*)([a-zA-Z_]\w*:(\s*)\n)',
             bygroups(Text, Name.Label)),
            (r'\n', Text),
            (r'\s+', Text),
            (r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\', String),  # stray backslash
        ],
        'statements': [
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float),
            (r'16r[0-9a-fA-F]+', Number.Hex),
            (r'8r[0-7]+', Number.Oct),
            (r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer),
            (r'[()\[\],.]', Punctuation),
            (r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator),
            # BUG FIX: implicit string concatenation used to join
            # 'exit'+'for' into 'exitfor' and 'or'+'pick' into 'orpick',
            # so the keywords exit, for, or and pick were never matched.
            # The missing '|' separators at the string boundaries restore
            # them as distinct alternatives.
            (r'(alt|break|case|continue|cyclic|do|else|exit|'
             r'for|hd|if|implement|import|include|len|load|or|'
             r'pick|return|spawn|tagof|tl|to|while)\b', Keyword),
            (r'(byte|int|big|real|string|array|chan|list|adt'
             r'|fn|ref|of|module|self|type)\b', Keyword.Type),
            (r'(con|iota|nil)\b', Keyword.Constant),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'root': [
            include('whitespace'),
            default('statement'),
        ],
    }

    def analyse_text(text):
        # Any limbo module implements something
        if re.search(r'^implement \w+;', text, re.MULTILINE):
            return 0.7
# TODO:
# - Make lexers for:
# - asm sources
# - man pages
# - mkfiles
# - module definitions
# - namespace definitions
# - shell scripts
# - maybe keyfiles and fonts
# they all seem to be quite similar to their equivalents
# from unix world, so there should not be a lot of problems
| bsd-2-clause |
wilmoz/servo | components/script/dom/bindings/codegen/parser/tests/test_implements.py | 264 | 5961 | # Import the WebIDL module, so we can do isinstance checks and whatnot
import WebIDL
def WebIDLTest(parser, harness):
    """Exercise the WebIDL 'implements' statement.

    Covers: member merging from implemented interfaces, rejection of
    duplicate member names (direct, across implemented interfaces,
    indirect, and via an implemented interface's ancestor), diamond
    implements graphs, and the restriction that both sides of
    'implements' must be non-callback interfaces.
    """
    # Basic functionality
    threw = False
    try:
        parser.parse("""
            A implements B;
            interface B {
              attribute long x;
            };
            interface A {
              attribute long y;
            };
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(not threw, "Should not have thrown on implements statement "
               "before interfaces")
    harness.check(len(results), 3, "We have three statements")
    harness.ok(isinstance(results[1], WebIDL.IDLInterface), "B is an interface")
    harness.check(len(results[1].members), 1, "B has one member")
    A = results[2]
    harness.ok(isinstance(A, WebIDL.IDLInterface), "A is an interface")
    # A's own member comes first; the member merged in from B follows it.
    harness.check(len(A.members), 2, "A has two members")
    harness.check(A.members[0].identifier.name, "y", "First member is 'y'")
    harness.check(A.members[1].identifier.name, "x", "Second member is 'x'")

    # Duplicated member names not allowed
    threw = False
    try:
        parser.parse("""
            C implements D;
            interface D {
              attribute long x;
            };
            interface C {
              attribute long x;
            };
        """)
        parser.finish()
    except:
        threw = True

    harness.ok(threw, "Should have thrown on implemented interface duplicating "
               "a name on base interface")

    # Same, but duplicated across implemented interfaces
    threw = False
    try:
        parser.parse("""
            E implements F;
            E implements G;
            interface F {
              attribute long x;
            };
            interface G {
              attribute long x;
            };
            interface E {};
        """)
        parser.finish()
    except:
        threw = True

    harness.ok(threw, "Should have thrown on implemented interfaces "
               "duplicating each other's member names")

    # Same, but duplicated across indirectly implemented interfaces
    threw = False
    try:
        parser.parse("""
            H implements I;
            H implements J;
            I implements K;
            interface K {
              attribute long x;
            };
            interface L {
              attribute long x;
            };
            interface I {};
            interface J : L {};
            interface H {};
        """)
        parser.finish()
    except:
        threw = True

    harness.ok(threw, "Should have thrown on indirectly implemented interfaces "
               "duplicating each other's member names")

    # Same, but duplicated across an implemented interface and its parent
    threw = False
    try:
        parser.parse("""
            M implements N;
            interface O {
              attribute long x;
            };
            interface N : O {
              attribute long x;
            };
            interface M {};
        """)
        parser.finish()
    except:
        threw = True

    harness.ok(threw, "Should have thrown on implemented interface and its "
               "ancestor duplicating member names")

    # Reset the parser so we can actually find things where we expect
    # them in the list
    parser = parser.reset()

    # Diamonds should be allowed
    threw = False
    try:
        parser.parse("""
            P implements Q;
            P implements R;
            Q implements S;
            R implements S;
            interface Q {};
            interface R {};
            interface S {
              attribute long x;
            };
            interface P {};
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(not threw, "Diamond inheritance is fine")
    harness.check(results[6].identifier.name, "S", "We should be looking at 'S'")
    harness.check(len(results[6].members), 1, "S should have one member")
    harness.check(results[6].members[0].identifier.name, "x",
                  "S's member should be 'x'")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface {
            };
            callback interface TestCallbackInterface {
            };
            TestInterface implements TestCallbackInterface;
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Should not allow callback interfaces on the right-hand side "
               "of 'implements'")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface {
            };
            callback interface TestCallbackInterface {
            };
            TestCallbackInterface implements TestInterface;
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Should not allow callback interfaces on the left-hand side of "
               "'implements'")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface {
            };
            dictionary Dict {
            };
            Dict implements TestInterface;
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Should not allow non-interfaces on the left-hand side "
               "of 'implements'")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface {
            };
            dictionary Dict {
            };
            TestInterface implements Dict;
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Should not allow non-interfaces on the right-hand side "
               "of 'implements'")
| mpl-2.0 |
dfalt974/SickRage | lib/rebulk/chain.py | 13 | 15353 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chain patterns and handle repetiting capture group
"""
# pylint: disable=super-init-not-called
import itertools
from .loose import call, set_defaults
from .match import Match, Matches
from .pattern import Pattern, filter_match_kwargs
from .remodule import re
class _InvalidChainException(Exception):
"""
Internal exception raised when a chain is not valid
"""
pass
class Chain(Pattern):
    """
    Definition of a pattern chain to search for.

    A chain is an ordered list of sub-patterns (ChainPart objects); a chain
    match is built from consecutive sub-pattern matches with no separator
    characters between them.
    """
    def __init__(self, rebulk, chain_breaker=None, **kwargs):
        call(super(Chain, self).__init__, **kwargs)
        self._kwargs = kwargs
        self._match_kwargs = filter_match_kwargs(kwargs)
        # Per-pattern-type default kwargs, filled in by the *_defaults methods.
        self._defaults = {}
        self._regex_defaults = {}
        self._string_defaults = {}
        self._functional_defaults = {}
        if callable(chain_breaker):
            self.chain_breaker = chain_breaker
        else:
            self.chain_breaker = None
        self.rebulk = rebulk
        self.parts = []

    def defaults(self, **kwargs):
        """
        Define default keyword arguments for all patterns.

        :param kwargs: defaults applied to every part of this chain
        :return: self
        :rtype: Chain
        """
        self._defaults = kwargs
        return self

    def regex_defaults(self, **kwargs):
        """
        Define default keyword arguments for regex patterns.

        :param kwargs: defaults applied to regex parts only
        :return: self
        :rtype: Chain
        """
        self._regex_defaults = kwargs
        return self

    def string_defaults(self, **kwargs):
        """
        Define default keyword arguments for string patterns.

        :param kwargs: defaults applied to string parts only
        :return: self
        :rtype: Chain
        """
        self._string_defaults = kwargs
        return self

    def functional_defaults(self, **kwargs):
        """
        Define default keyword arguments for functional patterns.

        :param kwargs: defaults applied to functional parts only
        :return: self
        :rtype: Chain
        """
        self._functional_defaults = kwargs
        return self

    def chain(self):
        """
        Add a new patterns chain, using configuration from this chain.

        :return: a new Chain inheriting this chain's kwargs and defaults
        :rtype: Chain
        """
        # pylint: disable=protected-access
        chain = self.rebulk.chain(**self._kwargs)
        chain._defaults = dict(self._defaults)
        chain._regex_defaults = dict(self._regex_defaults)
        chain._functional_defaults = dict(self._functional_defaults)
        chain._string_defaults = dict(self._string_defaults)
        return chain

    def regex(self, *pattern, **kwargs):
        """
        Add a regex pattern as the next part of this chain.

        :param pattern: regex pattern(s)
        :param kwargs: pattern options, completed with the chain defaults
        :return: the created chain part
        :rtype: ChainPart
        """
        set_defaults(self._kwargs, kwargs)
        set_defaults(self._regex_defaults, kwargs)
        set_defaults(self._defaults, kwargs)
        pattern = self.rebulk.build_re(*pattern, **kwargs)
        part = ChainPart(self, pattern)
        self.parts.append(part)
        return part

    def functional(self, *pattern, **kwargs):
        """
        Add a functional pattern as the next part of this chain.

        :param pattern: callable pattern(s)
        :param kwargs: pattern options, completed with the chain defaults
        :return: the created chain part
        :rtype: ChainPart
        """
        set_defaults(self._kwargs, kwargs)
        set_defaults(self._functional_defaults, kwargs)
        set_defaults(self._defaults, kwargs)
        pattern = self.rebulk.build_functional(*pattern, **kwargs)
        part = ChainPart(self, pattern)
        self.parts.append(part)
        return part

    def string(self, *pattern, **kwargs):
        """
        Add a string pattern as the next part of this chain.

        :param pattern: string pattern(s)
        :param kwargs: pattern options, completed with the chain defaults
        :return: the created chain part
        :rtype: ChainPart
        """
        set_defaults(self._kwargs, kwargs)
        # BUG FIX: string parts previously received the *functional* defaults
        # (self._functional_defaults), leaving string_defaults() without
        # effect. String parts must use the string defaults.
        set_defaults(self._string_defaults, kwargs)
        set_defaults(self._defaults, kwargs)
        pattern = self.rebulk.build_string(*pattern, **kwargs)
        part = ChainPart(self, pattern)
        self.parts.append(part)
        return part

    def close(self):
        """
        Close chain builder to continue registering other patterns.

        :return: the parent rebulk object
        :rtype:
        """
        return self.rebulk

    def _match(self, pattern, input_string, context=None):
        """
        Find all chain matches in input_string.

        Walks the string, repeatedly trying to match every part of the chain
        consecutively (parts separated by any character break the chain —
        see _truncate_chain_part_matches).
        """
        # pylint: disable=too-many-locals,too-many-nested-blocks
        chain_matches = []
        chain_input_string = input_string
        offset = 0
        while offset < len(input_string):
            chain_found = False
            current_chain_matches = []
            valid_chain = True
            is_chain_start = True
            for chain_part in self.parts:
                try:
                    chain_part_matches, raw_chain_part_matches = Chain._match_chain_part(is_chain_start, chain_part,
                                                                                         chain_input_string,
                                                                                         context)
                    Chain._fix_matches_offset(chain_part_matches, input_string, offset)
                    Chain._fix_matches_offset(raw_chain_part_matches, input_string, offset)
                    if raw_chain_part_matches:
                        grouped_matches_dict = dict()
                        for match_index, match in itertools.groupby(chain_part_matches,
                                                                    lambda m: m.match_index):
                            grouped_matches_dict[match_index] = list(match)
                        grouped_raw_matches_dict = dict()
                        for match_index, raw_match in itertools.groupby(raw_chain_part_matches,
                                                                        lambda m: m.match_index):
                            grouped_raw_matches_dict[match_index] = list(raw_match)
                        for match_index, grouped_raw_matches in grouped_raw_matches_dict.items():
                            chain_found = True
                            offset = grouped_raw_matches[-1].raw_end
                            chain_input_string = input_string[offset:]
                            if not chain_part.is_hidden:
                                grouped_matches = grouped_matches_dict.get(match_index, [])
                                if self._chain_breaker_eval(current_chain_matches + grouped_matches):
                                    current_chain_matches.extend(grouped_matches)
                except _InvalidChainException:
                    # A required part didn't match often enough: abandon this
                    # chain attempt and resume scanning after its first match.
                    valid_chain = False
                    if current_chain_matches:
                        offset = current_chain_matches[0].raw_end
                    break
                is_chain_start = False
            if not chain_found:
                break
            if current_chain_matches and valid_chain:
                match = self._build_chain_match(current_chain_matches, input_string)
                chain_matches.append(match)
        return chain_matches

    def _match_parent(self, match, yield_parent):
        """
        Handle a parent match.

        If the full chain match is rejected, retries after dropping trailing
        groups of child matches; restores the original children/end if no
        shorter variant is accepted.

        :param match:
        :type match:
        :param yield_parent:
        :type yield_parent:
        :return:
        :rtype:
        """
        ret = super(Chain, self)._match_parent(match, yield_parent)
        original_children = Matches(match.children)
        original_end = match.end
        while not ret and match.children:
            last_pattern = match.children[-1].pattern
            last_pattern_children = [child for child in match.children if child.pattern == last_pattern]
            last_pattern_groups_iter = itertools.groupby(last_pattern_children, lambda child: child.match_index)
            last_pattern_groups = {}
            for index, matches in last_pattern_groups_iter:
                last_pattern_groups[index] = list(matches)
            for index in reversed(list(last_pattern_groups)):
                last_matches = list(last_pattern_groups[index])
                for last_match in last_matches:
                    match.children.remove(last_match)
                match.end = match.children[-1].end if match.children else match.start
                ret = super(Chain, self)._match_parent(match, yield_parent)
                if ret:
                    return True
        match.children = original_children
        match.end = original_end
        return ret

    def _build_chain_match(self, current_chain_matches, input_string):
        """Build the aggregated Match spanning all chain part matches."""
        start = None
        end = None
        for match in current_chain_matches:
            if start is None or start > match.start:
                start = match.start
            if end is None or end < match.end:
                end = match.end
        match = call(Match, start, end, pattern=self, input_string=input_string, **self._match_kwargs)
        for chain_match in current_chain_matches:
            if chain_match.children:
                for child in chain_match.children:
                    match.children.append(child)
            if chain_match not in match.children:
                match.children.append(chain_match)
                chain_match.parent = match
        return match

    def _chain_breaker_eval(self, matches):
        """Return True if the optional chain_breaker accepts these matches."""
        return not self.chain_breaker or not self.chain_breaker(Matches(matches))

    @staticmethod
    def _fix_matches_offset(chain_part_matches, input_string, offset):
        """Rebase matches found in a sliced string back onto input_string."""
        for chain_part_match in chain_part_matches:
            if chain_part_match.input_string != input_string:
                chain_part_match.input_string = input_string
                chain_part_match.end += offset
                chain_part_match.start += offset
            if chain_part_match.children:
                Chain._fix_matches_offset(chain_part_match.children, input_string, offset)

    @staticmethod
    def _match_chain_part(is_chain_start, chain_part, chain_input_string, context):
        """Match one chain part, truncate to consecutive matches and validate
        the part's repeater constraints (raises _InvalidChainException)."""
        chain_part_matches, raw_chain_part_matches = chain_part.pattern.matches(chain_input_string, context,
                                                                                with_raw_matches=True)
        chain_part_matches = Chain._truncate_chain_part_matches(is_chain_start, chain_part_matches, chain_part,
                                                                chain_input_string)
        raw_chain_part_matches = Chain._truncate_chain_part_matches(is_chain_start, raw_chain_part_matches, chain_part,
                                                                    chain_input_string)
        Chain._validate_chain_part_matches(raw_chain_part_matches, chain_part)
        return chain_part_matches, raw_chain_part_matches

    @staticmethod
    def _truncate_chain_part_matches(is_chain_start, chain_part_matches, chain_part, chain_input_string):
        """Keep only the leading run of matches with no separator characters
        between them (and none before the first, unless at chain start)."""
        if not chain_part_matches:
            return chain_part_matches
        if not is_chain_start:
            separator = chain_input_string[0:chain_part_matches[0].initiator.raw_start]
            if separator:
                return []
        j = 1
        for i in range(0, len(chain_part_matches) - 1):
            separator = chain_input_string[chain_part_matches[i].initiator.raw_end:
                                           chain_part_matches[i + 1].initiator.raw_start]
            if separator:
                break
            j += 1
        truncated = chain_part_matches[:j]
        if chain_part.repeater_end is not None:
            truncated = [m for m in truncated if m.match_index < chain_part.repeater_end]
        return truncated

    @staticmethod
    def _validate_chain_part_matches(chain_part_matches, chain_part):
        """Raise _InvalidChainException if the part matched fewer times than
        its repeater_start requires."""
        max_match_index = -1
        if chain_part_matches:
            max_match_index = max([m.match_index for m in chain_part_matches])
        if max_match_index + 1 < chain_part.repeater_start:
            raise _InvalidChainException

    @property
    def match_options(self):
        return {}

    @property
    def patterns(self):
        return [self]

    def __repr__(self):
        defined = ""
        if self.defined_at:
            defined = "@%s" % (self.defined_at,)
        return "<%s%s:%s>" % (self.__class__.__name__, defined, self.parts)
class ChainPart(object):
    """
    Part of a pattern chain.

    Holds one sub-pattern together with its repeater bounds
    (repeater_start, repeater_end) and its hidden flag.
    """
    def __init__(self, chain, pattern):
        self._chain = chain
        self.pattern = pattern
        # Default repeater is {1,1}: the part must match exactly once.
        self.repeater_start = 1
        self.repeater_end = 1
        self._hidden = False

    def chain(self):
        """
        Add a new patterns chain, using configuration from the parent chain.

        :return:
        :rtype: Chain
        """
        return self._chain.chain()

    def hidden(self, hidden=True):
        """
        Hide chain part results from the global chain result.

        :param hidden:
        :type hidden: bool
        :return: self
        :rtype: ChainPart
        """
        self._hidden = hidden
        return self

    @property
    def is_hidden(self):
        """
        Check if the chain part is hidden.

        :return:
        :rtype: bool
        """
        return self._hidden

    def regex(self, *pattern, **kwargs):
        """
        Add a regex pattern to the parent chain.

        :param pattern:
        :param kwargs:
        :return: the created chain part
        :rtype: ChainPart
        """
        return self._chain.regex(*pattern, **kwargs)

    def functional(self, *pattern, **kwargs):
        """
        Add a functional pattern to the parent chain.

        :param pattern:
        :param kwargs:
        :return: the created chain part
        :rtype: ChainPart
        """
        return self._chain.functional(*pattern, **kwargs)

    def string(self, *pattern, **kwargs):
        """
        Add a string pattern to the parent chain.

        :param pattern:
        :param kwargs:
        :return: the created chain part
        :rtype: ChainPart
        """
        return self._chain.string(*pattern, **kwargs)

    def close(self):
        """
        Close the chain builder to continue registering other patterns.

        :return: the parent rebulk object
        :rtype:
        """
        return self._chain.close()

    def repeater(self, value):
        """
        Define the repeater of the current chain part.

        Accepted values: an int (exact count), '+' ({1,}), '*' ({0,}),
        '?' ({0,1}) or a '{m,n}' specification. Any other value leaves the
        current repeater unchanged.

        :param value: repeater specification
        :return: self
        :rtype: ChainPart
        """
        try:
            value = int(value)
            self.repeater_start = value
            self.repeater_end = value
            return self
        except ValueError:
            pass
        # BUG FIX: '+' was handled by a standalone `if` instead of being part
        # of this elif chain, so the regex fallback also ran for '+'.
        if value == '+':
            self.repeater_start = 1
            self.repeater_end = None
        elif value == '*':
            self.repeater_start = 0
            self.repeater_end = None
        elif value == '?':
            self.repeater_start = 0
            self.repeater_end = 1
        else:
            match = re.match(r'\{\s*(\d*)\s*,?\s*(\d*)\s*\}', value)
            if match:
                start = match.group(1)
                end = match.group(2)
                if start or end:
                    self.repeater_start = int(start) if start else 0
                    self.repeater_end = int(end) if end else None
        return self

    def __repr__(self):
        return "%s({%s,%s})" % (self.pattern, self.repeater_start, self.repeater_end)
| gpl-3.0 |
bpyoung92/apprtc | src/app_engine/apprtc_test.py | 5 | 6431 | # Copyright 2014 Google Inc. All Rights Reserved.
import json
import time
import unittest
import webtest
import analytics
import apprtc
import constants
import probers
from test_util import CapturingFunction
from test_util import ReplaceFunction
from google.appengine.api import memcache
from google.appengine.ext import testbed
class MockRequest(object):
    """Request double with no query parameters: every lookup misses."""

    def get(self, key):
        """Return None for any key, like a request carrying no parameters."""
        return {}.get(key)
class AppRtcUnitTest(unittest.TestCase):
    """Unit tests for apprtc helpers that need only the App Engine testbed."""

    def setUp(self):
        # First, create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        # Then activate the testbed, which prepares the service stubs for use.
        self.testbed.activate()

    def tearDown(self):
        # Deactivate the testbed so service stubs do not leak between tests.
        self.testbed.deactivate()

    def testGenerateRandomGeneratesStringOfRightLength(self):
        # generate_random(n) must return a string of exactly n characters.
        self.assertEqual(17, len(apprtc.generate_random(17)))
        self.assertEqual(23, len(apprtc.generate_random(23)))
class AppRtcPageHandlerTest(unittest.TestCase):
    """Integration tests for the AppRTC request handlers, exercised through
    a webtest.TestApp wrapping apprtc.app."""

    def setUp(self):
        # First, create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        # Then activate the testbed, which prepares the service stubs for use.
        self.testbed.activate()
        # Next, declare which service stubs you want to use.
        self.testbed.init_memcache_stub()
        self.test_app = webtest.TestApp(apprtc.app)
        # Fake out event reporting.
        self.time_now = time.time()
        # Fake out event reporting and capture arguments.
        self.report_event_replacement = ReplaceFunction(
            analytics,
            'report_event',
            CapturingFunction())

    def tearDown(self):
        self.testbed.deactivate()
        # Dropping the replacement restores the original analytics function.
        del self.report_event_replacement

    def makeGetRequest(self, path):
        """Issue a GET request with a WebKit-like User-Agent header."""
        # PhantomJS uses WebKit, so Safari is closest to the truth.
        return self.test_app.get(path, headers={'User-Agent': 'Safari'})

    def makePostRequest(self, path, body=''):
        """Issue a POST request with a WebKit-like User-Agent header."""
        return self.test_app.post(path, body, headers={'User-Agent': 'Safari'})

    def verifyJoinSuccessResponse(self, response, is_initiator, room_id):
        """Assert a successful /join response and return the client id."""
        self.assertEqual(response.status_int, 200)
        response_json = json.loads(response.body)
        self.assertEqual('SUCCESS', response_json['result'])
        params = response_json['params']
        caller_id = params['client_id']
        self.assertTrue(len(caller_id) > 0)
        self.assertEqual(json.dumps(is_initiator), params['is_initiator'])
        self.assertEqual(room_id, params['room_id'])
        self.assertEqual([], params['error_messages'])
        self.assertEqual([], params['warning_messages'])
        return caller_id

    def testConnectingWithoutRoomIdServesIndex(self):
        response = self.makeGetRequest('/')
        self.assertEqual(response.status_int, 200)
        self.assertNotRegexpMatches(response.body, 'roomId:')

    def testConnectingWithRoomIdServesIndex(self):
        response = self.makeGetRequest('/r/testRoom')
        self.assertEqual(response.status_int, 200)
        self.assertRegexpMatches(response.body, 'roomId: \'testRoom\'')

    def testJoinAndLeave(self):
        room_id = 'foo'
        # Join the caller.
        response = self.makePostRequest('/join/' + room_id)
        caller_id = self.verifyJoinSuccessResponse(response, True, room_id)
        # Join the callee.
        response = self.makePostRequest('/join/' + room_id)
        callee_id = self.verifyJoinSuccessResponse(response, False, room_id)
        # The third user will get an error.
        response = self.makePostRequest('/join/' + room_id)
        self.assertEqual(response.status_int, 200)
        response_json = json.loads(response.body)
        self.assertEqual('FULL', response_json['result'])
        # The caller and the callee leave.
        self.makePostRequest('/leave/' + room_id + '/' + caller_id)
        self.makePostRequest('/leave/' + room_id + '/' + callee_id)
        # Another user becomes the new caller.
        response = self.makePostRequest('/join/' + room_id)
        caller_id = self.verifyJoinSuccessResponse(response, True, room_id)
        self.makePostRequest('/leave/' + room_id + '/' + caller_id)

    def testCallerMessagesForwardedToCallee(self):
        room_id = 'foo'
        # Join the caller.
        response = self.makePostRequest('/join/' + room_id)
        caller_id = self.verifyJoinSuccessResponse(response, True, room_id)
        # Caller's messages should be saved.
        messages = ['1', '2', '3']
        path = '/message/' + room_id + '/' + caller_id
        for msg in messages:
            response = self.makePostRequest(path, msg)
            response_json = json.loads(response.body)
            self.assertEqual('SUCCESS', response_json['result'])
        # The callee joining should receive the buffered messages.
        response = self.makePostRequest('/join/' + room_id)
        callee_id = self.verifyJoinSuccessResponse(response, False, room_id)
        received_msgs = json.loads(response.body)['params']['messages']
        self.assertEqual(messages, received_msgs)
        self.makePostRequest('/leave/' + room_id + '/' + caller_id)
        self.makePostRequest('/leave/' + room_id + '/' + callee_id)

    def setWssHostStatus(self, index1, status1, index2, status2):
        """Store fake probing results for two WSS host/port pairs."""
        probing_results = {}
        probing_results[constants.WSS_HOST_PORT_PAIRS[index1]] = {
            constants.WSS_HOST_IS_UP_KEY: status1
        }
        probing_results[constants.WSS_HOST_PORT_PAIRS[index2]] = {
            constants.WSS_HOST_IS_UP_KEY: status2
        }
        probers.ProbeColliderPage().store_instance_state(probing_results)

    def verifyRequest(self, expectedIndex):
        """Assert get_wss_parameters picks the host at expectedIndex."""
        request = MockRequest()
        wss_url, wss_post_url = apprtc.get_wss_parameters(request)
        self.assertIn(constants.WSS_HOST_PORT_PAIRS[expectedIndex], wss_url)
        self.assertIn(constants.WSS_HOST_PORT_PAIRS[expectedIndex], wss_post_url)

    def testGetWssHostParameters(self):
        request = MockRequest()
        # With no status set, should use fallback.
        self.verifyRequest(0)
        # With an invalid value in memcache, should use fallback.
        memcache_client = memcache.Client()
        memcache_client.set(constants.WSS_HOST_ACTIVE_HOST_KEY, 'abc')
        self.verifyRequest(0)
        # With an invalid value in memcache, should use fallback.
        memcache_client = memcache.Client()
        memcache_client.set(constants.WSS_HOST_ACTIVE_HOST_KEY, ['abc', 'def'])
        self.verifyRequest(0)
        # With both hosts failing, should use fallback.
        self.setWssHostStatus(0, False, 1, False)
        self.verifyRequest(0)
        # Second host passing.
        self.setWssHostStatus(0, False, 1, True)
        self.verifyRequest(1)
        # Both hosts passing, but second host for longer.
        self.setWssHostStatus(1, True, 0, True)
        self.verifyRequest(1)
# Support running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
AveryOS/binutils | gdb/testsuite/gdb.python/py-framefilter-invalidarg.py | 46 | 1937 | # Copyright (C) 2014-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is part of the GDB testsuite. It tests Python-based
# frame-filters.
import gdb
import itertools
from gdb.FrameDecorator import FrameDecorator
import copy
class Reverse_Function (FrameDecorator):
    """Frame decorator that reports the frame's function name reversed.

    For 'end_func' it additionally appends the value of the frame's local
    variable 'str' — this file deliberately produces unusual decorator
    output for the invalid-argument frame-filter test.
    """

    def __init__(self, fobj):
        super(Reverse_Function, self).__init__(fobj)
        self.fobj = fobj

    def function (self):
        fname = str (self.fobj.function())
        # NOTE(review): str() never returns None, so only the empty-string
        # half of this guard can ever trigger; kept as-is for this test.
        if (fname == None or fname == ""):
            return None
        if fname == 'end_func':
            # Pull the callee's local string variable from the inferior frame.
            extra = self.fobj.inferior_frame().read_var('str').string()
        else:
            extra = ''
        fname = fname[::-1] + extra
        return fname
class FrameFilter ():
    """Frame filter that wraps every frame in a Reverse_Function decorator.

    Registers itself in gdb.frame_filters at construction time.
    """

    def __init__ (self):
        self.name = "Reverse"
        self.priority = 100
        self.enabled = True
        # Registration makes the filter visible to gdb's backtrace machinery.
        gdb.frame_filters [self.name] = self

    def filter (self, frame_iter):
        # Python 3.x moved the itertools.imap functionality to map(),
        # so check if it is available.
        if hasattr(itertools, "imap"):
            frame_iter = itertools.imap (Reverse_Function,
                                         frame_iter)
        else:
            frame_iter = map(Reverse_Function, frame_iter)

        return frame_iter

# Instantiate (and thereby register) the filter when this file is sourced.
FrameFilter()
| gpl-2.0 |
onlineconvert/onlineconvert-api-sdk-python3 | client/jobs_api.py | 1 | 11925 | #!/usr/bin/env python
"""
JobsApi.py
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
import sys
import os
from .models import *
class JobsApi(object):
    """Client for the /jobs endpoints of the Online Convert API.

    NOTE: this class was originally generated by the swagger code generator;
    the request plumbing that every endpoint duplicated now lives in the
    private _call_api helper. The public method signatures are unchanged.
    """

    def __init__(self, apiClient):
        self.apiClient = apiClient

    def _call_api(self, method_name, resource_path, http_method, all_params,
                  query_names, header_names, path_names, response_type,
                  kwargs):
        """Shared request pipeline for all /jobs endpoints.

        Args:
            method_name, str: public method name, used in error messages.
            resource_path, str: path template, may contain {placeholders}.
            http_method, str: HTTP verb ('GET', 'POST', ...).
            all_params, list[str]: keyword arguments accepted by the caller.
            query_names, list[str]: params sent as query string values.
            header_names, list[str]: params sent as request headers.
            path_names, list[str]: params substituted into resource_path.
            response_type, str: type name passed to apiClient.deserialize.
            kwargs, dict: the keyword arguments received by the caller.

        Returns: the deserialized response, or None on empty response.
        Raises: TypeError if kwargs contains an unknown keyword.
        """
        for key in kwargs:
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s' to method %s"
                    % (key, method_name))

        resource_path = resource_path.replace('{format}', 'json')

        query_params = {}
        for name in query_names:
            if name in kwargs:
                query_params[name] = self.apiClient.toPathValue(kwargs[name])

        header_params = {}
        for name in header_names:
            if name in kwargs:
                header_params[name] = kwargs[name]

        for name in path_names:
            if name in kwargs:
                replacement = str(self.apiClient.toPathValue(kwargs[name]))
                resource_path = resource_path.replace(
                    '{' + name + '}', replacement)

        post_data = kwargs.get('body')

        response = self.apiClient.callAPI(resource_path, http_method,
                                          query_params, post_data,
                                          header_params)
        if not response:
            return None
        return self.apiClient.deserialize(response, response_type)

    def jobs_get(self, **kwargs):
        """List of jobs active for the current user identified by the key.

        Returns the list of jobs for the given user. In order to get the
        jobs a key or token must be provided:
          - If the user key is provided, all jobs for the current user are
            returned.
          - If one token is provided, the job assigned to that token (if
            any) is returned.
        The request is paginated with 50 elements per page in any case.

        Args:
            status, str: Filter the status of the job. (required)
            x_oc_token, str: Token for authentication for the current job (required)
            x_oc_api_key, str: Api key for the user to filter. (required)
            page, number: Pagination for list of elements. (required)

        Returns: list[Job]
        """
        return self._call_api(
            'jobs_get', '/jobs', 'GET',
            all_params=['status', 'x_oc_token', 'x_oc_api_key', 'page'],
            query_names=['status', 'page'],
            header_names=['x_oc_token', 'x_oc_api_key'],
            path_names=[],
            response_type='list[Job]',
            kwargs=kwargs)

    def jobs_post(self, **kwargs):
        """Creates a new Job with the user key.

        Args:
            x_oc_api_key, str: Api key for the user to filter. (required)
            body, Job: Content of the job. (required)

        Returns: Job
        """
        return self._call_api(
            'jobs_post', '/jobs', 'POST',
            all_params=['x_oc_api_key', 'body'],
            query_names=[],
            header_names=['x_oc_api_key'],
            path_names=[],
            response_type='Job',
            kwargs=kwargs)

    def jobs_job_id_get(self, **kwargs):
        """Get information about a Job.

        Args:
            x_oc_token, str: Token for authentication for the current job (required)
            x_oc_api_key, str: Api key for the user to filter. (required)
            job_id, str: ID of job that needs to be fetched (required)

        Returns: Job
        """
        return self._call_api(
            'jobs_job_id_get', '/jobs/{job_id}', 'GET',
            all_params=['x_oc_token', 'x_oc_api_key', 'job_id'],
            query_names=[],
            header_names=['x_oc_token', 'x_oc_api_key'],
            path_names=['job_id'],
            response_type='Job',
            kwargs=kwargs)

    def jobs_job_id_delete(self, **kwargs):
        """Cancels a job created that hasn't been started. (Allows cancelling
        jobs in process.)

        Args:
            x_oc_token, str: Token for authentication for the current job (required)
            x_oc_api_key, str: Api key for the user to filter. (required)
            job_id, str: ID of job that needs to be fetched (required)

        Returns: Job
        """
        return self._call_api(
            'jobs_job_id_delete', '/jobs/{job_id}', 'DELETE',
            all_params=['x_oc_token', 'x_oc_api_key', 'job_id'],
            query_names=[],
            header_names=['x_oc_token', 'x_oc_api_key'],
            path_names=['job_id'],
            response_type='Job',
            kwargs=kwargs)

    def jobs_job_id_patch(self, **kwargs):
        """Modifies the job identified by the id, allows to start a created job.

        Args:
            body, Job: Content of the job. (required)
            x_oc_token, str: Token for authentication for the current job (required)
            x_oc_api_key, str: Api key for the user to filter. (required)
            job_id, str: ID of job that needs to be fetched (required)

        Returns: Job
        """
        return self._call_api(
            'jobs_job_id_patch', '/jobs/{job_id}', 'PATCH',
            all_params=['body', 'x_oc_token', 'x_oc_api_key', 'job_id'],
            query_names=[],
            header_names=['x_oc_token', 'x_oc_api_key'],
            path_names=['job_id'],
            response_type='Job',
            kwargs=kwargs)

    def jobs_job_id_threads_get(self, **kwargs):
        """Get list of threads defined for the current job.

        Args:
            x_oc_token, str: Token for authentication for the current job (required)
            x_oc_api_key, str: Api key for the user to filter. (required)
            job_id, str: ID of job that needs to be fetched (required)

        Returns: list[Thread]
        """
        return self._call_api(
            'jobs_job_id_threads_get', '/jobs/{job_id}/threads', 'GET',
            all_params=['x_oc_token', 'x_oc_api_key', 'job_id'],
            query_names=[],
            header_names=['x_oc_token', 'x_oc_api_key'],
            path_names=['job_id'],
            response_type='list[Thread]',
            kwargs=kwargs)
| apache-2.0 |
basicthinker/THNVM | src/arch/x86/isa/insts/simd128/floating_point/arithmetic/square_root.py | 91 | 3871 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Microcode definitions for the x86 SQRT family of SSE/SSE2 instructions
# (SQRTSS/SQRTPS single precision, SQRTSD/SQRTPD double precision; scalar
# and packed, with register, memory and RIP-relative operand forms).
# The string is consumed by gem5's ISA description parser, not by Python.
microcode = '''
def macroop SQRTSS_XMM_XMM {
msqrt xmml, xmmlm, size=4, ext=Scalar
};
def macroop SQRTSS_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
msqrt xmml, ufp1, size=4, ext=Scalar
};
def macroop SQRTSS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
msqrt xmml, ufp1, size=4, ext=Scalar
};
def macroop SQRTPS_XMM_XMM {
msqrt xmml, xmmlm, size=4, ext=0
msqrt xmmh, xmmhm, size=4, ext=0
};
def macroop SQRTPS_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
msqrt xmml, ufp1, size=4, ext=0
msqrt xmmh, ufp2, size=4, ext=0
};
def macroop SQRTPS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
msqrt xmml, ufp1, size=4, ext=0
msqrt xmmh, ufp2, size=4, ext=0
};
def macroop SQRTSD_XMM_XMM {
msqrt xmml, xmmlm, size=8, ext=Scalar
};
def macroop SQRTSD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
msqrt xmml, ufp1, size=8, ext=Scalar
};
def macroop SQRTSD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
msqrt xmml, ufp1, size=8, ext=Scalar
};
def macroop SQRTPD_XMM_XMM {
msqrt xmml, xmmlm, size=8, ext=0
msqrt xmmh, xmmhm, size=8, ext=0
};
def macroop SQRTPD_XMM_M {
ldfp ufp1, seg, sib, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
msqrt xmml, ufp1, size=8, ext=0
msqrt xmmh, ufp2, size=8, ext=0
};
def macroop SQRTPD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, "DISPLACEMENT", dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
msqrt xmml, ufp1, size=8, ext=0
msqrt xmmh, ufp2, size=8, ext=0
};
'''
| bsd-3-clause |
jeanlinux/calibre | src/calibre/devices/mtp/test.py | 14 | 9378 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import unittest, gc, io
from calibre.constants import iswindows, islinux
from calibre.utils.icu import lower
from calibre.devices.mtp.driver import MTP_DEVICE
from calibre.devices.scanner import DeviceScanner
class ProgressCallback(object):

    """Callable that records progress notifications.

    Tracks how many times it was invoked (``count``) and whether it was
    ever called with ``pos == total``, i.e. completion was signalled
    (``end_called``).
    """

    def __init__(self):
        self.count = 0
        self.end_called = False

    def __call__(self, pos, total):
        self.count += 1
        if pos == total:
            self.end_called = True
class TestDeviceInteraction(unittest.TestCase):

    """Integration tests that exercise a real, connected MTP device.

    Tests skip themselves (via check_setup) when no device is detected or
    when the device does not have enough free space.
    """

    @classmethod
    def setUpClass(cls):
        # Detect and open the first managed MTP device. On any failure,
        # cls.dev and/or cls.storage stay None so tests can skip.
        cls.dev = cls.storage = None
        cls.dev = MTP_DEVICE(None)
        cls.dev.startup()
        cls.scanner = DeviceScanner()
        cls.scanner.scan()
        cd = cls.dev.detect_managed_devices(cls.scanner.devices)
        if cd is None:
            cls.dev.shutdown()
            cls.dev = None
            return
        cls.dev.open(cd, 'test_library')
        # Require at least 10 MB free on the device's primary storage
        if cls.dev.free_space()[0] < 10*(1024**2):
            return
        cls.dev.filesystem_cache  # property access forces the cache build
        cls.storage = cls.dev.filesystem_cache.entries[0]

    @classmethod
    def tearDownClass(cls):
        if cls.dev is not None:
            cls.dev.shutdown()
            cls.dev = None

    def setUp(self):
        # Objects created by a test; removed in reverse creation order in
        # tearDown so children are deleted before their parent folders.
        self.cleanup = []

    def tearDown(self):
        for obj in reversed(self.cleanup):
            self.dev.delete_file_or_folder(obj)

    def check_setup(self):
        # Skip the calling test when setUpClass could not prepare a device.
        if self.dev is None:
            self.skipTest('No MTP device detected')
        if self.storage is None:
            self.skipTest('The connected device does not have enough free space')

    def test_folder_operations(self):
        ''' Test the creation of folders, duplicate folders and sub folders '''
        self.check_setup()

        # Create a folder
        name = 'zzz-test-folder'
        folder = self.dev.create_folder(self.storage, name)
        self.cleanup.append(folder)
        self.assertTrue(folder.is_folder)
        self.assertEqual(folder.parent_id, self.storage.object_id)
        self.assertEqual(folder.storage_id, self.storage.object_id)
        self.assertEqual(lower(name), lower(folder.name))

        # Create a sub-folder
        name = 'sub-folder'
        subfolder = self.dev.create_folder(folder, name)
        self.assertTrue(subfolder.is_folder)
        self.assertEqual(subfolder.parent_id, folder.object_id)
        self.assertEqual(subfolder.storage_id, self.storage.object_id)
        self.assertEqual(lower(name), lower(subfolder.name))
        self.cleanup.append(subfolder)

        # Check that creating an existing folder returns that folder (case
        # insensitively)
        self.assertIs(subfolder, self.dev.create_folder(folder,
            'SUB-FOLDER'),
            msg='Creating an existing folder did not return the existing folder')

        # Check that creating folders as children of files is not allowed
        root_file = [f for f in self.dev.filesystem_cache.entries[0].files if
                not f.is_folder]
        if root_file:
            with self.assertRaises(ValueError):
                self.dev.create_folder(root_file[0], 'sub-folder')

    def test_file_transfer(self):
        ''' Test transferring files to and from the device '''
        self.check_setup()

        # Create a folder
        name = 'zzz-test-folder'
        folder = self.dev.create_folder(self.storage, name)
        self.cleanup.append(folder)
        self.assertTrue(folder.is_folder)
        self.assertEqual(folder.parent_id, self.storage.object_id)

        # Check simple file put/get of a 1 MB payload
        size = 1024**2
        raw = io.BytesIO(b'a'*size)
        raw.seek(0)
        name = 'test-file.txt'
        pc = ProgressCallback()
        f = self.dev.put_file(folder, name, raw, size, callback=pc)
        self.cleanup.append(f)
        self.assertEqual(f.name, name)
        self.assertEqual(f.size, size)
        self.assertEqual(f.parent_id, folder.object_id)
        self.assertEqual(f.storage_id, folder.storage_id)
        self.assertTrue(pc.end_called,
                msg='Progress callback not called with equal values (put_file)')
        self.assertTrue(pc.count > 1,
                msg='Progress callback only called once (put_file)')

        # Read the file back and verify content and progress reporting
        raw2 = io.BytesIO()
        pc = ProgressCallback()
        self.dev.get_mtp_file(f, raw2, callback=pc)
        self.assertEqual(raw.getvalue(), raw2.getvalue())
        self.assertTrue(pc.end_called,
                msg='Progress callback not called with equal values (get_file)')
        self.assertTrue(pc.count > 1,
                msg='Progress callback only called once (get_file)')

        # Check file replacement
        raw = io.BytesIO(b'abcd')
        raw.seek(0)
        size = 4
        f = self.dev.put_file(folder, name, raw, size)
        self.cleanup.append(f)
        self.assertEqual(f.name, name)
        self.assertEqual(f.size, size)
        self.assertEqual(f.parent_id, folder.object_id)
        self.assertEqual(f.storage_id, folder.storage_id)

        # Check that we get an error with replace=False
        raw.seek(0)
        with self.assertRaises(ValueError):
            self.dev.put_file(folder, name, raw, size, replace=False)

        # Check that we can put a file into the root
        raw.seek(0)
        name = 'zzz-test-file.txt'
        f = self.dev.put_file(self.storage, name, raw, size)
        self.cleanup.append(f)
        self.assertEqual(f.name, name)
        self.assertEqual(f.size, size)
        self.assertEqual(f.parent_id, self.storage.object_id)
        self.assertEqual(f.storage_id, self.storage.storage_id)

        raw2 = io.BytesIO()
        self.dev.get_mtp_file(f, raw2)
        self.assertEqual(raw.getvalue(), raw2.getvalue())

    def measure_memory_usage(self, repetitions, func, *args, **kwargs):
        # Return memory growth after calling func `repetitions` times,
        # with GC disabled during the calls so growth is attributable.
        # NOTE(review): units are whatever calibre.utils.mem.memory()
        # reports -- confirm before interpreting absolute values.
        from calibre.utils.mem import memory
        gc.disable()
        try:
            start_mem = memory()
            for i in xrange(repetitions):
                func(*args, **kwargs)
            for i in xrange(3): gc.collect()
            end_mem = memory()
        finally:
            gc.enable()
        return end_mem - start_mem

    def check_memory(self, once, many, msg, factor=2):
        # A leak-free operation's N-call memory use should stay within
        # `factor` times a single call's use.
        msg += ' for once: %g for many: %g'%(once, many)
        if once > 0:
            self.assertTrue(many <= once*factor, msg=msg)
        else:
            self.assertTrue(many <= 0.01, msg=msg)

    @unittest.skipUnless(iswindows or islinux, 'Can only test for leaks on windows and linux')
    def test_memory_leaks(self):
        ''' Test for memory leaks in the C module '''
        self.check_setup()

        # Test device scanning
        used_by_one = self.measure_memory_usage(1,
                self.dev.detect_managed_devices, self.scanner.devices,
                force_refresh=True)

        used_by_many = self.measure_memory_usage(100,
                self.dev.detect_managed_devices, self.scanner.devices,
                force_refresh=True)

        self.check_memory(used_by_one, used_by_many,
                'Memory consumption during device scan')

        # Test file transfer (100 KB payload)
        size = 1024*100
        raw = io.BytesIO(b'a'*size)
        raw.seek(0)
        name = 'zzz-test-file.txt'

        def send_file(storage, name, raw, size):
            raw.seek(0)
            pc = ProgressCallback()
            f = self.dev.put_file(storage, name, raw, size, callback=pc)
            self.cleanup.append(f)
            del pc

        used_once = self.measure_memory_usage(1, send_file, self.storage, name,
                raw, size)
        used_many = self.measure_memory_usage(20, send_file, self.storage, name,
                raw, size)

        self.check_memory(used_once, used_many,
                'Memory consumption during put_file:')

        def get_file(f):
            raw = io.BytesIO()
            pc = ProgressCallback()
            self.dev.get_mtp_file(f, raw, callback=pc)
            raw.truncate(0)
            del raw
            del pc

        f = self.storage.file_named(name)
        used_once = self.measure_memory_usage(1, get_file, f)
        used_many = self.measure_memory_usage(20, get_file, f)

        self.check_memory(used_once, used_many,
                'Memory consumption during get_file:')

        # Test get_filesystem (walks the device's whole object tree)
        used_by_one = self.measure_memory_usage(1,
                self.dev.dev.get_filesystem, self.storage.object_id,
                lambda x, l:True)

        used_by_many = self.measure_memory_usage(5,
                self.dev.dev.get_filesystem, self.storage.object_id,
                lambda x, l: True)

        self.check_memory(used_by_one, used_by_many,
                'Memory consumption during get_filesystem')
def tests():
    """Build and return the suite of MTP device interaction tests."""
    loader = unittest.TestLoader()
    # return loader.loadTestsFromName('test.TestDeviceInteraction.test_memory_leaks')
    return loader.loadTestsFromTestCase(TestDeviceInteraction)
def run():
    """Run the full test suite with a verbose text runner."""
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(tests())


if __name__ == '__main__':
    run()
| gpl-3.0 |
lidiamcfreitas/FenixScheduleMaker | oldFiles/project-env/lib/python2.7/site-packages/wheel/install.py | 472 | 18070 | """
Operations on existing wheel files, including basic installation.
"""
# XXX see patched pip to install
import sys
import warnings
import os.path
import re
import zipfile
import hashlib
import csv
import shutil
# Largest representable platform int, used as a sentinel "worst rank".
try:
    _big_number = sys.maxsize
except NameError:
    # Fallback for very old Pythons without sys.maxsize.
    # NOTE(review): a missing attribute raises AttributeError, not
    # NameError, so this fallback appears unreachable -- confirm.
    _big_number = sys.maxint
from wheel.decorator import reify
from wheel.util import (urlsafe_b64encode, from_json, urlsafe_b64decode,
native, binary, HashingFile)
from wheel import signatures
from wheel.pkginfo import read_pkg_info_bytes
from wheel.util import open_for_csv
from .pep425tags import get_supported
from .paths import get_install_paths
# The next major version after this version of the 'wheel' tool:
VERSION_TOO_HIGH = (1, 0)
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE).match
def parse_version(version):
    """Use parse_version from pkg_resources or distutils as available."""
    # On first call, resolve the real implementation and rebind the
    # module-level name to it, so the import/fallback logic runs only once.
    global parse_version
    try:
        from pkg_resources import parse_version
    except ImportError:
        # setuptools not installed: fall back to distutils' LooseVersion.
        from distutils.version import LooseVersion as parse_version
    return parse_version(version)
class BadWheelFile(ValueError):
    """Raised when a wheel file is malformed or fails hash/signature
    verification."""
    pass
class WheelFile(object):
    """Parse wheel-specific attributes from a wheel (.whl) file and offer
    basic installation and verification support.

    WheelFile can be used to simply parse a wheel filename by avoiding the
    methods that require the actual file contents."""

    WHEEL_INFO = "WHEEL"
    RECORD = "RECORD"

    def __init__(self,
                 filename,
                 fp=None,
                 append=False,
                 context=get_supported):
        """
        :param fp: A seekable file-like object or None to open(filename).
        :param append: Open archive in append mode.
        :param context: Function returning list of supported tags. Wheels
            must have the same context to be sortable.
        """
        self.filename = filename
        self.fp = fp
        self.append = append
        self.context = context
        basename = os.path.basename(filename)
        self.parsed_filename = WHEEL_INFO_RE(basename)
        if not basename.endswith('.whl') or self.parsed_filename is None:
            raise BadWheelFile("Bad filename '%s'" % filename)

    def __repr__(self):
        return self.filename

    @property
    def distinfo_name(self):
        # Archive-internal name of the .dist-info directory.
        return "%s.dist-info" % self.parsed_filename.group('namever')

    @property
    def datadir_name(self):
        # Archive-internal name of the .data directory (scripts/headers/...).
        return "%s.data" % self.parsed_filename.group('namever')

    @property
    def record_name(self):
        return "%s/%s" % (self.distinfo_name, self.RECORD)

    @property
    def wheelinfo_name(self):
        return "%s/%s" % (self.distinfo_name, self.WHEEL_INFO)

    @property
    def tags(self):
        """A wheel file is compatible with the Cartesian product of the
        period-delimited tags in its filename.
        To choose a wheel file among several candidates having the same
        distribution version 'ver', an installer ranks each triple of
        (pyver, abi, plat) that its Python installation can run, sorting
        the wheels by the best-ranked tag it supports and then by their
        arity which is just len(list(compatibility_tags)).
        """
        tags = self.parsed_filename.groupdict()
        for pyver in tags['pyver'].split('.'):
            for abi in tags['abi'].split('.'):
                for plat in tags['plat'].split('.'):
                    yield (pyver, abi, plat)

    # Alias kept for callers that use the older attribute name.
    compatibility_tags = tags

    @property
    def arity(self):
        """The number of compatibility tags the wheel declares."""
        return len(list(self.compatibility_tags))

    @property
    def rank(self):
        """
        Lowest index of any of this wheel's tags in self.context(), and the
        arity e.g. (0, 1)
        """
        return self.compatibility_rank(self.context())

    @property
    def compatible(self):
        return self.rank[0] != _big_number  # bad API!

    # deprecated:
    def compatibility_rank(self, supported):
        """Rank the wheel against the supported tags. Smaller ranks are more
        compatible!
        :param supported: A list of compatibility tags that the current
            Python implemenation can run.
        """
        preferences = []
        for tag in self.compatibility_tags:
            try:
                preferences.append(supported.index(tag))
            # Tag not present
            except ValueError:
                pass
        if len(preferences):
            return (min(preferences), self.arity)
        # No tag matched: worst possible rank.
        return (_big_number, 0)

    # deprecated
    def supports_current_python(self, x):
        assert self.context == x, 'context mismatch'
        return self.compatible

    # Comparability.
    # Wheels are equal if they refer to the same file.
    # If two wheels are not equal, compare based on (in this order):
    #   1. Name
    #   2. Version
    #   3. Compatibility rank
    #   4. Filename (as a tiebreaker)

    @property
    def _sort_key(self):
        # Rank components are negated so that "better" (smaller) ranks
        # sort as larger tuples.
        return (self.parsed_filename.group('name'),
                parse_version(self.parsed_filename.group('ver')),
                tuple(-x for x in self.rank),
                self.filename)

    def __eq__(self, other):
        return self.filename == other.filename

    def __ne__(self, other):
        return self.filename != other.filename

    def __lt__(self, other):
        if self.context != other.context:
            raise TypeError("{0}.context != {1}.context".format(self, other))
        return self._sort_key < other._sort_key

        # XXX prune -- everything below this return is unreachable dead code.

        sn = self.parsed_filename.group('name')
        on = other.parsed_filename.group('name')
        if sn != on:
            return sn < on
        sv = parse_version(self.parsed_filename.group('ver'))
        ov = parse_version(other.parsed_filename.group('ver'))
        if sv != ov:
            return sv < ov
        # Compatibility
        if self.context != other.context:
            raise TypeError("{0}.context != {1}.context".format(self, other))
        sc = self.rank
        oc = other.rank
        if sc != None and oc != None and sc != oc:
            # Smaller compatibility ranks are "better" than larger ones,
            # so we have to reverse the sense of the comparison here!
            return sc > oc
        elif sc == None and oc != None:
            return False
        return self.filename < other.filename

    def __gt__(self, other):
        return other < self

    def __le__(self, other):
        return self == other or self < other

    def __ge__(self, other):
        return self == other or other < self

    #
    # Methods using the file's contents:
    #

    @reify
    def zipfile(self):
        # Lazily open (and, unless appending, verify) the underlying zip.
        mode = "r"
        if self.append:
            mode = "a"
        vzf = VerifyingZipFile(self.fp if self.fp else self.filename, mode)
        if not self.append:
            self.verify(vzf)
        return vzf

    @reify
    def parsed_wheel_info(self):
        """Parse wheel metadata (the .data/WHEEL file)"""
        return read_pkg_info_bytes(self.zipfile.read(self.wheelinfo_name))

    def check_version(self):
        # Refuse wheels declaring a spec version newer than we understand.
        version = self.parsed_wheel_info['Wheel-Version']
        if tuple(map(int, version.split('.'))) >= VERSION_TOO_HIGH:
            raise ValueError("Wheel version is too high")

    @reify
    def install_paths(self):
        """
        Consult distutils to get the install paths for our dist. A dict with
        ('purelib', 'platlib', 'headers', 'scripts', 'data').

        We use the name from our filename as the dist name, which means headers
        could be installed in the wrong place if the filesystem-escaped name
        is different than the Name. Who cares?
        """
        name = self.parsed_filename.group('name')
        return get_install_paths(name)

    def install(self, force=False, overrides={}):
        """
        Install the wheel into site-packages.

        :param force: overwrite existing destination files instead of
            raising ValueError.
        :param overrides: optional mapping overriding individual install
            path keys ('purelib', 'platlib', 'scripts', ...).
            NOTE(review): mutable default argument -- harmless here only
            because it is never mutated.
        """
        # Utility to get the target directory for a particular key
        def get_path(key):
            return overrides.get(key) or self.install_paths[key]

        # The base target location is either purelib or platlib
        if self.parsed_wheel_info['Root-Is-Purelib'] == 'true':
            root = get_path('purelib')
        else:
            root = get_path('platlib')

        # Parse all the names in the archive
        name_trans = {}
        for info in self.zipfile.infolist():
            name = info.filename
            # Zip files can contain entries representing directories.
            # These end in a '/'.
            # We ignore these, as we create directories on demand.
            if name.endswith('/'):
                continue

            # Pathnames in a zipfile namelist are always /-separated.
            # In theory, paths could start with ./ or have other oddities
            # but this won't happen in practical cases of well-formed wheels.
            # We'll cover the simple case of an initial './' as it's both easy
            # to do and more common than most other oddities.
            if name.startswith('./'):
                name = name[2:]

            # Split off the base directory to identify files that are to be
            # installed in non-root locations
            basedir, sep, filename = name.partition('/')
            if sep and basedir == self.datadir_name:
                # Data file. Target destination is elsewhere
                key, sep, filename = filename.partition('/')
                if not sep:
                    raise ValueError("Invalid filename in wheel: {0}".format(name))
                target = get_path(key)
            else:
                # Normal file. Target destination is root
                key = ''
                target = root
                filename = name

            # Map the actual filename from the zipfile to its intended target
            # directory and the pathname relative to that directory.
            dest = os.path.normpath(os.path.join(target, filename))
            name_trans[info] = (key, target, filename, dest)

        # We're now ready to start processing the actual install. The process
        # is as follows:
        #   1. Prechecks - is the wheel valid, is its declared architecture
        #      OK, etc. [[Responsibility of the caller]]
        #   2. Overwrite check - do any of the files to be installed already
        #      exist?
        #   3. Actual install - put the files in their target locations.
        #   4. Update RECORD - write a suitably modified RECORD file to
        #      reflect the actual installed paths.

        if not force:
            for info, v in name_trans.items():
                k = info.filename
                key, target, filename, dest = v
                if os.path.exists(dest):
                    raise ValueError("Wheel file {0} would overwrite {1}. Use force if this is intended".format(k, dest))

        # Get the name of our executable, for use when replacing script
        # wrapper hashbang lines.
        # We encode it using getfilesystemencoding, as that is "the name of
        # the encoding used to convert Unicode filenames into system file
        # names".
        exename = sys.executable.encode(sys.getfilesystemencoding())
        record_data = []
        record_name = self.distinfo_name + '/RECORD'
        for info, (key, target, filename, dest) in name_trans.items():
            name = info.filename
            source = self.zipfile.open(info)
            # Skip the RECORD file
            if name == record_name:
                continue
            ddir = os.path.dirname(dest)
            if not os.path.isdir(ddir):
                os.makedirs(ddir)

            destination = HashingFile(open(dest, 'wb'))
            if key == 'scripts':
                # Rewrite generic '#!python' hashbangs to this interpreter.
                hashbang = source.readline()
                if hashbang.startswith(b'#!python'):
                    hashbang = b'#!' + exename + binary(os.linesep)
                destination.write(hashbang)
            shutil.copyfileobj(source, destination)
            reldest = os.path.relpath(dest, root)
            reldest.replace(os.sep, '/')
            # NOTE(review): str.replace returns a new string and the result
            # above is discarded, so RECORD may keep OS-specific separators
            # on Windows -- looks like a latent upstream bug; confirm.
            record_data.append((reldest, destination.digest(),
                destination.length))
            destination.close()
            source.close()
            # preserve attributes (especially +x bit for scripts)
            attrs = info.external_attr >> 16
            if attrs:  # tends to be 0 if Windows.
                os.chmod(dest, info.external_attr >> 16)

        # Write the new RECORD describing what was actually installed.
        record_name = os.path.join(root, self.record_name)
        writer = csv.writer(open_for_csv(record_name, 'w+'))
        for reldest, digest, length in sorted(record_data):
            writer.writerow((reldest, digest, length))
        writer.writerow((self.record_name, '', ''))

    def verify(self, zipfile=None):
        """Configure the VerifyingZipFile `zipfile` by verifying its signature
        and setting expected hashes for every hash in RECORD.
        Caller must complete the verification process by completely reading
        every file in the archive (e.g. with extractall)."""
        sig = None
        if zipfile is None:
            zipfile = self.zipfile
        zipfile.strict = True

        record_name = '/'.join((self.distinfo_name, 'RECORD'))
        sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
        # tolerate s/mime signatures:
        smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
        # RECORD and its signatures cannot carry their own hashes.
        zipfile.set_expected_hash(record_name, None)
        zipfile.set_expected_hash(sig_name, None)
        zipfile.set_expected_hash(smime_sig_name, None)
        record = zipfile.read(record_name)
        record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
        try:
            sig = from_json(native(zipfile.read(sig_name)))
        except KeyError:  # no signature
            pass
        if sig:
            # The signature asserts RECORD's integrity; the per-file hashes
            # registered below cover everything else.
            headers, payload = signatures.verify(sig)
            if payload['hash'] != "sha256=" + native(record_digest):
                msg = "RECORD.sig claimed RECORD hash {0} != computed hash {1}."
                raise BadWheelFile(msg.format(payload['hash'],
                    native(record_digest)))

        reader = csv.reader((native(r) for r in record.splitlines()))

        for row in reader:
            filename = row[0]
            hash = row[1]
            if not hash:
                if filename not in (record_name, sig_name):
                    sys.stderr.write("%s has no hash!\n" % filename)
                continue
            algo, data = row[1].split('=', 1)
            assert algo == "sha256", "Unsupported hash algorithm"
            zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data)))
class VerifyingZipFile(zipfile.ZipFile):
    """ZipFile that can assert that each of its extracted contents matches
    an expected sha256 hash. Note that each file must be completly read in
    order for its hash to be checked."""

    def __init__(self, file, mode="r",
                 compression=zipfile.ZIP_STORED,
                 allowZip64=False):
        zipfile.ZipFile.__init__(self, file, mode, compression, allowZip64)

        # When strict, opening an entry with no registered hash is an error.
        self.strict = False
        self._expected_hashes = {}
        self._hash_algorithm = hashlib.sha256

    def set_expected_hash(self, name, hash):
        """
        :param name: name of zip entry
        :param hash: bytes of hash (or None for "don't care")
        """
        self._expected_hashes[name] = hash

    def open(self, name_or_info, mode="r", pwd=None):
        """Return file-like object for 'name'.

        The returned ZipExtFile is patched so that its CRC update hook also
        feeds a running sha256; at end-of-file the digest is compared with
        the expected hash and BadWheelFile is raised on mismatch.
        """
        # A non-monkey-patched version would contain most of zipfile.py
        ef = zipfile.ZipFile.open(self, name_or_info, mode, pwd)
        if isinstance(name_or_info, zipfile.ZipInfo):
            name = name_or_info.filename
        else:
            name = name_or_info
        if (name in self._expected_hashes
            and self._expected_hashes[name] != None):
            expected_hash = self._expected_hashes[name]
            try:
                _update_crc_orig = ef._update_crc
            except AttributeError:
                warnings.warn('Need ZipExtFile._update_crc to implement '
                              'file hash verification (in Python >= 2.7)')
                return ef
            running_hash = self._hash_algorithm()
            if hasattr(ef, '_eof'):  # py33
                def _update_crc(data):
                    _update_crc_orig(data)
                    running_hash.update(data)
                    if ef._eof and running_hash.digest() != expected_hash:
                        raise BadWheelFile("Bad hash for file %r" % ef.name)
            else:
                def _update_crc(data, eof=None):
                    _update_crc_orig(data, eof=eof)
                    running_hash.update(data)
                    if eof and running_hash.digest() != expected_hash:
                        raise BadWheelFile("Bad hash for file %r" % ef.name)
            ef._update_crc = _update_crc
        elif self.strict and name not in self._expected_hashes:
            raise BadWheelFile("No expected hash for file %r" % ef.name)
        return ef

    def pop(self):
        """Truncate the last file off this zipfile.
        Assumes infolist() is in the same order as the files (true for
        ordinary zip files created by Python)"""
        if not self.fp:
            raise RuntimeError(
                "Attempt to pop from ZIP archive that was already closed")
        # Drop the entry from the index, then truncate the archive at the
        # start of that entry's local header.
        last = self.infolist().pop()
        del self.NameToInfo[last.filename]
        self.fp.seek(last.header_offset, os.SEEK_SET)
        self.fp.truncate()
        self._didModify = True
| bsd-2-clause |
ssaamm/sign-language-translator | app.py | 2 | 1970 | from classifier import clf
from flask import Flask, render_template, jsonify, request, json
from hand_data import get_hand_position
from lib import Leap
import pickle
import random
import redis
app = Flask(__name__)

# Leap Motion controller; the background-frames policy lets the app keep
# receiving frames when its window is not focused.
controller = Leap.Controller()
controller.set_policy(Leap.Controller.POLICY_BACKGROUND_FRAMES)

# Cross-request state used by /current polling.
# NOTE(review): module-level mutable state -- assumes a single-threaded
# dev server; confirm before deploying with multiple workers.
past_symbol = 'a'
prev_prediction = None

# Redis connection backing the scoreboard endpoints.
r = redis.StrictRedis(host='localhost', port=6379, db=0)
@app.route('/translate')
def translate():
    """Serve the main translation UI page."""
    return render_template('ui.html')
@app.route('/')
def tutorial():
    """Serve the tutorial page at the site root."""
    return render_template('tutorial.html')
@app.route('/score', methods=['POST'])
def add_score():
    """Persist a submitted score onto the Redis 'scoreboard' list.

    Expects POST form fields 'user' and 'score'; responds with
    {'error': ...}.
    """
    data = request.form
    try:
        record = json.dumps({'user': data['user'], 'score': int(data['score'])})
        print record  # Python 2 print statement (file uses iteritems elsewhere)
        result = r.lpush('scoreboard', record)
        # NOTE(review): lpush returns the new list length, so 'error' is a
        # truthy integer on success -- confirm this is what clients expect.
        # Also, int(data['score']) can raise ValueError, which is not caught.
        return jsonify(error=result)
    except KeyError:
        # A required form field was missing.
        return jsonify(error=True)
@app.route('/scores', methods=['GET'])
def get_scores():
    """Return the top ten scoreboard entries, highest score first."""
    entries = [json.loads(raw) for raw in r.lrange('scoreboard', 0, 100)]
    top_ten = sorted(entries, key=lambda entry: entry['score'], reverse=True)[:10]
    return jsonify(scores=top_ten)
@app.route('/current')
def current_symbol():
    """Return the currently-signed symbol as JSON.

    Response: {'symbol': <char>, 'new': <bool>} where 'new' is True only
    when the symbol changed since the previous poll.
    """
    global past_symbol
    global prev_prediction
    # Is there a hand?
    hand_pos = get_hand_position(controller)
    if not hand_pos:
        # No hand visible: report a space; 'new' only on the transition.
        new = past_symbol != ' '
        past_symbol = ' '
        return jsonify(symbol=' ', new=new)
    # NOTE(review): py2 dict iteration order is arbitrary, so the feature
    # order may be unstable unless get_hand_position returns an ordered
    # mapping -- confirm against the classifier's training feature order.
    features = [v for k, v in hand_pos.iteritems()]
    # Do we have a new symbol?
    prediction = ''.join(clf.predict(features))
    if prediction == prev_prediction:
        # We good fam
        return jsonify(new=False, symbol=prediction)
    else:
        prev_prediction = prediction
        return jsonify(new=True, symbol=prediction)
@app.route('/splash')
def splash():
    """Serve the splash/landing page."""
    return render_template('splash.html')
@app.route('/scoreboard')
def scoreboard():
    """Return the user's score.

    NOTE(review): hard-coded to 100 -- appears to be a stub.
    """
    return jsonify(user_score=100)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger (arbitrary
    # code execution) -- must not be used in production.
    app.run(debug=True)
| mit |
alanch-ms/PTVS | Python/Product/PythonTools/Templates/Projects/WorkerRoleProject/worker.py | 3 | 1954 | import os
from time import sleep
#
# The azure library provides access to services made available by the
# Microsoft Azure platform, such as storage and messaging.
#
# See http://go.microsoft.com/fwlink/?linkid=254360 for documentation and
# example code.
#
from azure.servicebus import ServiceBusService
from azure.storage import CloudStorageAccount
#
# The CloudStorageAccount provides factory methods for the queue, table, and
# blob services.
#
# See http://go.microsoft.com/fwlink/?linkid=246933 for Storage documentation.
#
# Azure storage credentials -- placeholders to be filled in at deployment.
STORAGE_ACCOUNT_NAME = '__paste_your_storage_account_name_here__'
STORAGE_ACCOUNT_KEY = '__paste_your_storage_key_here__'

if os.environ.get('EMULATED', '').lower() == 'true':
    # Running in the emulator, so use the development storage account
    storage_account = CloudStorageAccount(None, None)
else:
    storage_account = CloudStorageAccount(STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY)

# Service clients for Azure blob, table and queue storage.
blob_service = storage_account.create_blob_service()
table_service = storage_account.create_table_service()
queue_service = storage_account.create_queue_service()
#
# Service Bus is a messaging solution for applications. It sits between
# components of your applications and enables them to exchange messages in a
# loosely coupled way for improved scale and resiliency.
#
# See http://go.microsoft.com/fwlink/?linkid=246934 for Service Bus documentation.
#
# Service Bus credentials -- placeholders to be filled in at deployment.
SERVICE_BUS_NAMESPACE = '__paste_your_service_bus_namespace_here__'
SERVICE_BUS_KEY = '__paste_your_service_bus_key_here__'

bus_service = ServiceBusService(SERVICE_BUS_NAMESPACE, SERVICE_BUS_KEY, issuer='owner')

if __name__ == '__main__':
    # Template worker loop: replace the sleep with real work (the 1s sleep
    # keeps the idle template from spinning a CPU core).
    while True:
        #
        # Write your worker process here.
        #
        # You will probably want to call a blocking function such as
        # bus_service.receive_queue_message('queue name', timeout=seconds)
        # to avoid consuming 100% CPU time while your worker has no work.
        #
        sleep(1.0)
| apache-2.0 |
code-kitchen/django-utensils | utensils/storage.py | 1 | 1100 | from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
"""
Store your static and media files on Amazon S3 easily.
Example settings config:
AWS = {
'STATIC': {
'location': 'static', # AWS_LOCATION
'querystring_auth': False, # AWS_QUERYSTRING_AUTH
'default_acl': 'public-read', # AWS_DEFAULT_ACL
},
'MEDIA': {
'location': 'media', # AWS_LOCATION
'querystring_auth': True, # AWS_QUERYSTRING_AUTH
'default_acl': 'private', # AWS_DEFAULT_ACL
},
}
AWS_ACCESS_KEY_ID = 'XXXXXXXXXXXXXXXXXXXX'
AWS_SECRET_ACCESS_KEY = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
AWS_STORAGE_BUCKET_NAME = 'bucket'
AWS_PRELOAD_METADATA = True
AWS_S3_SECURE_URLS = False
STATICFILES_STORAGE = 'utensils.storage.StaticRootS3BotoStorage'
DEFAULT_FILE_STORAGE = 'utensils.storage.MediaRootS3BotoStorage'
"""
# PEP 8 (E731): named callables should be defs, not lambda assignments --
# defs get a proper __name__, better tracebacks and are picklable, which
# matters because Django stores these dotted paths in settings.
def StaticRootS3BotoStorage():
    """Return an S3 storage backend configured from settings.AWS['STATIC']."""
    return S3BotoStorage(**settings.AWS["STATIC"])


def MediaRootS3BotoStorage():
    """Return an S3 storage backend configured from settings.AWS['MEDIA']."""
    return S3BotoStorage(**settings.AWS["MEDIA"])
| mit |
walterreade/scikit-learn | sklearn/datasets/tests/test_base.py | 33 | 7160 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
# Shared temporary directories, created once at import time and removed by
# teardown_module().
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")

# Category directories, populated by setup_load_files() and cleared by
# teardown_load_files() around the load_files tests.
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    _remove_dir(DATA_HOME)
    _remove_dir(LOAD_FILES_ROOT)
def setup_load_files():
    """Create two category dirs under LOAD_FILES_ROOT plus one sample file.

    The directory paths are stored in module-level globals so the matching
    teardown_load_files() can remove them.
    """
    global TEST_CATEGORY_DIR1
    global TEST_CATEGORY_DIR2
    TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    # delete=False: the file must outlive this handle so load_files sees it.
    sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
                                              delete=False)
    sample_file.write(b("Hello World!\n"))
    sample_file.close()
def teardown_load_files():
    """Remove the category directories created by setup_load_files()."""
    for category_dir in (TEST_CATEGORY_DIR1, TEST_CATEGORY_DIR2):
        _remove_dir(category_dir)
def test_data_home():
    """get_data_home creates, reuses and clears the data directory."""
    # get_data_home will point to a pre-existing folder
    data_home = get_data_home(data_home=DATA_HOME)
    assert_equal(data_home, DATA_HOME)
    assert_true(os.path.exists(data_home))

    # clear_data_home will delete both the content and the folder it-self
    clear_data_home(data_home=data_home)
    assert_false(os.path.exists(data_home))

    # if the folder is missing it will be created again
    data_home = get_data_home(data_home=DATA_HOME)
    assert_true(os.path.exists(data_home))


def test_default_empty_load_files():
    """load_files on an empty root yields an empty result."""
    res = load_files(LOAD_FILES_ROOT)
    assert_equal(len(res.filenames), 0)
    assert_equal(len(res.target_names), 0)
    assert_equal(res.DESCR, None)


@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
    """With defaults, load_files reads the one sample file's bytes."""
    res = load_files(LOAD_FILES_ROOT)
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 2)
    assert_equal(res.DESCR, None)
    assert_equal(res.data, [b("Hello World!\n")])


@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
    """Category filtering, description and text decoding are honoured."""
    category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
    res = load_files(LOAD_FILES_ROOT, description="test",
                     categories=category, encoding="utf-8")
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 1)
    assert_equal(res.DESCR, "test")
    assert_equal(res.data, [u("Hello World!\n")])


@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
    """With load_content=False no file contents are read."""
    res = load_files(LOAD_FILES_ROOT, load_content=False)
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 2)
    assert_equal(res.DESCR, None)
    assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
    """Full digits dataset: 1797 samples x 64 features over 10 classes."""
    digits_bunch = load_digits()
    n_samples, n_features = digits_bunch.data.shape
    assert_equal((n_samples, n_features), (1797, 64))
    assert_equal(numpy.unique(digits_bunch.target).size, 10)
def test_load_digits_n_class_lt_10():
    """Restricting to 9 classes keeps only the matching 1617 samples."""
    nine_digits = load_digits(9)
    assert_equal(nine_digits.data.shape, (1617, 64))
    assert_equal(numpy.unique(nine_digits.target).size, 9)
def test_load_sample_image():
    """china.jpg decodes to a 427x640 RGB uint8 array (requires PIL)."""
    try:
        image = load_sample_image('china.jpg')
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        assert_equal(image.dtype, 'uint8')
        assert_equal(image.shape, (427, 640, 3))
def test_load_missing_sample_image_error():
    """Requesting an unknown sample image raises AttributeError (needs PIL)."""
    # Probe for PIL through scipy's imread, trying both historical locations.
    try:
        try:
            from scipy.misc import imread  # noqa
        except ImportError:
            from scipy.misc.pilutil import imread  # noqa
        have_PIL = True
    except ImportError:
        have_PIL = False
    if not have_PIL:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        assert_raises(AttributeError, load_sample_image, 'blop.jpg')
def test_load_diabetes():
    """The diabetes dataset has 442 samples with 10 features each."""
    res = load_diabetes()
    assert_equal(res.data.shape, (442, 10))
    # Bug fix: the original `assert_true(res.target.size, 442)` passed 442 as
    # the failure *message* and only checked truthiness, so it could never
    # catch a wrong target size; compare explicitly instead.
    assert_equal(res.target.size, 442)
def test_load_linnerud():
    """Linnerud: 20 samples, 3 exercise features, 3 physiological targets."""
    linnerud = load_linnerud()
    assert_equal(linnerud.data.shape, (20, 3))
    assert_equal(linnerud.target.shape, (20, 3))
    assert_equal(len(linnerud.target_names), 3)
    assert_true(linnerud.DESCR)
def test_load_iris():
    """Iris: 150 samples, 4 features, 3 target classes, with a description."""
    iris = load_iris()
    assert_equal(iris.data.shape, (150, 4))
    assert_equal(iris.target.size, 150)
    assert_equal(iris.target_names.size, 3)
    assert_true(iris.DESCR)
def test_load_breast_cancer():
    """Breast cancer: 569 samples, 30 features, binary target."""
    cancer = load_breast_cancer()
    assert_equal(cancer.data.shape, (569, 30))
    assert_equal(cancer.target.size, 569)
    assert_equal(cancer.target_names.size, 2)
    assert_true(cancer.DESCR)
def test_load_boston():
    """Boston housing: 506 samples with 13 named features."""
    boston = load_boston()
    assert_equal(boston.data.shape, (506, 13))
    assert_equal(boston.target.size, 506)
    assert_equal(boston.feature_names.size, 13)
    assert_true(boston.DESCR)
def test_loads_dumps_bunch():
    """A Bunch keeps attribute and key access in sync after pickling."""
    original = Bunch(x="x")
    restored = loads(dumps(original))
    restored.x = "y"
    # Attribute assignment on the unpickled Bunch must show through getitem.
    assert_equal(restored['x'], restored.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
    """Unpickling must ignore a stale __dict__ entry from old Bunch pickles.

    Bunch pickles written by scikit-learn 0.16 carry a non-empty __dict__;
    reading ``bunch.key`` goes through __dict__ while assignment goes through
    __setattr__, which used to desynchronize the two when read with 0.17.
    See https://github.com/scikit-learn/scikit-learn/issues/6196.
    """
    bunch = Bunch(key='original')
    # Simulate a 0.16-era pickle by planting a conflicting __dict__ entry.
    bunch.__dict__['key'] = 'set from __dict__'
    unpickled = loads(dumps(bunch))
    # The stale __dict__ entry must not survive the round-trip.
    assert_equal(unpickled.key, 'original')
    assert_equal(unpickled['key'], 'original')
    # Attribute assignment must stay visible through __getitem__ as well.
    unpickled.key = 'changed'
    assert_equal(unpickled.key, 'changed')
    assert_equal(unpickled['key'], 'changed')
| bsd-3-clause |
mdrumond/tensorflow | tensorflow/python/layers/convolutional_test.py | 31 | 43050 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.convolutional."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConvTest(test.TestCase):
  """Tests for tf.layers Conv1D/Conv2D/Conv3D layers and functional aliases.

  Several tests assert exact op names (e.g. 'conv2d/Relu') and
  trainable-variable counts, so they are sensitive to the order in which
  layers are built within the default graph.
  """

  def testInvalidDataFormat(self):
    # An unrecognized data_format string must be rejected with ValueError.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'data_format'):
      conv_layers.conv2d(images, 32, 3, data_format='invalid')

  def testInvalidStrides(self):
    # strides must be an int or a 2-tuple; 3-tuples and None are invalid.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv2d(images, 32, 3, strides=(1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv2d(images, 32, 3, strides=None)

  def testInvalidKernelSize(self):
    # kernel_size must be an int or a 2-tuple; 3-tuples and None are invalid.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv2d(images, 32, (1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv2d(images, 32, None)

  def testCreateConv2D(self):
    # VALID padding with a 3x3 kernel shrinks each spatial dim by 2.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testConv2DFloat16(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4), dtype='float16')
    output = conv_layers.conv2d(images, 32, [3, 3], activation=nn_ops.relu)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])

  def testCreateConv2DIntegerKernelSize(self):
    # A scalar kernel_size is expanded to a square kernel.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, 3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateConv2DChannelsFirst(self):
    # NCHW input: the channel axis moves to position 1 in the output.
    height, width = 7, 9
    images = random_ops.random_uniform((5, 4, height, width))
    layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height - 2, width - 2])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testUnknownInputChannels(self):
    images = random_ops.random_uniform((5, 7, 9, 4))
    # Force an unknown channel dimension via the private _shape attribute.
    images._shape = tensor_shape.as_shape((5, 7, 9, None))
    layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)

    images = random_ops.random_uniform((5, 4, 7, 9))
    images._shape = tensor_shape.as_shape((5, None, 7, 9))
    layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)

  def testConv2DPaddingSame(self):
    # SAME padding preserves the spatial dimensions.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 32), seed=1)
    layer = conv_layers.Conv2D(64, images.get_shape()[1:3], padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])

  def testCreateConvWithStrides(self):
    height, width = 6, 8
    # Test strides tuple
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 2), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])

    # Test strides integer
    layer = conv_layers.Conv2D(32, [3, 3], strides=2, padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])

    # Test unequal strides
    layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 1), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width, 32])

  def testCreateConv1D(self):
    width = 7
    data = random_ops.random_uniform((5, width, 4))
    layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
    output = layer.apply(data)
    self.assertEqual(output.op.name, 'conv1d/Relu')
    self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testConv1DFloat16(self):
    width = 7
    data = random_ops.random_uniform((5, width, 4), dtype='float16')
    output = conv_layers.conv1d(data, 32, 3, activation=nn_ops.relu)
    self.assertListEqual(output.get_shape().as_list(), [5, width - 2, 32])

  def testCreateConv1DChannelsFirst(self):
    width = 7
    data = random_ops.random_uniform((5, 4, width))
    layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
    output = layer.apply(data)
    self.assertListEqual(output.get_shape().as_list(), [5, 32, width - 2])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testUnknownInputChannelsConv1D(self):
    data = random_ops.random_uniform((5, 4, 7))
    # Force an unknown channel dimension via the private _shape attribute.
    data._shape = tensor_shape.as_shape((5, 4, None))
    layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(data)

    data = random_ops.random_uniform((5, 7, 4))
    data._shape = tensor_shape.as_shape((5, None, 4))
    layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(data)

  def testCreateConv3D(self):
    depth, height, width = 6, 7, 9
    volumes = random_ops.random_uniform((5, depth, height, width, 4))
    layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
    output = layer.apply(volumes)
    self.assertEqual(output.op.name, 'conv3d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, depth - 2, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testUnknownInputChannelsConv3D(self):
    volumes = random_ops.random_uniform((5, 6, 7, 9, 9))
    # Force an unknown channel dimension via the private _shape attribute.
    volumes._shape = tensor_shape.as_shape((5, 6, 7, 9, None))
    layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(volumes)

  def testConv2DKernelRegularizer(self):
    # The regularizer loss must land in the REGULARIZATION_LOSSES collection
    # and be mirrored by layer.losses.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.Conv2D(32, [3, 3], kernel_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)

  def testConv2DBiasRegularizer(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.Conv2D(32, [3, 3], bias_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)

  def testConv2DNoBias(self):
    # use_bias=False must leave layer.bias unset.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(
        32, [3, 3], activation=nn_ops.relu, use_bias=False)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertEqual(layer.bias, None)

  def testDilatedConv2D(self):
    # Dilation widens the effective kernel, shrinking the output further.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 3, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

    # Test tuple dilation rate
    layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=(1, 3))
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height - 2, 3, 32])

  def testFunctionalConv2DReuse(self):
    # reuse=True must not create new trainable variables.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.conv2d(images, 32, [3, 3], name='conv1')
    self.assertEqual(len(variables.trainable_variables()), 2)
    conv_layers.conv2d(images, 32, [3, 3], name='conv1', reuse=True)
    self.assertEqual(len(variables.trainable_variables()), 2)

  def testFunctionalConv2DReuseFromScope(self):
    # Reuse triggered by the enclosing variable scope instead of the kwarg.
    with variable_scope.variable_scope('scope'):
      height, width = 7, 9
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      conv_layers.conv2d(images, 32, [3, 3], name='conv1')
      self.assertEqual(len(variables.trainable_variables()), 2)
    with variable_scope.variable_scope('scope', reuse=True):
      conv_layers.conv2d(images, 32, [3, 3], name='conv1')
      self.assertEqual(len(variables.trainable_variables()), 2)

  def testFunctionalConv2DInitializerFromScope(self):
    # The scope-level initializer applies to the kernel but the bias keeps
    # its own zeros initializer.
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          'scope', initializer=init_ops.ones_initializer()):
        height, width = 7, 9
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        conv_layers.conv2d(images, 32, [3, 3], name='conv1')
        weights = variables.trainable_variables()
        # Check the names of weights in order.
        self.assertTrue('kernel' in weights[0].name)
        self.assertTrue('bias' in weights[1].name)
        sess.run(variables.global_variables_initializer())
        weights = sess.run(weights)
        # Check that the kernel weights got initialized to ones (from scope)
        self.assertAllClose(weights[0], np.ones((3, 3, 3, 32)))
        # Check that the bias still got initialized to zeros.
        self.assertAllClose(weights[1], np.zeros((32)))

  def testFunctionalConv2DNoReuse(self):
    # Without reuse, each call creates a fresh kernel/bias pair.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.conv2d(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 2)
    conv_layers.conv2d(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 4)

  def testConstraints(self):
    # Constraint callables must be stored on the layer for all conv ranks.
    # Conv1D
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    conv1d = conv_layers.Conv1D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 5), seed=1)
    conv1d(inputs)
    self.assertEqual(conv1d.kernel_constraint, k_constraint)
    self.assertEqual(conv1d.bias_constraint, b_constraint)

    # Conv2D
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    conv2d = conv_layers.Conv2D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
    conv2d(inputs)
    self.assertEqual(conv2d.kernel_constraint, k_constraint)
    self.assertEqual(conv2d.bias_constraint, b_constraint)

    # Conv3D
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    conv3d = conv_layers.Conv3D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
    conv3d(inputs)
    self.assertEqual(conv3d.kernel_constraint, k_constraint)
    self.assertEqual(conv3d.bias_constraint, b_constraint)
class SeparableConv2DTest(test.TestCase):
  """Tests for the SeparableConv2D layer and separable_conv2d function.

  A separable conv factors into a depthwise kernel of shape
  [kh, kw, in_channels, depth_multiplier] followed by a pointwise kernel of
  shape [1, 1, in_channels * depth_multiplier, filters]; the shape assertions
  below check both factors.
  """

  def testInvalidDataFormat(self):
    # An unrecognized data_format string must be rejected with ValueError.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'data_format'):
      conv_layers.separable_conv2d(images, 32, 3, data_format='invalid')

  def testInvalidStrides(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.separable_conv2d(images, 32, 3, strides=(1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.separable_conv2d(images, 32, 3, strides=None)

  def testInvalidKernelSize(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.separable_conv2d(images, 32, (1, 2, 3))
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.separable_conv2d(images, 32, None)

  def testCreateSeparableConv2D(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(32, [3, 3], activation=nn_ops.relu)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'separable_conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateSeparableConv2DDepthMultiplier(self):
    # depth_multiplier=2 doubles the depthwise output channels feeding the
    # pointwise kernel.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(32, [3, 3], depth_multiplier=2)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 2])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 8, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateSeparableConv2DIntegerKernelSize(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(32, 3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testCreateSeparableConv2DChannelsFirst(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, 4, height, width))
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], data_format='channels_first')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height - 2, width - 2])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

  def testSeparableConv2DPaddingSame(self):
    # SAME padding preserves the spatial dimensions.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 32), seed=1)
    layer = conv_layers.SeparableConv2D(
        64, images.get_shape()[1:3], padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])

  def testCreateSeparableConvWithStrides(self):
    height, width = 6, 8
    # Test strides tuple
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], strides=(2, 2), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])

    # Test strides integer
    layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2, padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width / 2, 32])

    # Test unequal strides
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], strides=(2, 1), padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height / 2, width, 32])

  def testCreateSeparableConvWithStridesChannelsFirst(self):
    data_format = 'channels_first'
    height, width = 6, 8
    # Test strides tuple
    images = random_ops.random_uniform((5, 3, height, width), seed=1)
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], strides=(2, 2), padding='same', data_format=data_format)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height / 2, width / 2])

    # Test strides integer
    layer = conv_layers.SeparableConv2D(32, [3, 3], strides=2, padding='same',
                                        data_format=data_format)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height / 2, width / 2])

    # Test unequal strides
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], strides=(2, 1), padding='same', data_format=data_format)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height / 2, width])

  def testFunctionalConv2DReuse(self):
    # A separable conv owns 3 variables: depthwise, pointwise, bias.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
    self.assertEqual(len(variables.trainable_variables()), 3)
    conv_layers.separable_conv2d(
        images, 32, [3, 3], name='sepconv1', reuse=True)
    self.assertEqual(len(variables.trainable_variables()), 3)

  def testFunctionalConv2DReuseFromScope(self):
    # Reuse triggered by the enclosing variable scope instead of the kwarg.
    with variable_scope.variable_scope('scope'):
      height, width = 7, 9
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
      self.assertEqual(len(variables.trainable_variables()), 3)
    with variable_scope.variable_scope('scope', reuse=True):
      conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
      self.assertEqual(len(variables.trainable_variables()), 3)

  def testFunctionalConv2DInitializerFromScope(self):
    # The scope initializer applies to both kernels; the bias keeps zeros.
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          'scope', initializer=init_ops.ones_initializer()):
        height, width = 7, 9
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        conv_layers.separable_conv2d(images, 32, [3, 3], name='sepconv1')
        weights = variables.trainable_variables()
        # Check the names of weights in order.
        self.assertTrue('depthwise_kernel' in weights[0].name)
        self.assertTrue('pointwise_kernel' in weights[1].name)
        self.assertTrue('bias' in weights[2].name)
        sess.run(variables.global_variables_initializer())
        weights = sess.run(weights)
        # Check that the kernel weights got initialized to ones (from scope)
        self.assertAllClose(weights[0], np.ones((3, 3, 3, 1)))
        self.assertAllClose(weights[1], np.ones((1, 1, 3, 32)))
        # Check that the bias still got initialized to zeros.
        self.assertAllClose(weights[2], np.zeros((32)))

  def testFunctionalConv2DNoReuse(self):
    # Without reuse, each call creates a fresh set of 3 variables.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    conv_layers.separable_conv2d(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 3)
    conv_layers.separable_conv2d(images, 32, [3, 3])
    self.assertEqual(len(variables.trainable_variables()), 6)

  def testSeparableConv2DDepthwiseRegularizer(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.SeparableConv2D(32, [3, 3], depthwise_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)

  def testSeparableConv2DPointwiseRegularizer(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.SeparableConv2D(32, [3, 3], pointwise_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)

  def testSeparableConv2DBiasRegularizer(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.SeparableConv2D(32, [3, 3], bias_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)

  def testSeparableConv2DNoBias(self):
    # use_bias=False must leave layer.bias unset.
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.SeparableConv2D(
        32, [3, 3], activation=nn_ops.relu, use_bias=False)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'separable_conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.depthwise_kernel.get_shape().as_list(),
                         [3, 3, 4, 1])
    self.assertListEqual(layer.pointwise_kernel.get_shape().as_list(),
                         [1, 1, 4, 32])
    self.assertEqual(layer.bias, None)

  def testConstraints(self):
    # Constraint callables must be stored on the layer.
    d_constraint = lambda x: x / math_ops.reduce_sum(x)
    p_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    layer = conv_layers.SeparableConv2D(2, 3,
                                        depthwise_constraint=d_constraint,
                                        pointwise_constraint=p_constraint,
                                        bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
    layer(inputs)
    self.assertEqual(layer.depthwise_constraint, d_constraint)
    self.assertEqual(layer.pointwise_constraint, p_constraint)
    self.assertEqual(layer.bias_constraint, b_constraint)
class Conv2DTransposeTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
conv_layers.conv2d_transpose(images, 32, 3, data_format='invalid')
def testInvalidStrides(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv2d_transpose(images, 32, 3, strides=(1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv2d_transpose(images, 32, 3, strides=None)
def testInvalidKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv2d_transpose(images, 32, (1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv2d_transpose(images, 32, None)
def testCreateConv2DTranspose(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(32, [3, 3], activation=nn_ops.relu)
output = layer.apply(images)
self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DTransposeFloat16(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4), dtype='float16')
output = conv_layers.conv2d_transpose(images, 32, [3, 3],
activation=nn_ops.relu)
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
def testCreateConv2DTransposeIntegerKernelSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(32, 3)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testCreateConv2DTransposeChannelsFirst(self):
height, width = 7, 9
images = random_ops.random_uniform((5, 4, height, width))
layer = conv_layers.Conv2DTranspose(
32, [3, 3], data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, 32, height + 2, width + 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertListEqual(layer.bias.get_shape().as_list(), [32])
def testConv2DTransposePaddingSame(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 32), seed=1)
layer = conv_layers.Conv2DTranspose(
64, images.get_shape()[1:3], padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 64])
def testCreateConv2DTransposeWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layer = conv_layers.Conv2DTranspose(
32, [3, 3], strides=(2, 2), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height * 2, width * 2, 32])
# Test strides integer
layer = conv_layers.Conv2DTranspose(32, [3, 3], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height * 2, width * 2, 32])
# Test unequal strides
layer = conv_layers.Conv2DTranspose(
32, [3, 3], strides=(2, 1), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height * 2, width, 32])
def testConv2DTransposeKernelRegularizer(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2DTranspose(32, [3, 3], kernel_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(layer.losses, loss_keys)
def testConv2DTransposeBiasRegularizer(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv2DTranspose(32, [3, 3], bias_regularizer=reg)
layer.apply(images)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(layer.losses, loss_keys)
def testConv2DTransposeNoBias(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = conv_layers.Conv2DTranspose(
32, [3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(images)
self.assertEqual(output.op.name, 'conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height + 2, width + 2, 32])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 32, 4])
self.assertEqual(layer.bias, None)
def testFunctionalConv2DTransposeReuse(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv2DTransposeReuseFromScope(self):
with variable_scope.variable_scope('scope'):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv2DTransposeInitializerFromScope(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3], name='deconv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('kernel' in weights[0].name)
self.assertTrue('bias' in weights[1].name)
sess.run(variables.global_variables_initializer())
weights = sess.run(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 32, 3)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((32)))
def testFunctionalConv2DTransposeNoReuse(self):
# Without a shared name / reuse flag, each functional call builds a
# fresh layer, so the trainable-variable count doubles (2 -> 4).
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
conv_layers.conv2d_transpose(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv2d_transpose(images, 32, [3, 3])
self.assertEqual(len(variables.trainable_variables()), 4)
def testConstraints(self):
# Constraint callables passed at construction must be stored verbatim
# on the built layer (identity check via assertEqual on the objects).
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.Conv2DTranspose(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
# Build the layer by calling it once, then inspect the attributes.
layer(inputs)
self.assertEqual(layer.kernel_constraint, k_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
class Conv3DTransposeTest(test.TestCase):
def testInvalidDataFormat(self):
# An unrecognised data_format string must be rejected with a
# ValueError mentioning 'data_format'.
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
conv_layers.conv3d_transpose(volumes, 4, 3, data_format='invalid')
def testInvalidStrides(self):
# Strides must be an int or a length-3 tuple for a 3D layer; a
# 2-tuple or None must raise a ValueError mentioning 'strides'.
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv3d_transpose(volumes, 4, 3, strides=(1, 2))
with self.assertRaisesRegexp(ValueError, 'strides'):
conv_layers.conv3d_transpose(volumes, 4, 3, strides=None)
def testInvalidKernelSize(self):
# kernel_size must be an int or a length-3 tuple; a 2-tuple or None
# must raise a ValueError mentioning 'kernel_size'.
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv3d_transpose(volumes, 4, (1, 2))
with self.assertRaisesRegexp(ValueError, 'kernel_size'):
conv_layers.conv3d_transpose(volumes, 4, None)
def testCreateConv3DTranspose(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], activation=nn_ops.relu)
output = layer.apply(volumes)
self.assertEqual(output.op.name, 'conv3d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testCreateConv3DTransposeIntegerKernelSize(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(4, 3)
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testCreateConv3DTransposeChannelsFirst(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, 32, depth, height, width))
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], data_format='channels_first')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, 4, depth + 2, height + 2, width + 2])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertListEqual(layer.bias.get_shape().as_list(), [4])
def testConv3DTransposePaddingSame(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 64), seed=1)
layer = conv_layers.Conv3DTranspose(
32, volumes.get_shape()[1:4], padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth, height, width, 32])
def testCreateConv3DTransposeWithStrides(self):
depth, height, width = 4, 6, 8
# Test strides tuple.
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], strides=(2, 2, 2), padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth * 2, height * 2, width * 2, 4])
# Test strides integer.
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], strides=2, padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth * 2, height * 2, width * 2, 4])
# Test unequal strides.
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], strides=(2, 1, 1), padding='same')
output = layer.apply(volumes)
self.assertListEqual(output.get_shape().as_list(),
[5, depth * 2, height, width, 4])
def testConv3DTransposeKernelRegularizer(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], kernel_regularizer=reg)
layer.apply(volumes)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(layer.losses, loss_keys)
def testConv3DTransposeBiasRegularizer(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
reg = lambda x: 0.1 * math_ops.reduce_sum(x)
layer = conv_layers.Conv3DTranspose(4, [3, 3, 3], bias_regularizer=reg)
layer.apply(volumes)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(layer.losses, loss_keys)
def testConv3DTransposeNoBias(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32))
layer = conv_layers.Conv3DTranspose(
4, [3, 3, 3], activation=nn_ops.relu, use_bias=False)
output = layer.apply(volumes)
self.assertEqual(output.op.name, 'conv3d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, depth + 2, height + 2, width + 2, 4])
self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 3, 4, 32])
self.assertEqual(layer.bias, None)
def testFunctionalConv3DTransposeReuse(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv3d_transpose(
volumes, 4, [3, 3, 3], name='deconv1', reuse=True)
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv3DTransposeReuseFromScope(self):
with variable_scope.variable_scope('scope'):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
with variable_scope.variable_scope('scope', reuse=True):
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
self.assertEqual(len(variables.trainable_variables()), 2)
def testFunctionalConv3DTransposeInitializerFromScope(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform(
(5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3], name='deconv1')
weights = variables.trainable_variables()
# Check the names of weights in order.
self.assertTrue('kernel' in weights[0].name)
self.assertTrue('bias' in weights[1].name)
sess.run(variables.global_variables_initializer())
weights = sess.run(weights)
# Check that the kernel weights got initialized to ones (from scope)
self.assertAllClose(weights[0], np.ones((3, 3, 3, 4, 32)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((4)))
def testFunctionalConv3DTransposeNoReuse(self):
depth, height, width = 5, 7, 9
volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
self.assertEqual(len(variables.trainable_variables()), 2)
conv_layers.conv3d_transpose(volumes, 4, [3, 3, 3])
self.assertEqual(len(variables.trainable_variables()), 4)
def testConstraints(self):
# 3D twin of Conv2DTransposeTest.testConstraints: constraint callables
# passed at construction must be stored verbatim on the built layer.
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
layer = conv_layers.Conv3DTranspose(2, 3,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
# Build the layer by calling it once, then inspect the attributes.
layer(inputs)
self.assertEqual(layer.kernel_constraint, k_constraint)
self.assertEqual(layer.bias_constraint, b_constraint)
if __name__ == '__main__':
test.main()
| apache-2.0 |
schqiushui/android_kernel_htc_msm8974 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
# perf script hook, invoked once before event processing starts.
def trace_begin():
# Run the display loop on a background thread (Python 2 `thread`
# module) so the syscall counters below keep updating while the
# totals are periodically reprinted.
thread.start_new_thread(print_syscall_totals, (interval,))
pass
# perf script hook: called once per raw_syscalls:sys_enter tracepoint event.
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
# When a [comm] filter was given on the command line, only count
# events raised by that process name.
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
# First hit for this syscall id: the autodict leaf is not an int
# yet, so += raises TypeError and we initialise the counter.
syscalls[id] = 1
def print_syscall_totals(interval):
# Display loop (runs on the thread started in trace_begin): every
# *interval* seconds clear the terminal, print the per-syscall event
# counts gathered so far, then reset the counters.
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
# Sort by count descending (key yields (count, id) tuples).
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
# syscall_name() may fail for ids unknown to the Util tables;
# skip those rows rather than abort the display loop.
pass
# Start the next interval's totals from zero.
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
Snifer/BurpSuite-Plugins | faraday/shell/core/helpers_python.py | 2 | 14698 | '''
Faraday Penetration Test IDE - Community Version
Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
'''
"""
* This is an implementation of wcwidth() and wcswidth() (defined in
* IEEE Std 1002.1-2001) for Unicode.
*
* http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
* http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
*
* In fixed-width output devices, Latin characters all occupy a single
* "cell" position of equal width, whereas ideographic CJK characters
* occupy two such cells. Interoperability between terminal-line
* applications and (teletype-style) character terminals using the
* UTF-8 encoding requires agreement on which character should advance
* the cursor by how many cell positions. No established formal
* standards exist at present on which Unicode character shall occupy
* how many cell positions on character terminals. These routines are
* a first attempt of defining such behavior based on simple rules
* applied to data provided by the Unicode Consortium.
*
* For some graphical characters, the Unicode standard explicitly
* defines a character-cell width via the definition of the East Asian
* FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes.
* In all these cases, there is no ambiguity about which width a
* terminal shall use. For characters in the East Asian Ambiguous (A)
* class, the width choice depends purely on a preference of backward
* compatibility with either historic CJK or Western practice.
* Choosing single-width for these characters is easy to justify as
* the appropriate long-term solution, as the CJK practice of
* displaying these characters as double-width comes from historic
* implementation simplicity (8-bit encoded characters were displayed
* single-width and 16-bit ones double-width, even for Greek,
* Cyrillic, etc.) and not any typographic considerations.
*
* Much less clear is the choice of width for the Not East Asian
* (Neutral) class. Existing practice does not dictate a width for any
* of these characters. It would nevertheless make sense
* typographically to allocate two character cells to characters such
* as for instance EM SPACE or VOLUME INTEGRAL, which cannot be
* represented adequately with a single-width glyph. The following
* routines at present merely assign a single-cell width to all
* neutral characters, in the interest of simplicity. This is not
* entirely satisfactory and should be reconsidered before
* establishing a formal standard in this area. At the moment, the
* decision which Not East Asian (Neutral) characters should be
* represented by double-width glyphs cannot yet be answered by
* applying a simple rule from the Unicode database content. Setting
* up a proper standard for the behavior of UTF-8 character terminals
* will require a careful analysis not only of each Unicode character,
* but also of each presentation form, something the author of these
* routines has avoided to do so far.
*
* http://www.unicode.org/unicode/reports/tr11/
*
* Markus Kuhn -- 2003-05-20 (Unicode 4.0)
*
* Permission to use, copy, modify, and distribute this software
* for any purpose and without fee is hereby granted. The author
* disclaims all warranties with regard to this software.
*
* Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
"""
# auxiliary function for binary search in interval table
def _bisearch(ucs, table):
min = 0
max = len(table)-1
if ucs < table[0][0] or ucs > table[max][1]:
return 0
while max >= min:
mid = (min + max) / 2
if ucs > table[mid][1]:
min = mid + 1
elif ucs < table[mid][0]:
max = mid - 1
else:
return 1
return 0
"""
* The following two functions define the column width of an ISO 10646
* character as follows:
*
* - The null character (U+0000) has a column width of 0.
*
* - Other C0/C1 control characters and DEL will lead to a return
* value of -1.
*
* - Non-spacing and enclosing combining characters (general
* category code Mn or Me in the Unicode database) have a
* column width of 0.
*
* - SOFT HYPHEN (U+00AD) has a column width of 1.
*
* - Other format characters (general category code Cf in the Unicode
* database) and ZERO WIDTH SPACE (U+200B) have a column width of 0.
*
* - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF)
* have a column width of 0.
*
* - Spacing characters in the East Asian Wide (W) or East Asian
* Full-width (F) category as defined in Unicode Technical
* Report #11 have a column width of 2.
*
* - All remaining characters (including all printable
* ISO 8859-1 and WGL4 characters, Unicode control characters,
* etc.) have a column width of 1.
*
* This implementation assumes that wchar_t characters are encoded
* in ISO 10646.
"""
"""
sorted list of non-overlapping intervals of non-spacing characters
generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c"
"""
_combining = [
( 0x0300, 0x0357 ), ( 0x035D, 0x036F ), ( 0x0483, 0x0486 ),
( 0x0488, 0x0489 ), ( 0x0591, 0x05A1 ), ( 0x05A3, 0x05B9 ),
( 0x05BB, 0x05BD ), ( 0x05BF, 0x05BF ), ( 0x05C1, 0x05C2 ),
( 0x05C4, 0x05C4 ), ( 0x0600, 0x0603 ), ( 0x0610, 0x0615 ),
( 0x064B, 0x0658 ), ( 0x0670, 0x0670 ), ( 0x06D6, 0x06E4 ),
( 0x06E7, 0x06E8 ), ( 0x06EA, 0x06ED ), ( 0x070F, 0x070F ),
( 0x0711, 0x0711 ), ( 0x0730, 0x074A ), ( 0x07A6, 0x07B0 ),
( 0x0901, 0x0902 ), ( 0x093C, 0x093C ), ( 0x0941, 0x0948 ),
( 0x094D, 0x094D ), ( 0x0951, 0x0954 ), ( 0x0962, 0x0963 ),
( 0x0981, 0x0981 ), ( 0x09BC, 0x09BC ), ( 0x09C1, 0x09C4 ),
( 0x09CD, 0x09CD ), ( 0x09E2, 0x09E3 ), ( 0x0A01, 0x0A02 ),
( 0x0A3C, 0x0A3C ), ( 0x0A41, 0x0A42 ), ( 0x0A47, 0x0A48 ),
( 0x0A4B, 0x0A4D ), ( 0x0A70, 0x0A71 ), ( 0x0A81, 0x0A82 ),
( 0x0ABC, 0x0ABC ), ( 0x0AC1, 0x0AC5 ), ( 0x0AC7, 0x0AC8 ),
( 0x0ACD, 0x0ACD ), ( 0x0AE2, 0x0AE3 ), ( 0x0B01, 0x0B01 ),
( 0x0B3C, 0x0B3C ), ( 0x0B3F, 0x0B3F ), ( 0x0B41, 0x0B43 ),
( 0x0B4D, 0x0B4D ), ( 0x0B56, 0x0B56 ), ( 0x0B82, 0x0B82 ),
( 0x0BC0, 0x0BC0 ), ( 0x0BCD, 0x0BCD ), ( 0x0C3E, 0x0C40 ),
( 0x0C46, 0x0C48 ), ( 0x0C4A, 0x0C4D ), ( 0x0C55, 0x0C56 ),
( 0x0CBC, 0x0CBC ), ( 0x0CBF, 0x0CBF ), ( 0x0CC6, 0x0CC6 ),
( 0x0CCC, 0x0CCD ), ( 0x0D41, 0x0D43 ), ( 0x0D4D, 0x0D4D ),
( 0x0DCA, 0x0DCA ), ( 0x0DD2, 0x0DD4 ), ( 0x0DD6, 0x0DD6 ),
( 0x0E31, 0x0E31 ), ( 0x0E34, 0x0E3A ), ( 0x0E47, 0x0E4E ),
( 0x0EB1, 0x0EB1 ), ( 0x0EB4, 0x0EB9 ), ( 0x0EBB, 0x0EBC ),
( 0x0EC8, 0x0ECD ), ( 0x0F18, 0x0F19 ), ( 0x0F35, 0x0F35 ),
( 0x0F37, 0x0F37 ), ( 0x0F39, 0x0F39 ), ( 0x0F71, 0x0F7E ),
( 0x0F80, 0x0F84 ), ( 0x0F86, 0x0F87 ), ( 0x0F90, 0x0F97 ),
( 0x0F99, 0x0FBC ), ( 0x0FC6, 0x0FC6 ), ( 0x102D, 0x1030 ),
( 0x1032, 0x1032 ), ( 0x1036, 0x1037 ), ( 0x1039, 0x1039 ),
( 0x1058, 0x1059 ), ( 0x1160, 0x11FF ), ( 0x1712, 0x1714 ),
( 0x1732, 0x1734 ), ( 0x1752, 0x1753 ), ( 0x1772, 0x1773 ),
( 0x17B4, 0x17B5 ), ( 0x17B7, 0x17BD ), ( 0x17C6, 0x17C6 ),
( 0x17C9, 0x17D3 ), ( 0x17DD, 0x17DD ), ( 0x180B, 0x180D ),
( 0x18A9, 0x18A9 ), ( 0x1920, 0x1922 ), ( 0x1927, 0x1928 ),
( 0x1932, 0x1932 ), ( 0x1939, 0x193B ), ( 0x200B, 0x200F ),
( 0x202A, 0x202E ), ( 0x2060, 0x2063 ), ( 0x206A, 0x206F ),
( 0x20D0, 0x20EA ), ( 0x302A, 0x302F ), ( 0x3099, 0x309A ),
( 0xFB1E, 0xFB1E ), ( 0xFE00, 0xFE0F ), ( 0xFE20, 0xFE23 ),
( 0xFEFF, 0xFEFF ), ( 0xFFF9, 0xFFFB ), ( 0x1D167, 0x1D169 ),
( 0x1D173, 0x1D182 ), ( 0x1D185, 0x1D18B ), ( 0x1D1AA, 0x1D1AD ),
( 0xE0001, 0xE0001 ), ( 0xE0020, 0xE007F ), ( 0xE0100, 0xE01EF )
]
def wcwidth(c):
"""
Return the width in character cells of the single Unicode character
*c* (a length-1 string), per Markus Kuhn's wcwidth() reference:
0 for NUL and combining marks, -1 for other control characters,
2 for East Asian Wide/Fullwidth characters, and 1 otherwise.
"""
ucs = ord(c)
# test for 8-bit control characters
if ucs == 0:
# NUL occupies no cells.
return 0
if ucs < 32 or (ucs >= 0x7f and ucs < 0xa0):
# Other C0/C1 control characters and DEL are non-printable.
return -1
# binary search in table of non-spacing characters
if ucs > 0 and _bisearch(ucs, _combining):
return 0
# if we arrive here, ucs is not a combining or C0/C1 control character.
# The parenthesised boolean below is True (== 1) exactly for the
# double-width ranges listed, so the result is 2 for those and 1 for
# everything else.
return 1+ \
(ucs >= 0x1100 and
(ucs <= 0x115f or # Hangul Jamo init. consonants
ucs == 0x2329 or ucs == 0x232a or
(ucs >= 0x2e80 and ucs <= 0xa4cf and
ucs != 0x303f) or # CJK ... Yi
(ucs >= 0xac00 and ucs <= 0xd7a3) or # Hangul Syllables
(ucs >= 0xf900 and ucs <= 0xfaff) or # CJK Compatibility Ideographs *
(ucs >= 0xfe30 and ucs <= 0xfe6f) or # CJK Compatibility Forms
(ucs >= 0xff00 and ucs <= 0xff60) or # Fullwidth Forms
(ucs >= 0xffe0 and ucs <= 0xffe6) or
(ucs >= 0x20000 and ucs <= 0x2fffd) or
(ucs >= 0x30000 and ucs <= 0x3fffd)))
def wcswidth(pwcs):
    """
    Return the total width in character cells of the unicode string
    *pwcs*, or -1 if the string contains non-printable characters.
    """
    total = 0
    for ch in pwcs:
        cell_width = wcwidth(ch)
        # A single non-printable character poisons the whole string.
        if cell_width < 0:
            return -1
        total += cell_width
    return total
"""
sorted list of non-overlapping intervals of East Asian Ambiguous
characters, generated by "uniset +WIDTH-A -cat=Me -cat=Mn -cat=Cf c"
"""
_ambiguous = [
( 0x00A1, 0x00A1 ), ( 0x00A4, 0x00A4 ), ( 0x00A7, 0x00A8 ),
( 0x00AA, 0x00AA ), ( 0x00AE, 0x00AE ), ( 0x00B0, 0x00B4 ),
( 0x00B6, 0x00BA ), ( 0x00BC, 0x00BF ), ( 0x00C6, 0x00C6 ),
( 0x00D0, 0x00D0 ), ( 0x00D7, 0x00D8 ), ( 0x00DE, 0x00E1 ),
( 0x00E6, 0x00E6 ), ( 0x00E8, 0x00EA ), ( 0x00EC, 0x00ED ),
( 0x00F0, 0x00F0 ), ( 0x00F2, 0x00F3 ), ( 0x00F7, 0x00FA ),
( 0x00FC, 0x00FC ), ( 0x00FE, 0x00FE ), ( 0x0101, 0x0101 ),
( 0x0111, 0x0111 ), ( 0x0113, 0x0113 ), ( 0x011B, 0x011B ),
( 0x0126, 0x0127 ), ( 0x012B, 0x012B ), ( 0x0131, 0x0133 ),
( 0x0138, 0x0138 ), ( 0x013F, 0x0142 ), ( 0x0144, 0x0144 ),
( 0x0148, 0x014B ), ( 0x014D, 0x014D ), ( 0x0152, 0x0153 ),
( 0x0166, 0x0167 ), ( 0x016B, 0x016B ), ( 0x01CE, 0x01CE ),
( 0x01D0, 0x01D0 ), ( 0x01D2, 0x01D2 ), ( 0x01D4, 0x01D4 ),
( 0x01D6, 0x01D6 ), ( 0x01D8, 0x01D8 ), ( 0x01DA, 0x01DA ),
( 0x01DC, 0x01DC ), ( 0x0251, 0x0251 ), ( 0x0261, 0x0261 ),
( 0x02C4, 0x02C4 ), ( 0x02C7, 0x02C7 ), ( 0x02C9, 0x02CB ),
( 0x02CD, 0x02CD ), ( 0x02D0, 0x02D0 ), ( 0x02D8, 0x02DB ),
( 0x02DD, 0x02DD ), ( 0x02DF, 0x02DF ), ( 0x0391, 0x03A1 ),
( 0x03A3, 0x03A9 ), ( 0x03B1, 0x03C1 ), ( 0x03C3, 0x03C9 ),
( 0x0401, 0x0401 ), ( 0x0410, 0x044F ), ( 0x0451, 0x0451 ),
( 0x2010, 0x2010 ), ( 0x2013, 0x2016 ), ( 0x2018, 0x2019 ),
( 0x201C, 0x201D ), ( 0x2020, 0x2022 ), ( 0x2024, 0x2027 ),
( 0x2030, 0x2030 ), ( 0x2032, 0x2033 ), ( 0x2035, 0x2035 ),
( 0x203B, 0x203B ), ( 0x203E, 0x203E ), ( 0x2074, 0x2074 ),
( 0x207F, 0x207F ), ( 0x2081, 0x2084 ), ( 0x20AC, 0x20AC ),
( 0x2103, 0x2103 ), ( 0x2105, 0x2105 ), ( 0x2109, 0x2109 ),
( 0x2113, 0x2113 ), ( 0x2116, 0x2116 ), ( 0x2121, 0x2122 ),
( 0x2126, 0x2126 ), ( 0x212B, 0x212B ), ( 0x2153, 0x2154 ),
( 0x215B, 0x215E ), ( 0x2160, 0x216B ), ( 0x2170, 0x2179 ),
( 0x2190, 0x2199 ), ( 0x21B8, 0x21B9 ), ( 0x21D2, 0x21D2 ),
( 0x21D4, 0x21D4 ), ( 0x21E7, 0x21E7 ), ( 0x2200, 0x2200 ),
( 0x2202, 0x2203 ), ( 0x2207, 0x2208 ), ( 0x220B, 0x220B ),
( 0x220F, 0x220F ), ( 0x2211, 0x2211 ), ( 0x2215, 0x2215 ),
( 0x221A, 0x221A ), ( 0x221D, 0x2220 ), ( 0x2223, 0x2223 ),
( 0x2225, 0x2225 ), ( 0x2227, 0x222C ), ( 0x222E, 0x222E ),
( 0x2234, 0x2237 ), ( 0x223C, 0x223D ), ( 0x2248, 0x2248 ),
( 0x224C, 0x224C ), ( 0x2252, 0x2252 ), ( 0x2260, 0x2261 ),
( 0x2264, 0x2267 ), ( 0x226A, 0x226B ), ( 0x226E, 0x226F ),
( 0x2282, 0x2283 ), ( 0x2286, 0x2287 ), ( 0x2295, 0x2295 ),
( 0x2299, 0x2299 ), ( 0x22A5, 0x22A5 ), ( 0x22BF, 0x22BF ),
( 0x2312, 0x2312 ), ( 0x2460, 0x24E9 ), ( 0x24EB, 0x254B ),
( 0x2550, 0x2573 ), ( 0x2580, 0x258F ), ( 0x2592, 0x2595 ),
( 0x25A0, 0x25A1 ), ( 0x25A3, 0x25A9 ), ( 0x25B2, 0x25B3 ),
( 0x25B6, 0x25B7 ), ( 0x25BC, 0x25BD ), ( 0x25C0, 0x25C1 ),
( 0x25C6, 0x25C8 ), ( 0x25CB, 0x25CB ), ( 0x25CE, 0x25D1 ),
( 0x25E2, 0x25E5 ), ( 0x25EF, 0x25EF ), ( 0x2605, 0x2606 ),
( 0x2609, 0x2609 ), ( 0x260E, 0x260F ), ( 0x2614, 0x2615 ),
( 0x261C, 0x261C ), ( 0x261E, 0x261E ), ( 0x2640, 0x2640 ),
( 0x2642, 0x2642 ), ( 0x2660, 0x2661 ), ( 0x2663, 0x2665 ),
( 0x2667, 0x266A ), ( 0x266C, 0x266D ), ( 0x266F, 0x266F ),
( 0x273D, 0x273D ), ( 0x2776, 0x277F ), ( 0xE000, 0xF8FF ),
( 0xFFFD, 0xFFFD ), ( 0xF0000, 0xFFFFD ), ( 0x100000, 0x10FFFD )
]
"""
* The following functions are the same as mk_wcwidth() and
* mk_wcwidth_cjk(), except that spacing characters in the East Asian
* Ambiguous (A) category as defined in Unicode Technical Report #11
* have a column width of 2. This variant might be useful for users of
* CJK legacy encodings who want to migrate to UCS without changing
* the traditional terminal character-width behaviour. It is not
* otherwise recommended for general use.
"""
def wcwidth_cjk(ucs):
    """As wcwidth() above, but spacing characters in the East Asian
    Ambiguous (A) category as defined in Unicode Technical Report #11
    have a column width of 2.

    *ucs* is a length-1 string, the same interface as wcwidth() (and
    the way wcswidth_cjk() calls this one character at a time).
    """
    # Bug fix: _bisearch() compares integer codepoints, but this function
    # receives a character (wcwidth() below calls ord() itself).  The
    # original passed the character straight to _bisearch(), so on
    # Python 2 the str/int comparison silently never matched the
    # ambiguous-width table, and on Python 3 it raises TypeError.
    if _bisearch(ord(ucs), _ambiguous):
        return 2
    return wcwidth(ucs)
def wcswidth_cjk(pwcs):
    """As wcswidth() above, but spacing characters in the East Asian
    Ambiguous (A) category as defined in Unicode Technical Report #11
    count as two columns wide.
    """
    total = 0
    for ch in pwcs:
        per_char = wcwidth_cjk(ch)
        # Non-printable character: width of the whole string is undefined.
        if per_char < 0:
            return -1
        total += per_char
    return total
#####################################################################
def _measure_string(ucs, length):
    """Consume characters from *ucs* until their cumulative cell width
    reaches *length* (or the string is exhausted).

    Returns ``(prefix, width)`` where *width* is the cell width of
    *prefix*; note width may overshoot *length* by one cell when the
    last character taken is double-width.
    """
    taken = 0
    width = 0
    for ch in ucs:
        if width >= length:
            break
        width += wcswidth(ch)
        taken += 1
    return (ucs[:taken], width)
def rpadstring(ucs, length, padchar=' '):
    """Right-pad a Unicode string with *padchar* so that its width in
    character cells is *length*.  Padchar must be of width 1.  The
    string is truncated if it is too long."""
    prefix, width = _measure_string(ucs, length)
    if width == length:
        return prefix
    if width > length:
        # The last character taken was double-width and overshot the
        # target by one cell: drop it and cover the gap with padchar.
        return prefix[:-1] + padchar
    return prefix + padchar * (length - width)
def truncatestring(ucs, length):
    """Truncate a Unicode string so that its cell width is as large as
    possible without exceeding *length*."""
    prefix, width = _measure_string(ucs, length)
    # Overshoot means the final character taken was double-width: drop it.
    return prefix[:-1] if width > length else prefix
# Integer-codepoint convenience wrapper: wcwidth() expects a one-character
# string (it calls ord() itself), so convert the int back with chr().
def wcWidth(c):
return wcwidth(chr(c))
| gpl-2.0 |
murix/openimu | libraries/adxl345driver/examples/ADXL345_run/accel_cal.py | 16 | 1450 | import scipy.optimize
from pylab import *
from numpy import *
cal = open('cal1.dat')
for f in [cal]:
data = array([map(float, l.split()) for l in f.readlines()])
N = len(data[0])
x = data[:,0]
y = data[:,1]
z = data[:,2]
data = data[:,:3]
def cost(args):
# Ellipsoid-fit cost for accelerometer calibration.
# args = (rx, ry, rz, cx, cy, cz): per-axis radii followed by the
# centre offset of the readings — TODO confirm ordering against the
# `guess` vector below.
rs = diag(1. / array(args[0:3])) ** 2
c = array(args[3:6])
m = data - array(c)
out = 0
for p in m:
# A perfectly calibrated, centred point p satisfies p' R p == 1,
# so accumulate the absolute deviation from 1.
out += abs(dot(dot(p, rs), p) - 1)
return out
# Fit the ellipsoid model starting from a rough guess (raw counts ~250
# per axis, zero offset) and print cost before/after plus the scale
# factors (1/radius per axis).
guess = array([250, 250, 250, 0, 0, 0])
print cost(guess)
best = scipy.optimize.fmin(cost, guess)
print cost(best)
print best
print 1/best[:3]
# NOTE(review): the bare name `here` below is an undefined-name crash —
# it looks like a deliberate "stop the script here" debugging sentinel;
# the circle-fit code further down also reads cx, cy, r before they are
# ever assigned, so everything after this line is dead/broken.
here
A = array([[cx + r * cos(theta),
cy + r * sin(theta)] for theta in arange(0, 2 * pi, 1 * pi/180)])
# plot(A[:,0], A[:,1], 'g-')
xy = data[:,1:3]
def cost(params):
# Circle-fit cost: sum of squared distances of each 2-D sample in
# `xy` from the circle centred at (cx, cy) with radius r.
cx, cy, r = params
xy_ = xy - [cx, cy]
# Angle of each centred sample, used to project it onto the circle.
thetas = arctan2(xy_[:,1], xy_[:,0])
resids = xy_ - transpose([r * cos(thetas), r * sin(thetas)])
return sum(ravel(resids ** 2))
cx, cy, r = scipy.optimize.fmin(cost, [cx, cy, r], disp=False)
print f.name, cx, cy, r
## acc_cal_lights_on.dat 550.150958354 507.218838209 249.831129791
## acc_cal_lights_off.dat 563.391868993 518.281081432 251.367556713
A = array([[cx + r * cos(theta),
cy + r * sin(theta)] for theta in arange(0, 2 * pi, 1 * pi/180)])
plot(A[:,0], A[:,1])
| gpl-3.0 |
a-parhom/edx-platform | cms/djangoapps/contentstore/tests/test_clone_course.py | 21 | 6291 | """
Unit tests for cloning a course between the same and different module stores.
"""
import json
from django.conf import settings
from mock import Mock, patch
from opaque_keys.edx.locator import CourseLocator
from contentstore.tasks import rerun_course
from contentstore.tests.utils import CourseTestCase
from course_action_state.managers import CourseRerunUIStateManager
from course_action_state.models import CourseRerunState
from student.auth import has_course_author_access
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import EdxJSONEncoder, ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
class CloneCourseTest(CourseTestCase):
"""
Unit tests for cloning a course
"""
def test_clone_course(self):
"""Tests cloning of a course as follows: XML -> Mongo (+ data) -> Mongo -> Split -> Split"""
# 1. import and populate test toy course
mongo_course1_id = self.import_and_populate_course()
mongo_course2_id = mongo_course1_id
# 3. clone course (mongo -> split)
# NOTE(review): step 2 (presumably a mongo -> mongo clone) is missing;
# course2 is simply aliased to course1 above.
with self.store.default_store(ModuleStoreEnum.Type.split):
split_course3_id = CourseLocator(
org="edx3", course="split3", run="2013_Fall"
)
self.store.clone_course(mongo_course2_id, split_course3_id, self.user.id)
# assertCoursesEqual compares full course content across stores.
self.assertCoursesEqual(mongo_course2_id, split_course3_id)
# 4. clone course (split -> split)
split_course4_id = CourseLocator(
org="edx4", course="split4", run="2013_Fall"
)
self.store.clone_course(split_course3_id, split_course4_id, self.user.id)
self.assertCoursesEqual(split_course3_id, split_course4_id)
def test_space_in_asset_name_for_rerun_course(self):
"""
Tests check the scenario where one course which has an asset with percentage(%) in its
name, it should re-run successfully.
"""
org = 'edX'
course_number = 'CS101'
course_run = '2015_Q1'
display_name = 'rerun'
fields = {'display_name': display_name}
course_assets = set([u'subs_Introduction%20To%20New.srt.sjson'], )
# Create a course using split modulestore
course = CourseFactory.create(
org=org,
number=course_number,
run=course_run,
display_name=display_name,
default_store=ModuleStoreEnum.Type.split
)
# add an asset
asset_key = course.id.make_asset_key('asset', 'subs_Introduction%20To%20New.srt.sjson')
content = StaticContent(
asset_key, 'Dummy assert', 'application/json', 'dummy data',
)
contentstore().save(content)
# Get & verify all assets of the course
assets, count = contentstore().get_all_content_for_course(course.id)
self.assertEqual(count, 1)
self.assertEqual(set([asset['asset_key'].block_id for asset in assets]), course_assets)
# rerun from split into split
split_rerun_id = CourseLocator(org=org, course=course_number, run="2012_Q2")
CourseRerunState.objects.initiated(course.id, split_rerun_id, self.user, fields['display_name'])
result = rerun_course.delay(
unicode(course.id),
unicode(split_rerun_id),
self.user.id,
json.dumps(fields, cls=EdxJSONEncoder)
)
# Check if re-run was successful
self.assertEqual(result.get(), "succeeded")
rerun_state = CourseRerunState.objects.find_first(course_key=split_rerun_id)
self.assertEqual(rerun_state.state, CourseRerunUIStateManager.State.SUCCEEDED)
def test_rerun_course(self):
"""
Unit tests for :meth: `contentstore.tasks.rerun_course`
"""
mongo_course1_id = self.import_and_populate_course()
# rerun from mongo into split
split_course3_id = CourseLocator(
org="edx3", course="split3", run="rerun_test"
)
# Mark the action as initiated
fields = {'display_name': 'rerun'}
CourseRerunState.objects.initiated(mongo_course1_id, split_course3_id, self.user, fields['display_name'])
result = rerun_course.delay(unicode(mongo_course1_id), unicode(split_course3_id), self.user.id,
json.dumps(fields, cls=EdxJSONEncoder))
self.assertEqual(result.get(), "succeeded")
self.assertTrue(has_course_author_access(self.user, split_course3_id), "Didn't grant access")
rerun_state = CourseRerunState.objects.find_first(course_key=split_course3_id)
self.assertEqual(rerun_state.state, CourseRerunUIStateManager.State.SUCCEEDED)
# try creating rerunning again to same name and ensure it generates error
result = rerun_course.delay(unicode(mongo_course1_id), unicode(split_course3_id), self.user.id)
self.assertEqual(result.get(), "duplicate course")
# the below will raise an exception if the record doesn't exist
CourseRerunState.objects.find_first(
course_key=split_course3_id,
state=CourseRerunUIStateManager.State.FAILED
)
# try to hit the generic exception catch
with patch('xmodule.modulestore.split_mongo.mongo_connection.MongoConnection.insert_course_index', Mock(side_effect=Exception)):
split_course4_id = CourseLocator(org="edx3", course="split3", run="rerun_fail")
fields = {'display_name': 'total failure'}
CourseRerunState.objects.initiated(split_course3_id, split_course4_id, self.user, fields['display_name'])
result = rerun_course.delay(unicode(split_course3_id), unicode(split_course4_id), self.user.id,
json.dumps(fields, cls=EdxJSONEncoder))
self.assertIn("exception: ", result.get())
self.assertIsNone(self.store.get_course(split_course4_id), "Didn't delete course after error")
CourseRerunState.objects.find_first(
course_key=split_course4_id,
state=CourseRerunUIStateManager.State.FAILED
)
| agpl-3.0 |
elidrc/PSO | test_pso.py | 1 | 1192 | from benchmark_functions import *
from pso import *
import matplotlib.pyplot as plt
iterations = 100
particles = 500
dimensions = 2
search_space = [[-5.12] * dimensions, [5.12] * dimensions]
# print init_pso(iterations, particles, search_space)
velocity, fitness, local_best, local_position, global_best, global_position = init_pso(iterations, particles,
search_space)
# print create_swarm(particles, search_space)
swarm = create_swarm(particles, search_space)
iteration = 0
while iteration < iterations:
fitness = [sphere(solution) for solution in swarm]
local_best, local_position = update_local_position(swarm, fitness, local_best, local_position)
global_best, global_position = update_global_position(swarm, local_best, global_best, global_position, iteration)
swarm, velocity = update_swarm(swarm, velocity, local_position, global_position, iteration)
swarm = check_swarm(swarm, search_space)
iteration += 1
plt.plot(global_best, '.-', label='%f' % min(global_best))
plt.xlim(-1, iteration)
# plt.ylim(min(global_best), max(global_best)+0.01)
plt.legend()
plt.show()
| mit |
sahana/Turkey | modules/unit_tests/s3/s3timeplot.py | 14 | 74187 | # -*- coding: utf-8 -*-
#
# Time Plot Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3timeplot.py
#
import datetime
import random
import unittest
import dateutil.tz
from gluon import *
from s3.s3timeplot import *
from s3.s3timeplot import tp_datetime
from s3.s3query import FS
# =============================================================================
class EventTests(unittest.TestCase):
    """ Tests for the S3TimeSeriesEvent class """

    def testSeries(self):
        """ Test the series-method (conversion of a value into a set) """

        series = S3TimeSeriesEvent.series
        assertEqual = self.assertEqual

        # (input, expected set) pairs
        cases = (
            # Explicit None gives {None}
            (None, set([None])),
            # Scalar values give single-element sets
            (0, set([0])),
            ("A", set(["A"])),
            # Empty lists (even nested) give the empty set
            ([], set()),
            ([[], []], set()),
            # Lists give equivalent sets (duplicates collapse)
            (["A", "B"], set(["A", "B"])),
            (["A", None], set(["A", None])),
            (["A", []], set(["A"])),
            # Nested lists are flattened
            (["A", ["B", "C"]], set(["A", "B", "C"])),
            (["A", ["B", "A"]], set(["A", "B"])),
        )
        for value, expected in cases:
            assertEqual(series(value), expected)

    # -------------------------------------------------------------------------
    def testConstruction(self):
        """ Test event construction """

        assertEqual = self.assertEqual
        assertTrue = self.assertTrue

        start = tp_datetime(2004, 1, 1)
        end = tp_datetime(2004, 3, 21)
        event_id = 1
        values = {"test": 2}

        # Without values: values default to an empty dict
        event = S3TimeSeriesEvent(event_id, start=start, end=end)
        assertEqual(event.event_id, event_id)
        assertTrue(isinstance(event.values, dict))
        assertEqual(event["test"], None)

        # With values: item access returns the stored fact value
        event = S3TimeSeriesEvent(event_id, start=start, end=end,
                                  values=values)
        assertEqual(event.event_id, event_id)
        assertEqual(event.values, values)
        assertEqual(event["test"], values["test"])

        # With values and a row axis value (cols stays empty)
        event = S3TimeSeriesEvent(event_id, start=start, end=end,
                                  values=values, row="A")
        assertEqual(event.event_id, event_id)
        assertEqual(event.values, values)
        assertEqual(event["test"], values["test"])
        assertEqual(event.rows, set(["A"]))
        assertEqual(event.cols, set())

        # With values and both row and (multi-valued) col axis values
        event = S3TimeSeriesEvent(event_id, start=start, end=end,
                                  values=values, row="B", col=[1, 4, 7])
        assertEqual(event.event_id, event_id)
        assertEqual(event.values, values)
        assertEqual(event["test"], values["test"])
        assertEqual(event.rows, set(["B"]))
        assertEqual(event.cols, set([1, 4, 7]))

    # -------------------------------------------------------------------------
    def testComparison(self):
        """ Test comparison method __lt__ for events (by sorting them) """

        # (event_id, start, end) - None means open-ended
        data = [(8, (2012,1,12), (2013,9,19)),
                (2, None, (2013,4,21)),
                (6, (2013,4,14), (2013,5,7)),
                (3, (2012,4,21), (2013,6,3)),
                (4, (2013,4,18), (2013,5,27)),
                ]
        events = [S3TimeSeriesEvent(event_id,
                                    start = tp_datetime(*start) if start else None,
                                    end = tp_datetime(*end) if end else None,
                                    )
                  for event_id, start, end in data]

        # Events without start date sort first, then by start date
        order = [event.event_id for event in sorted(events)]
        self.assertEqual(order, [2, 8, 3, 6, 4])
# =============================================================================
class PeriodTests(unittest.TestCase):
    """ Tests for S3TimeSeriesPeriod (with row and column pivot axes) """

    def setUp(self):
        """
            Construct a Q2/2013 period (2013-04-01..2013-07-01) and
            populate it with "current" events (inside the period) and
            "previous" events (ended before the period starts), each
            carrying "base"/"slope" fact values and row ("A".."C") /
            column (1..3) axis values
        """

        # Period
        start = tp_datetime(2013,4,1)
        end = tp_datetime(2013,7,1)
        period = S3TimeSeriesPeriod(start=start, end=end)

        # Add current events
        # (event_id, start, end, values, row, col)
        events = [
            # 7 days
            (1, (2013,4,1), (2013,4,7), {"base": 10, "slope": 2}, "A", 1),
            (2, (2013,4,3), (2013,4,9), {"base": 20, "slope": 5}, "A", 2),
            (3, (2013,4,5), (2013,4,11), {"base": 30, "slope": 2}, "B", 2),
            (4, (2013,4,7), (2013,4,13), {"base": 40, "slope": 5}, "B", 3),
            # 10 days
            (5, (2013,5,1), (2013,5,10), {"base": 50, "slope": 5}, "C", 1),
            (6, (2013,5,3), (2013,5,12), {"base": 60, "slope": 2}, "C", 2),
            (7, (2013,5,5), (2013,5,14), {"base": 70, "slope": 5}, "A", 1),
            (8, (2013,5,7), (2013,5,16), {"base": 80, "slope": 2}, "A", 2),
            # 20 days
            (9, (2013,6,1), (2013,6,20), {"base": 100, "slope": 4}, "B", 2),
            (10, (2013,6,3), (2013,6,22), {"base": 200, "slope": 7}, "B", 3),
            (11, (2013,6,5), (2013,6,24), {"base": 300, "slope": 4}, "C", 1),
            (12, (2013,6,12), None, {"base": 400, "slope": 7}, "C", 3),
        ]
        for event_id, start, end, values, row, col in events:
            event_start = tp_datetime(*start) if start else None
            event_end = tp_datetime(*end) if end else None
            event = S3TimeSeriesEvent(event_id,
                                      start = event_start,
                                      end = event_end,
                                      values = values,
                                      row = row,
                                      col = col,
                                      )
            period.add_current(event)
        self.current_events = events

        # Add previous events
        events = [
            # 10 days
            (13, (2012,8,1), (2012,8,10), {"base": 20, "slope": 5}, "A", 3),
            (14, (2012,8,3), (2012,8,12), {"base": 20, "slope": 5}, "B", 2),
            (15, None, (2012,8,14), {"base": 20, "slope": 5}, "C", 1),
            (16, (2012,8,7), (2012,8,16), {"base": 20, "slope": 5}, "C", 3),
        ]
        for event_id, start, end, values, row, col in events:
            event_start = tp_datetime(*start) if start else None
            event_end = tp_datetime(*end) if end else None
            event = S3TimeSeriesEvent(event_id,
                                      start = event_start,
                                      end = event_end,
                                      values = values,
                                      row = row,
                                      col = col,
                                      )
            period.add_previous(event)
        self.previous_events = events

        # Store period
        self.period = period

    # -------------------------------------------------------------------------
    def testEvents(self):
        """ Verify events in test period """

        assertTrue = self.assertTrue

        cevents = self.period.cevents
        for item in self.current_events:
            assertTrue(item[0] in cevents)

        pevents = self.period.pevents
        for item in self.previous_events:
            assertTrue(item[0] in pevents)

    # -------------------------------------------------------------------------
    def testDuration(self):
        """ Test computation of event duration before the end of a period """

        # (start, end, expected duration in days) - durations are capped
        # at the period end, and 0 for events starting after the period
        events = (
            ((2013, 1, 1, 0, 0, 0), (2013, 1, 31, 0, 0, 0), 31),
            ((2013, 3, 1, 0, 0, 0), (2013, 4, 2, 0, 0, 0), 33),
            ((2013, 3, 8, 0, 0, 0), (2013, 8, 5, 0, 0, 0), 116),
            ((2013, 5, 1, 0, 0, 0), (2013, 9, 21, 0, 0, 0), 62),
            ((2013, 5, 1, 0, 0, 0), (2013, 5, 5, 0, 0, 0), 5),
            ((2013, 8, 5, 0, 0, 0), (2013, 9, 16, 0, 0, 0), 0),
        )

        period = self.period
        for index, event in enumerate(events):
            start, end, expected_duration = event
            tp_event = S3TimeSeriesEvent(index,
                                         start=tp_datetime(*start),
                                         end=tp_datetime(*end),
                                         )
            duration = period.duration(tp_event, "days")
            self.assertEqual(duration, expected_duration,
                             msg = "Incorrect result for duration of event %s: %s != %s." %
                                   (index + 1, duration, expected_duration))

    # -------------------------------------------------------------------------
    def testGrouping(self):
        """ Test grouping of period events """

        period = self.period
        period.group()

        assertEqual = self.assertEqual

        # Check rows
        expected_rows = {"A": ((1, 2, 7, 8), ()),
                         "B": ((3, 4, 9, 10), ()),
                         "C": ((5, 6, 11, 12), ()),
                         }
        rows = period._rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            expected_current = set(expected_rows.get(k)[0])
            assertEqual(v[0], expected_current,
                        msg = "Row %s current events: %s != %s" %
                              (k, v[0], expected_current))
            expected_previous = set(expected_rows.get(k)[1])
            assertEqual(v[1], expected_previous,
                        msg = "Row %s previous events: %s != %s" %
                              (k, v[1], expected_previous))

        # Check columns
        expected_cols = {1: ((1, 5, 7, 11), ()),
                         2: ((2, 3, 6, 8, 9), ()),
                         3: ((4, 10, 12), ()),
                         }
        cols = period._cols
        assertEqual(set(cols.keys()), set(expected_cols.keys()))
        for k, v in cols.items():
            expected_current = set(expected_cols.get(k)[0])
            assertEqual(v[0], expected_current,
                        msg = "Column %s current events: %s != %s" %
                              (k, v[0], expected_current))
            expected_previous = set(expected_cols.get(k)[1])
            assertEqual(v[1], expected_previous,
                        msg = "Column %s previous events: %s != %s" %
                              (k, v[1], expected_previous))

        # Check matrix
        expected_matrix = {("A", 1): ((1, 7), ()),
                           ("A", 2): ((2, 8), ()),
                           #("A", 3): (empty),
                           #("B", 1): (empty),
                           ("B", 2): ((3, 9), ()),
                           ("B", 3): ((4, 10), ()),
                           ("C", 1): ((5, 11), ()),
                           ("C", 2): ((6,), ()),
                           ("C", 3): ((12,), ()),
                           }
        matrix = period._matrix
        assertEqual(set(matrix.keys()), set(expected_matrix.keys()))
        for k, v in matrix.items():
            expected_current = set(expected_matrix.get(k)[0])
            assertEqual(v[0], expected_current,
                        msg = "Cell %s current events: %s != %s" %
                              (k, v[0], expected_current))
            expected_previous = set(expected_matrix.get(k)[1])
            assertEqual(v[1], expected_previous,
                        msg = "Cell %s previous events: %s != %s" %
                              (k, v[1], expected_previous))

    # -------------------------------------------------------------------------
    def testGroupingCumulative(self):
        """ Test grouping of period events including previous events """

        period = self.period
        period.group(cumulative=True)

        assertEqual = self.assertEqual

        # Check rows
        expected_rows = {"A": ((1, 2, 7, 8), (13,)),
                         "B": ((3, 4, 9, 10), (14,)),
                         "C": ((5, 6, 11, 12), (15,16)),
                         }
        rows = period._rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            expected_current = set(expected_rows.get(k)[0])
            assertEqual(v[0], expected_current,
                        msg = "Row %s current events: %s != %s" %
                              (k, v[0], expected_current))
            expected_previous = set(expected_rows.get(k)[1])
            assertEqual(v[1], expected_previous,
                        msg = "Row %s previous events: %s != %s" %
                              (k, v[1], expected_previous))

        # Check columns
        expected_cols = {1: ((1, 5, 7, 11), (15,)),
                         2: ((2, 3, 6, 8, 9), (14,)),
                         3: ((4, 10, 12), (13, 16)),
                         }
        cols = period._cols
        assertEqual(set(cols.keys()), set(expected_cols.keys()))
        for k, v in cols.items():
            expected_current = set(expected_cols.get(k)[0])
            assertEqual(v[0], expected_current,
                        msg = "Column %s current events: %s != %s" %
                              (k, v[0], expected_current))
            expected_previous = set(expected_cols.get(k)[1])
            assertEqual(v[1], expected_previous,
                        msg = "Column %s previous events: %s != %s" %
                              (k, v[1], expected_previous))

        # Check matrix
        expected_matrix = {("A", 1): ((1, 7), ()),
                           ("A", 2): ((2, 8), ()),
                           ("A", 3): ((), (13,)),
                           #("B", 1): (empty),
                           ("B", 2): ((3, 9), (14,)),
                           ("B", 3): ((4, 10), ()),
                           ("C", 1): ((5, 11), (15,)),
                           ("C", 2): ((6,), ()),
                           ("C", 3): ((12,), (16,)),
                           }
        matrix = period._matrix
        assertEqual(set(matrix.keys()), set(expected_matrix.keys()))
        for k, v in matrix.items():
            expected_current = set(expected_matrix.get(k)[0])
            assertEqual(v[0], expected_current,
                        msg = "Cell %s current events: %s != %s" %
                              (k, v[0], expected_current))
            expected_previous = set(expected_matrix.get(k)[1])
            assertEqual(v[1], expected_previous,
                        msg = "Cell %s previous events: %s != %s" %
                              (k, v[1], expected_previous))

    # -------------------------------------------------------------------------
    def testAggregateCount(self):
        """ Test aggregation: count """

        period = self.period
        assertEqual = self.assertEqual

        # Aggregate
        totals = period.aggregate(S3TimeSeriesFact("count", "base"))

        # Check rows
        expected_rows = {"A": [4],
                         "B": [4],
                         "C": [4],
                         }
        rows = period.rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            assertEqual(v, expected_rows.get(k),
                        msg = "Row %s: %s != %s" %
                              (k, v, expected_rows.get(k)))

        # Check columns
        expected_cols = {1: [4],
                         2: [5],
                         3: [3],
                         }
        cols = period.cols
        assertEqual(set(cols.keys()), set(expected_cols.keys()))
        for k, v in cols.items():
            assertEqual(v, expected_cols.get(k),
                        msg = "Column %s: %s != %s" %
                              (k, v, expected_cols.get(k)))

        # Check matrix
        expected_matrix = {("A", 1): [2],
                           ("A", 2): [2],
                           ("B", 2): [2],
                           ("B", 3): [2],
                           ("C", 1): [2],
                           ("C", 2): [1],
                           ("C", 3): [1],
                           }
        matrix = period.matrix
        assertEqual(set(matrix.keys()), set(expected_matrix.keys()))
        for k, v in matrix.items():
            assertEqual(v, expected_matrix.get(k),
                        msg = "Cell %s: %s != %s" %
                              (k, v, expected_matrix.get(k)))

        # Check total
        expected_totals = [12]
        assertEqual(period.totals, expected_totals)
        assertEqual(totals, expected_totals)

    # -------------------------------------------------------------------------
    def testAggregateSum(self):
        """ Test aggregation: sum """

        period = self.period
        assertEqual = self.assertEqual

        # Aggregate
        totals = period.aggregate(S3TimeSeriesFact("sum", "base"))

        # Check rows
        expected_rows = {"A": [180],
                         "B": [370],
                         "C": [810],
                         }
        rows = period.rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            assertEqual(v, expected_rows.get(k),
                        msg = "Row %s: %s != %s" %
                              (k, v, expected_rows.get(k)))

        # Check columns
        expected_cols = {1: [430],
                         2: [290],
                         3: [640],
                         }
        cols = period.cols
        assertEqual(set(cols.keys()), set(expected_cols.keys()))
        for k, v in cols.items():
            assertEqual(v, expected_cols.get(k),
                        msg = "Column %s: %s != %s" %
                              (k, v, expected_cols.get(k)))

        # Check matrix
        expected_matrix = {("A", 1): [80],
                           ("A", 2): [100],
                           ("B", 2): [130],
                           ("B", 3): [240],
                           ("C", 1): [350],
                           ("C", 2): [60],
                           ("C", 3): [400],
                           }
        matrix = period.matrix
        assertEqual(set(matrix.keys()), set(expected_matrix.keys()))
        for k, v in matrix.items():
            assertEqual(v, expected_matrix.get(k),
                        msg = "Cell %s: %s != %s" %
                              (k, v, expected_matrix.get(k)))

        # Check total
        expected_totals = [1360]
        assertEqual(period.totals, expected_totals)
        assertEqual(totals, expected_totals)

    # -------------------------------------------------------------------------
    def testAggregateAvg(self):
        """ Test aggregation: avg """

        period = self.period
        assertEqual = self.assertEqual
        # Averages are floats => compare with assertAlmostEqual
        assertAlmostEqual = self.assertAlmostEqual

        # Aggregate
        totals = period.aggregate(S3TimeSeriesFact("avg", "base"))

        # Check rows
        expected_rows = {"A": [45],
                         "B": [92.5],
                         "C": [202.5],
                         }
        rows = period.rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            expected = expected_rows.get(k)
            for i, expected_value in enumerate(expected):
                assertAlmostEqual(v[i], expected_value,
                                  msg = "Row %s: %s != %s" %
                                        (k, v, expected_rows.get(k)))

        # Check columns
        expected_cols = {1: [107.5],
                         2: [58],
                         3: [213.3333333],
                         }
        cols = period.cols
        assertEqual(set(cols.keys()), set(expected_cols.keys()))
        for k, v in cols.items():
            expected = expected_cols.get(k)
            for i, expected_value in enumerate(expected):
                assertAlmostEqual(v[i], expected_value,
                                  msg = "Column %s: %s != %s" %
                                        (k, v, expected_cols.get(k)))

        # Check matrix
        expected_matrix = {("A", 1): [40],
                           ("A", 2): [50],
                           ("B", 2): [65],
                           ("B", 3): [120],
                           ("C", 1): [175],
                           ("C", 2): [60],
                           ("C", 3): [400],
                           }
        matrix = period.matrix
        assertEqual(set(matrix.keys()), set(expected_matrix.keys()))
        for k, v in matrix.items():
            expected = expected_matrix.get(k)
            for i, expected_value in enumerate(expected):
                assertAlmostEqual(v[i], expected_value,
                                  msg = "Cell %s: %s != %s" %
                                        (k, v, expected_matrix.get(k)))

        # Check total
        expected_totals = [113.3333333]
        assertEqual(len(period.totals), 1)
        assertAlmostEqual(period.totals[0], expected_totals[0])
        assertEqual(len(totals), 1)
        assertAlmostEqual(totals[0], expected_totals[0])

    # -------------------------------------------------------------------------
    def testAggregateMinMax(self):
        """ Test aggregation: min/max (combined) """

        period = self.period
        assertEqual = self.assertEqual

        # Aggregate (two facts => two values per cell/total)
        totals = period.aggregate([S3TimeSeriesFact("min", "base"),
                                   S3TimeSeriesFact("max", "base"),
                                   ],
                                  )

        # Check rows
        expected_rows = {"A": [10, 80],
                         "B": [30, 200],
                         "C": [50, 400],
                         }
        rows = period.rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            assertEqual(v, expected_rows.get(k),
                        msg = "Row %s: %s != %s" %
                              (k, v, expected_rows.get(k)))

        # Check columns
        expected_cols = {1: [10, 300],
                         2: [20, 100],
                         3: [40, 400],
                         }
        cols = period.cols
        assertEqual(set(cols.keys()), set(expected_cols.keys()))
        for k, v in cols.items():
            assertEqual(v, expected_cols.get(k),
                        msg = "Column %s: %s != %s" %
                              (k, v, expected_cols.get(k)))

        # Check matrix
        expected_matrix = {("A", 1): [10, 70],
                           ("A", 2): [20, 80],
                           ("B", 2): [30, 100],
                           ("B", 3): [40, 200],
                           ("C", 1): [50, 300],
                           ("C", 2): [60, 60],
                           ("C", 3): [400, 400],
                           }
        matrix = period.matrix
        assertEqual(set(matrix.keys()), set(expected_matrix.keys()))
        for k, v in matrix.items():
            assertEqual(v, expected_matrix.get(k),
                        msg = "Cell %s: %s != %s" %
                              (k, v, expected_matrix.get(k)))

        # Check total
        expected_totals = [10, 400]
        assertEqual(period.totals, expected_totals)
        assertEqual(totals, expected_totals)

    # -------------------------------------------------------------------------
    def testAggregateCumulate(self):
        """ Test aggregation: sum/cumulate (combined) """

        period = self.period
        assertEqual = self.assertEqual

        # Aggregate ("cumulate" also includes previous events)
        totals = period.aggregate([S3TimeSeriesFact("sum", "base"),
                                   S3TimeSeriesFact("cumulate",
                                                    "base",
                                                    slope="slope",
                                                    interval="days",
                                                    ),
                                   ]
                                  )

        # Check rows
        expected_rows = {"A": [180, 369],
                         "B": [370, 709],
                         "C": [810, 1170],
                         }
        rows = period.rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            assertEqual(v, expected_rows.get(k),
                        msg = "Row %s: %s != %s" %
                              (k, v, expected_rows.get(k)))

        # Check columns
        expected_cols = {1: [430, 624],
                         2: [290, 529],
                         3: [640, 1095],
                         }
        cols = period.cols
        assertEqual(set(cols.keys()), set(expected_cols.keys()))
        for k, v in cols.items():
            assertEqual(v, expected_cols.get(k),
                        msg = "Column %s: %s != %s" %
                              (k, v, expected_cols.get(k)))

        # Check matrix
        expected_matrix = {("A", 1): [80, 144],
                           ("A", 2): [100, 155],
                           ("A", 3): [0, 70],
                           ("B", 2): [130, 294],
                           ("B", 3): [240, 415],
                           ("C", 1): [350, 480],
                           ("C", 2): [60, 80],
                           ("C", 3): [400, 610],
                           }
        matrix = period.matrix
        assertEqual(set(matrix.keys()), set(expected_matrix.keys()))
        for k, v in matrix.items():
            assertEqual(v, expected_matrix.get(k),
                        msg = "Cell %s: %s != %s" %
                              (k, v, expected_matrix.get(k)))

        # Check total
        expected_totals = [1360, 2248]
        assertEqual(period.totals, expected_totals)
        assertEqual(totals, expected_totals)
# =============================================================================
class PeriodTestsSingleAxis(unittest.TestCase):
    """ Tests for S3TimeSeriesPeriod with single pivot axis """

    def setUp(self):
        """
            Construct a Q2/2013 period with current and previous events
            that carry only a row axis value ("A".."C"), no column axis
            => grouping/aggregation must leave cols and matrix empty
        """

        # Period
        start = tp_datetime(2013,4,1)
        end = tp_datetime(2013,7,1)
        period = S3TimeSeriesPeriod(start=start, end=end)

        # Add current events
        # (event_id, start, end, values, row)
        events = [
            # 7 days
            (1, (2013,4,1), (2013,4,7), {"base": 10, "slope": 2}, "A"),
            (2, (2013,4,3), (2013,4,9), {"base": 20, "slope": 5}, "A"),
            (3, (2013,4,5), (2013,4,11), {"base": 30, "slope": 2}, "B"),
            (4, (2013,4,7), (2013,4,13), {"base": 40, "slope": 5}, "B"),
            # 10 days
            (5, (2013,5,1), (2013,5,10), {"base": 50, "slope": 5}, "C"),
            (6, (2013,5,3), (2013,5,12), {"base": 60, "slope": 2}, "C"),
            (7, (2013,5,5), (2013,5,14), {"base": 70, "slope": 5}, "A"),
            (8, (2013,5,7), (2013,5,16), {"base": 80, "slope": 2}, "A"),
            # 20 days
            (9, (2013,6,1), (2013,6,20), {"base": 100, "slope": 4}, "B"),
            (10, (2013,6,3), (2013,6,22), {"base": 200, "slope": 7}, "B"),
            (11, (2013,6,5), (2013,6,24), {"base": 300, "slope": 4}, "C"),
            (12, (2013,6,12), None, {"base": 400, "slope": 7}, "C"),
        ]
        for event_id, start, end, values, row in events:
            event_start = tp_datetime(*start) if start else None
            event_end = tp_datetime(*end) if end else None
            event = S3TimeSeriesEvent(event_id,
                                      start = event_start,
                                      end = event_end,
                                      values = values,
                                      row = row,
                                      )
            period.add_current(event)
        self.current_events = events

        # Add previous events (all ended before the period starts)
        events = [
            # 10 days
            (13, (2012,8,1), (2012,8,10), {"base": 20, "slope": 5}, "A"),
            (14, (2012,8,3), (2012,8,12), {"base": 20, "slope": 5}, "B"),
            (15, None, (2012,8,14), {"base": 20, "slope": 5}, "C"),
            (16, (2012,8,7), (2012,8,16), {"base": 20, "slope": 5}, "C"),
        ]
        for event_id, start, end, values, row in events:
            event_start = tp_datetime(*start) if start else None
            event_end = tp_datetime(*end) if end else None
            event = S3TimeSeriesEvent(event_id,
                                      start = event_start,
                                      end = event_end,
                                      values = values,
                                      row = row,
                                      )
            period.add_previous(event)
        self.previous_events = events

        # Store period
        self.period = period

    # -------------------------------------------------------------------------
    def testGrouping(self):
        """ Test grouping of period events (single axis) """

        period = self.period
        period.group()

        assertTrue = self.assertTrue
        assertEqual = self.assertEqual

        # Check rows: (current event ids, previous event ids) per row
        expected_rows = {"A": ((1, 2, 7, 8), ()),
                         "B": ((3, 4, 9, 10), ()),
                         "C": ((5, 6, 11, 12), ()),
                         }
        rows = period._rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            expected_current = set(expected_rows.get(k)[0])
            assertEqual(v[0], expected_current,
                        msg = "Row %s current events: %s != %s" %
                              (k, v[0], expected_current))
            expected_previous = set(expected_rows.get(k)[1])
            assertEqual(v[1], expected_previous,
                        msg = "Row %s previous events: %s != %s" %
                              (k, v[1], expected_previous))

        # Check columns (no column axis => must be empty)
        assertEqual(period._cols, {})

        # Check matrix (no column axis => must be empty)
        assertEqual(period._matrix, {})

    # -------------------------------------------------------------------------
    def testGroupingCumulative(self):
        """ Test grouping of period events including previous events (single axis) """

        period = self.period
        period.group(cumulative=True)

        assertTrue = self.assertTrue
        assertEqual = self.assertEqual

        # Check rows: with cumulative=True the previous events appear too
        expected_rows = {"A": ((1, 2, 7, 8), (13,)),
                         "B": ((3, 4, 9, 10), (14,)),
                         "C": ((5, 6, 11, 12), (15, 16)),
                         }
        rows = period._rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            expected_current = set(expected_rows.get(k)[0])
            assertEqual(v[0], expected_current,
                        msg = "Row %s current events: %s != %s" %
                              (k, v[0], expected_current))
            expected_previous = set(expected_rows.get(k)[1])
            assertEqual(v[1], expected_previous,
                        msg = "Row %s previous events: %s != %s" %
                              (k, v[1], expected_previous))

        # Check columns (no column axis => must be empty)
        assertEqual(period._cols, {})

        # Check matrix (no column axis => must be empty)
        assertEqual(period._matrix, {})

    # -------------------------------------------------------------------------
    def testAggregateCount(self):
        """ Test aggregation: count (single axis) """

        period = self.period
        assertEqual = self.assertEqual

        # Aggregate
        totals = period.aggregate(S3TimeSeriesFact("count", "base"))

        # Check rows
        expected_rows = {"A": [4],
                         "B": [4],
                         "C": [4],
                         }
        rows = period.rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            assertEqual(v, expected_rows.get(k),
                        msg = "Row %s: %s != %s" %
                              (k, v, expected_rows.get(k)))

        # Check columns
        assertEqual(period.cols, {})

        # Check matrix
        assertEqual(period.matrix, {})

        # Check total
        expected_totals = [12]
        assertEqual(period.totals, expected_totals)
        assertEqual(totals, expected_totals)

    # -------------------------------------------------------------------------
    def testAggregateSum(self):
        """ Test aggregation: sum (single axis) """

        period = self.period
        assertEqual = self.assertEqual

        # Aggregate
        totals = period.aggregate(S3TimeSeriesFact("sum", "base"))

        # Check rows
        expected_rows = {"A": [180],
                         "B": [370],
                         "C": [810],
                         }
        rows = period.rows
        assertEqual(set(rows.keys()), set(expected_rows.keys()))
        for k, v in rows.items():
            assertEqual(v, expected_rows.get(k),
                        msg = "Row %s: %s != %s" %
                              (k, v, expected_rows.get(k)))

        # Check columns
        assertEqual(period.cols, {})

        # Check matrix
        assertEqual(period.matrix, {})

        # Check total
        expected_totals = [1360]
        assertEqual(period.totals, expected_totals)
        assertEqual(totals, expected_totals)
# =============================================================================
class PeriodTestsNoGroups(unittest.TestCase):
    """ Tests for S3TimeSeriesPeriod without grouping """

    def setUp(self):
        """
            Construct a Q2/2013 period with current and previous events
            that carry no row/col axis values at all => grouping must
            yield empty rows/cols/matrix, only totals are aggregated
        """

        # Period
        start = tp_datetime(2013,4,1)
        end = tp_datetime(2013,7,1)
        period = S3TimeSeriesPeriod(start=start, end=end)

        # Add current events
        # (event_id, start, end, values)
        events = [
            # 7 days
            (1, (2013,4,1), (2013,4,7), {"base": 10, "slope": 2}),
            (2, (2013,4,3), (2013,4,9), {"base": 20, "slope": 5}),
            (3, (2013,4,5), (2013,4,11), {"base": 30, "slope": 2}),
            (4, (2013,4,7), (2013,4,13), {"base": 40, "slope": 5}),
            # 10 days
            (5, (2013,5,1), (2013,5,10), {"base": 50, "slope": 5}),
            (6, (2013,5,3), (2013,5,12), {"base": 60, "slope": 2}),
            (7, (2013,5,5), (2013,5,14), {"base": 70, "slope": 5}),
            (8, (2013,5,7), (2013,5,16), {"base": 80, "slope": 2}),
            # 20 days
            (9, (2013,6,1), (2013,6,20), {"base": 100, "slope": 4}),
            (10, (2013,6,3), (2013,6,22), {"base": 200, "slope": 7}),
            (11, (2013,6,5), (2013,6,24), {"base": 300, "slope": 4}),
            (12, (2013,6,12), None, {"base": 400, "slope": 7}),
        ]
        for event_id, start, end, values in events:
            event_start = tp_datetime(*start) if start else None
            event_end = tp_datetime(*end) if end else None
            event = S3TimeSeriesEvent(event_id,
                                      start = event_start,
                                      end = event_end,
                                      values = values,
                                      )
            period.add_current(event)
        self.current_events = events

        # Add previous events (all ended before the period starts)
        events = [
            # 10 days
            (13, (2012,8,1), (2012,8,10), {"base": 20, "slope": 5}),
            (14, (2012,8,3), (2012,8,12), {"base": 20, "slope": 5}),
            (15, None, (2012,8,14), {"base": 20, "slope": 5}),
            (16, (2012,8,7), (2012,8,16), {"base": 20, "slope": 5}),
        ]
        for event_id, start, end, values in events:
            event_start = tp_datetime(*start) if start else None
            event_end = tp_datetime(*end) if end else None
            event = S3TimeSeriesEvent(event_id,
                                      start = event_start,
                                      end = event_end,
                                      values = values,
                                      )
            period.add_previous(event)
        self.previous_events = events

        # Store period
        self.period = period

    # -------------------------------------------------------------------------
    def testGrouping(self):
        """ Test grouping of period events (no grouping) """

        period = self.period
        period.group()

        assertTrue = self.assertTrue
        assertEqual = self.assertEqual

        # Check rows (no axes => must be empty)
        assertEqual(period._rows, {})

        # Check columns
        assertEqual(period._cols, {})

        # Check matrix
        assertEqual(period._matrix, {})

    # -------------------------------------------------------------------------
    def testGroupingCumulative(self):
        """ Test grouping of period events including previous events (no grouping) """

        period = self.period
        period.group(cumulative=True)

        assertTrue = self.assertTrue
        assertEqual = self.assertEqual

        # Check rows (no axes => must be empty even with cumulative=True)
        assertEqual(period._rows, {})

        # Check columns
        assertEqual(period._cols, {})

        # Check matrix
        assertEqual(period._matrix, {})

    # -------------------------------------------------------------------------
    def testAggregateCount(self):
        """ Test aggregation: count (no grouping) """

        period = self.period
        assertEqual = self.assertEqual

        # Aggregate
        totals = period.aggregate(S3TimeSeriesFact("count", "base"))

        # Check rows (no axes => only totals are produced)
        assertEqual(period.rows, {})

        # Check columns
        assertEqual(period.cols, {})

        # Check matrix
        assertEqual(period.matrix, {})

        # Check total
        expected_totals = [12]
        assertEqual(period.totals, expected_totals)
        assertEqual(totals, expected_totals)

    # -------------------------------------------------------------------------
    def testAggregateSum(self):
        """ Test aggregation: sum (no grouping) """

        period = self.period
        assertEqual = self.assertEqual

        # Aggregate
        totals = period.aggregate(S3TimeSeriesFact("sum", "base"))

        # Check rows
        assertEqual(period.rows, {})

        # Check columns
        assertEqual(period.cols, {})

        # Check matrix
        assertEqual(period.matrix, {})

        # Check total
        expected_totals = [1360]
        assertEqual(period.totals, expected_totals)
        assertEqual(totals, expected_totals)

    # -------------------------------------------------------------------------
    def testAggregateCumulate(self):
        """ Test aggregation: cumulate (no grouping) """

        period = self.period
        assertEqual = self.assertEqual

        # Aggregate ("cumulate" also includes the previous events)
        totals = period.aggregate(S3TimeSeriesFact("cumulate",
                                                   "base",
                                                   slope="slope",
                                                   interval="days",
                                                   )
                                  )

        # Check rows
        assertEqual(period.rows, {})

        # Check columns
        assertEqual(period.cols, {})

        # Check matrix
        assertEqual(period.matrix, {})

        # Check total
        expected_totals = [2248]
        assertEqual(period.totals, expected_totals)
        assertEqual(totals, expected_totals)
# =============================================================================
class EventFrameTests(unittest.TestCase):
""" Tests for S3TimeSeriesEventFrame class """
def setUp(self):
data = [
# Always
(1, None, None, {"test": 2}),
# First two quarters
(2, None, (2012,6,19), {"test": 5}),
# Last three quarters
(3, (2012,5,1), None, {"test": 8}),
# First and Second Quarter
(4, (2012,1,14), (2012,5,7), {"test": 3}),
# Second and Third Quarter
(5, (2012,5,1), (2012,7,21), {"test": 2}),
# Third and Fourth Quarter
(6, (2012,8,8), (2012,11,3), {"test": 1}),
# Only Fourth Quarter
(7, (2012,10,18), (2013,5,27), {"test": 9}),
# Ended before Event Frame
(8, (2011,1,1), (2011,12,6), {"test": 9}),
# Starting after Event Frame
(9, (2013,1,18), (2013,5,27), {"test": 3}),
]
events = []
for event_id, start, end, values in data:
events.append(S3TimeSeriesEvent(event_id,
start=tp_datetime(*start) if start else None,
end=tp_datetime(*end) if end else None,
values=values,
))
self.events = events
# -------------------------------------------------------------------------
def testExtend(self):
""" Test correct grouping of events into periods """
# Create event frame and add events
ef = S3TimeSeriesEventFrame(tp_datetime(2012,1,1),
tp_datetime(2012,12,15),
slots="3 months")
ef.extend(self.events)
# Expected result (start, end, previous, current, results)
expected = [
((2012, 1, 1), (2012, 4, 1), [8], [1, 2, 4], [10, 5, 117]),
((2012, 4, 1), (2012, 7, 1), [8], [1, 2, 3, 4, 5], [20, 8, 150]),
((2012, 7, 1), (2012, 10, 1), [8, 2, 4], [1, 3, 5, 6], [13, 8, 176]),
((2012, 10, 1), (2012, 12, 15), [8, 2, 4, 5], [1, 3, 6, 7], [20, 9, 211]),
]
# Check
assertEqual = self.assertEqual
for i, period in enumerate(ef):
start, end, previous, current, expected_result = expected[i]
# Check start/end date of period
assertEqual(period.start, tp_datetime(*start))
assertEqual(period.end, tp_datetime(*end))
# Check current events in period
event_ids = period.cevents.keys()
assertEqual(set(event_ids), set(current))
# Check previous events in period
event_ids = period.pevents.keys()
assertEqual(set(event_ids), set(previous))
# Check aggregation (multi-fact)
result = period.aggregate([S3TimeSeriesFact("sum", "test"),
S3TimeSeriesFact("max", "test"),
S3TimeSeriesFact("cumulate",
None,
slope="test",
interval="months",
),
])
assertEqual(result, expected_result)
# -------------------------------------------------------------------------
def testPeriodsDays(self):
""" Test iteration over periods (days) """
assertEqual = self.assertEqual
ef = S3TimeSeriesEventFrame(tp_datetime(2011, 1, 5),
tp_datetime(2011, 1, 8),
slots="days")
expected = [(tp_datetime(2011, 1, 5), tp_datetime(2011, 1, 6)),
(tp_datetime(2011, 1, 6), tp_datetime(2011, 1, 7)),
(tp_datetime(2011, 1, 7), tp_datetime(2011, 1, 8))]
for i, period in enumerate(ef):
assertEqual(period.start, expected[i][0])
assertEqual(period.end, expected[i][1])
ef = S3TimeSeriesEventFrame(tp_datetime(2011, 1, 5),
tp_datetime(2011, 1, 16),
slots="4 days")
expected = [(tp_datetime(2011, 1, 5), tp_datetime(2011, 1, 9)),
(tp_datetime(2011, 1, 9), tp_datetime(2011, 1, 13)),
(tp_datetime(2011, 1, 13), tp_datetime(2011, 1, 16))]
for i, period in enumerate(ef):
assertEqual(period.start, expected[i][0])
assertEqual(period.end, expected[i][1])
# -------------------------------------------------------------------------
def testPeriodsWeeks(self):
""" Test iteration over periods (weeks) """
assertEqual = self.assertEqual
ef = S3TimeSeriesEventFrame(tp_datetime(2011, 1, 5),
tp_datetime(2011, 1, 28),
slots="weeks")
expected = [(tp_datetime(2011, 1, 5), tp_datetime(2011, 1, 12)),
(tp_datetime(2011, 1, 12), tp_datetime(2011, 1, 19)),
(tp_datetime(2011, 1, 19), tp_datetime(2011, 1, 26)),
(tp_datetime(2011, 1, 26), tp_datetime(2011, 1, 28))]
for i, period in enumerate(ef):
assertEqual(period.start, expected[i][0])
assertEqual(period.end, expected[i][1])
ef = S3TimeSeriesEventFrame(tp_datetime(2011, 1, 5),
tp_datetime(2011, 2, 16),
slots="2 weeks")
expected = [(tp_datetime(2011, 1, 5), tp_datetime(2011, 1, 19)),
(tp_datetime(2011, 1, 19), tp_datetime(2011, 2, 2)),
(tp_datetime(2011, 2, 2), tp_datetime(2011, 2, 16))]
for i, period in enumerate(ef):
assertEqual(period.start, expected[i][0])
assertEqual(period.end, expected[i][1])
# -------------------------------------------------------------------------
    def testPeriodsMonths(self):
        """ Test iteration over periods (months) """

        assertEqual = self.assertEqual

        # Single-month slots anchored on the frame start day (the 5th);
        # the last period is truncated at the frame end
        ef = S3TimeSeriesEventFrame(tp_datetime(2011, 1, 5),
                                    tp_datetime(2011, 4, 28),
                                    slots="months")
        expected = [(tp_datetime(2011, 1, 5), tp_datetime(2011, 2, 5)),
                    (tp_datetime(2011, 2, 5), tp_datetime(2011, 3, 5)),
                    (tp_datetime(2011, 3, 5), tp_datetime(2011, 4, 5)),
                    (tp_datetime(2011, 4, 5), tp_datetime(2011, 4, 28))]
        for i, period in enumerate(ef):
            assertEqual(period.start, expected[i][0])
            assertEqual(period.end, expected[i][1])

        # Multi-month slots
        ef = S3TimeSeriesEventFrame(tp_datetime(2011, 1, 5),
                                    tp_datetime(2011, 8, 16),
                                    slots="3 months")
        expected = [(tp_datetime(2011, 1, 5), tp_datetime(2011, 4, 5)),
                    (tp_datetime(2011, 4, 5), tp_datetime(2011, 7, 5)),
                    (tp_datetime(2011, 7, 5), tp_datetime(2011, 8, 16))]
        for i, period in enumerate(ef):
            assertEqual(period.start, expected[i][0])
            assertEqual(period.end, expected[i][1])
# =============================================================================
class DtParseTests(unittest.TestCase):
    """ Test Parsing of Datetime Options """

    # -------------------------------------------------------------------------
    def testDtParseAbsolute(self):
        """ Test dtparse with absolute dates """

        assertTrue = self.assertTrue
        assertRaises = self.assertRaises
        assertEqual = self.assertEqual

        ts = S3TimeSeries

        # "month/year" format defaults to the first day of the month
        result = ts.dtparse("5/2001")
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, tp_datetime(2001, 5, 1, 0, 0, 0))

        # "year-month" format defaults to the first day of the month
        result = ts.dtparse("2007-03")
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, tp_datetime(2007, 3, 1, 0, 0, 0))

        # Bare year defaults to Jan 1st
        result = ts.dtparse("1996")
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, tp_datetime(1996, 1, 1, 0, 0, 0))

        # Full ISO date
        result = ts.dtparse("2008-02-12")
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, tp_datetime(2008, 2, 12, 0, 0, 0))

        # Day overflow rolls over into the next month
        # (Feb 31 in leap year 2008 => Mar 2)
        result = ts.dtparse("2008-02-31")
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, tp_datetime(2008, 3, 2, 0, 0, 0))

        # Empty string defaults to now
        # NOTE(review): comparing individual fields against a separately
        # taken utcnow() can be flaky right at a minute boundary
        now = datetime.datetime.utcnow()
        result = ts.dtparse("")
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result.year, now.year)
        assertEqual(result.month, now.month)
        assertEqual(result.day, now.day)
        assertEqual(result.hour, now.hour)
        assertEqual(result.minute, now.minute)

        # None defaults to now
        now = datetime.datetime.utcnow()
        result = ts.dtparse(None)
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result.year, now.year)
        assertEqual(result.month, now.month)
        assertEqual(result.day, now.day)
        assertEqual(result.hour, now.hour)
        assertEqual(result.minute, now.minute)

        # Invalid expressions raise ValueError
        assertRaises(ValueError, ts.dtparse, "1985-13")
        assertRaises(ValueError, ts.dtparse, "68532")
        assertRaises(ValueError, ts.dtparse, "invalid")

    # -------------------------------------------------------------------------
    def testDtParseRelative(self):
        """ Test dtparse with relative dates """

        assertTrue = self.assertTrue
        assertRaises = self.assertRaises
        assertEqual = self.assertEqual

        ts = S3TimeSeries

        # All relative expressions are applied to this start date
        start = datetime.datetime(2014, 1, 3, 11, 30)

        result = ts.dtparse("+1 year", start=start)
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, datetime.datetime(2015, 1, 3, 11, 30, 0))

        result = ts.dtparse("-3 days", start=start)
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, datetime.datetime(2013, 12, 31, 11, 30, 0))

        result = ts.dtparse("+5 hours", start=start)
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, datetime.datetime(2014, 1, 3, 16, 30, 0))

        result = ts.dtparse("-6 months", start=start)
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, datetime.datetime(2013, 7, 3, 11, 30, 0))

        result = ts.dtparse("+12 weeks", start=start)
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result, datetime.datetime(2014, 3, 28, 11, 30, 0))

        # Empty string defaults to start
        result = ts.dtparse("", start=start)
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result.year, start.year)
        assertEqual(result.month, start.month)
        assertEqual(result.day, start.day)
        assertEqual(result.hour, start.hour)
        assertEqual(result.minute, start.minute)

        # None defaults to start
        result = ts.dtparse(None, start=start)
        assertTrue(isinstance(result, datetime.datetime))
        assertEqual(result.year, start.year)
        assertEqual(result.month, start.month)
        assertEqual(result.day, start.day)
        assertEqual(result.hour, start.hour)
        assertEqual(result.minute, start.minute)

        # Invalid expressions raise ValueError
        assertRaises(ValueError, ts.dtparse, "invalid")
# =============================================================================
class TimeSeriesTests(unittest.TestCase):
    """ Tests for S3TimeSeries class """

    # -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        # Create a test table with events of four kinds:
        #   STARTEND  - both start and end dates
        #   NOSTART   - only an end date
        #   NOEND     - only a start date
        #   PERMANENT - neither start nor end date
        # parameter1/parameter2 are the fact values aggregated in the tests
        db = current.db
        db.define_table("tp_test_events",
                        Field("event_start", "datetime"),
                        Field("event_end", "datetime"),
                        Field("parameter1", "integer"),
                        Field("parameter2", "double"),
                        Field("event_type"),
                        )
        event_table = db["tp_test_events"]

        # (event_type, start-tuple-or-None, end-tuple-or-None)
        events = (("STARTEND",
                   (2011, 1, 3, 0, 0, 0),
                   (2011, 5, 4, 0, 0, 0),
                   ),
                  ("STARTEND",
                   (2011, 4, 6, 0, 0, 0),
                   (2011, 8, 7, 0, 0, 0),
                   ),
                  ("STARTEND",
                   (2011, 7, 9, 0, 0, 0),
                   (2011, 11, 10, 0, 0, 0),
                   ),
                  ("NOSTART",
                   None,
                   (2012, 2, 13, 0, 0, 0),
                   ),
                  ("NOSTART",
                   None,
                   (2012, 5, 16, 0, 0, 0),
                   ),
                  ("NOSTART",
                   None,
                   (2012, 8, 19, 0, 0, 0),
                   ),
                  ("NOEND",
                   (2012, 7, 21, 0, 0, 0),
                   None,
                   ),
                  ("NOEND",
                   (2012, 10, 24, 0, 0, 0),
                   None,
                   ),
                  ("NOEND",
                   (2013, 1, 27, 0, 0, 0),
                   None,
                   ),
                  ("PERMANENT",
                   None,
                   None,
                   ),
                  )
        for event_type, start, end in events:
            event_start = tp_datetime(*start) if start else None
            event_end = tp_datetime(*end) if end else None
            record = {
                "event_type": event_type,
                "event_start": event_start,
                "event_end": event_end,
                "parameter1": 3,
                "parameter2": 0.5,
            }
            event_table.insert(**record)

    # -------------------------------------------------------------------------
    @classmethod
    def tearDownClass(cls):
        # Remove the test table again
        db = current.db
        db.tp_test_events.drop()

    # -------------------------------------------------------------------------
    def setUp(self):
        # Bypass authorization for the test
        current.auth.override = True

    # -------------------------------------------------------------------------
    def tearDown(self):
        current.auth.override = False

    # -------------------------------------------------------------------------
    def testAutomaticInterval(self):
        """ Test automatic determination of interval start and end """

        assertEqual = self.assertEqual
        assertTrue = self.assertTrue
        is_now = self.is_now

        s3db = current.s3db

        # Events with both start and end dates
        query = FS("event_type") == "STARTEND"
        resource = s3db.resource("tp_test_events", filter = query)
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          )
        ef = ts.event_frame
        # falls back to first start date
        assertEqual(ef.start, tp_datetime(2011, 1, 3, 0, 0, 0))
        assertTrue(is_now(ef.end))

        # Events without start dates
        query = FS("event_type") == "NOSTART"
        resource = s3db.resource("tp_test_events", filter = query)
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          )
        ef = ts.event_frame
        # falls back to first end date minus 1 day
        assertEqual(ef.start, tp_datetime(2012, 2, 12, 0, 0, 0))
        assertTrue(is_now(ef.end))

        # Events without end dates
        query = FS("event_type") == "NOEND"
        resource = s3db.resource("tp_test_events", filter = query)
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          )
        ef = ts.event_frame
        # falls back to first start date
        assertEqual(ef.start, tp_datetime(2012, 7, 21, 0, 0, 0))
        assertTrue(is_now(ef.end))

        # All events (unfiltered)
        resource = s3db.resource("tp_test_events")
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          )
        ef = ts.event_frame
        # falls back to first start date
        assertEqual(ef.start, tp_datetime(2011, 1, 3, 0, 0, 0))
        assertTrue(is_now(ef.end))

    # -------------------------------------------------------------------------
    def testAutomaticSlotLength(self):
        """ Test automatic determination of reasonable aggregation time slot """

        assertEqual = self.assertEqual

        s3db = current.s3db

        query = FS("event_type") == "STARTEND"
        resource = s3db.resource("tp_test_events", filter = query)
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          end = "2011-03-01",
                          )
        ef = ts.event_frame
        # falls back to first start date
        assertEqual(ef.start, tp_datetime(2011, 1, 3, 0, 0, 0))
        assertEqual(ef.end, tp_datetime(2011, 3, 1, 0, 0, 0))
        # ~8 weeks => reasonable intervall length: weeks
        assertEqual(ef.slots, "weeks")

        query = FS("event_type") == "NOSTART"
        resource = s3db.resource("tp_test_events", filter = query)
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          end = "2013-01-01",
                          )
        ef = ts.event_frame
        # falls back to first end date minus 1 day
        assertEqual(ef.start, tp_datetime(2012, 2, 12, 0, 0, 0))
        assertEqual(ef.end, tp_datetime(2013, 1, 1, 0, 0))
        # ~11 months => reasonable intervall length: months
        assertEqual(ef.slots, "months")

        query = FS("event_type") == "NOEND"
        resource = s3db.resource("tp_test_events", filter = query)
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          end = "2016-06-01",
                          )
        ef = ts.event_frame
        # falls back to first start date
        assertEqual(ef.start, tp_datetime(2012, 7, 21, 0, 0, 0))
        assertEqual(ef.end, tp_datetime(2016, 6, 1, 0, 0))
        # ~4 years => reasonable intervall length: 3 months
        assertEqual(ef.slots, "3 months")

        resource = s3db.resource("tp_test_events")
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          end = "2011-01-15",
                          )
        ef = ts.event_frame
        # falls back to first start date
        assertEqual(ef.start, tp_datetime(2011, 1, 3, 0, 0, 0))
        assertEqual(ef.end, tp_datetime(2011, 1, 15, 0, 0))
        # ~12 days => reasonable intervall length: days
        assertEqual(ef.slots, "days")

        # Check with manual slot length (overrides automatic choice)
        query = FS("event_type") == "NOEND"
        resource = s3db.resource("tp_test_events", filter = query)
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          end = "2016-06-01",
                          slots = "years",
                          )
        ef = ts.event_frame
        # falls back to first start date
        assertEqual(ef.start, tp_datetime(2012, 7, 21, 0, 0, 0))
        assertEqual(ef.end, tp_datetime(2016, 6, 1, 0, 0))
        assertEqual(ef.slots, "years")

        # Check with manual start date
        query = FS("event_type") == "STARTEND"
        resource = s3db.resource("tp_test_events", filter = query)
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          start = "2011-02-15",
                          end = "2011-03-01",
                          )
        ef = ts.event_frame
        # falls back to first start date
        assertEqual(ef.start, tp_datetime(2011, 2, 15, 0, 0, 0))
        assertEqual(ef.end, tp_datetime(2011, 3, 1, 0, 0, 0))
        # ~14 days => reasonable intervall length: days
        assertEqual(ef.slots, "days")

    # -------------------------------------------------------------------------
    def testEventDataAggregation(self):
        """ Test aggregation of event data """

        s3db = current.s3db

        assertEqual = self.assertEqual
        assertTrue = self.assertTrue

        # Keys in the dict produced by as_dict()
        PERIODS = "p"
        TIMES = "t"
        VALUE = "v"

        resource = s3db.resource("tp_test_events")
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          end = "2013-01-01",
                          slots = "months",
                          facts = [S3TimeSeriesFact("sum", "parameter1")],
                          )

        # Verify correct slot length
        assertEqual(ts.event_frame.slots, "months")

        # Each event contributes parameter1=3 to every period it overlaps;
        # the trailing comment lists which events overlap the period
        # (P=PERMANENT, NSx=NOSTART, SEx=STARTEND, NEx=NOEND)
        expected = [
            ((2011,1,3), (2011,2,3), [15]),   # 00 P NS1 NS2 NS3 SE1
            ((2011,2,3), (2011,3,3), [15]),   # 01 P NS1 NS2 NS3 SE1
            ((2011,3,3), (2011,4,3), [15]),   # 02 P NS1 NS2 NS3 SE1
            ((2011,4,3), (2011,5,3), [18]),   # 03 P NS1 NS2 NS3 SE1 SE2
            ((2011,5,3), (2011,6,3), [18]),   # 04 P NS1 NS2 NS3 SE1 SE2
            ((2011,6,3), (2011,7,3), [15]),   # 05 P NS1 NS2 NS3 SE2
            ((2011,7,3), (2011,8,3), [18]),   # 06 P NS1 NS2 NS3 SE2 SE3
            ((2011,8,3), (2011,9,3), [18]),   # 07 P NS1 NS2 NS3 SE2 SE3
            ((2011,9,3), (2011,10,3), [15]),  # 08 P NS1 NS2 NS3 SE3
            ((2011,10,3), (2011,11,3), [15]), # 09 P NS1 NS2 NS3 SE3
            ((2011,11,3), (2011,12,3), [15]), # 10 P NS1 NS2 NS3 SE3
            ((2011,12,3), (2012,1,3), [12]),  # 11 P NS1 NS2 NS3
            ((2012,1,3), (2012,2,3), [12]),   # 12 P NS1 NS2 NS3
            ((2012,2,3), (2012,3,3), [12]),   # 13 P NS1 NS2 NS3
            ((2012,3,3), (2012,4,3), [9]),    # 14 P NS2 NS3
            ((2012,4,3), (2012,5,3), [9]),    # 15 P NS2 NS3
            ((2012,5,3), (2012,6,3), [9]),    # 16 P NS2 NS3
            ((2012,6,3), (2012,7,3), [6]),    # 17 P NS3
            ((2012,7,3), (2012,8,3), [9]),    # 18 P NS3 NE1
            ((2012,8,3), (2012,9,3), [9]),    # 19 P NS3 NE1
            ((2012,9,3), (2012,10,3), [6]),   # 20 P NE1
            ((2012,10,3), (2012,11,3), [9]),  # 21 P NE1 NE2
            ((2012,11,3), (2012,12,3), [9]),  # 22 P NE1 NE2
            ((2012,12,3), (2013,1,1), [9]),   # 23 P NE1 NE2
        ]

        result = ts.as_dict()
        periods = result[PERIODS]
        for i, period in enumerate(periods):
            expected_start, expected_end, expected_value = expected[i]
            # as_dict() serializes period boundaries as ISO strings
            expected_start = tp_datetime(*expected_start).isoformat()
            expected_end = tp_datetime(*expected_end).isoformat()
            dates = period.get(TIMES)
            assertTrue(isinstance(dates, tuple))
            start, end = dates
            assertEqual(start, expected_start,
                        msg="Period %s start should be %s, but is %s" %
                            (i, expected_start, start))
            assertEqual(end, expected_end,
                        msg="Period %s end should be %s, but is %s" %
                            (i, expected_end, end))
            value = period.get(VALUE)
            assertEqual(value, expected_value,
                        msg="Period %s sum should be %s, but is %s" %
                            (i, expected_value, value))

    # -------------------------------------------------------------------------
    def testEventDataCumulativeAggregation(self):
        """ Test aggregation of event data, cumulative """

        s3db = current.s3db

        assertEqual = self.assertEqual
        assertTrue = self.assertTrue

        # Keys in the dict produced by as_dict()
        PERIODS = "p"
        TIMES = "t"
        VALUE = "v"

        resource = s3db.resource("tp_test_events")
        ts = S3TimeSeries(resource,
                          event_start = "event_start",
                          event_end = "event_end",
                          start = "2012-01-01",
                          end = "2013-01-01",
                          slots = "months",
                          facts = [S3TimeSeriesFact("cumulate",
                                                    None,
                                                    slope="parameter1",
                                                    interval="months",
                                                    )
                                   ],
                          )

        # Verify correct slot length
        assertEqual(ts.event_frame.slots, "months")

        # Cumulative aggregation: values accumulate per month for events
        # active in the period; events in parentheses ended before the
        # period but still contribute their accumulated value
        expected = [
            ((2012,1,1), (2012,2,1), [45]),   # 01 P NS1 NS2 NS3 (SE1 SE2 SE3)
            ((2012,2,1), (2012,3,1), [45]),   # 02 P NS1 NS2 NS3 (SE1 SE2 SE3)
            ((2012,3,1), (2012,4,1), [45]),   # 03 P NS2 NS3 (SE1 SE2 SE3)
            ((2012,4,1), (2012,5,1), [45]),   # 04 P NS2 NS3 (SE1 SE2 SE3)
            ((2012,5,1), (2012,6,1), [45]),   # 05 P NS2 NS3 (SE1 SE2 SE3)
            ((2012,6,1), (2012,7,1), [45]),   # 06 P NS3 (SE1 SE2 SE3)
            ((2012,7,1), (2012,8,1), [48]),   # 07 P NS3 (SE1 SE2 SE3) NE1
            ((2012,8,1), (2012,9,1), [51]),   # 08 P NS3 (SE1 SE2 SE3) NE1
            ((2012,9,1), (2012,10,1), [54]),  # 09 P (SE1 SE2 SE3) NE1
            ((2012,10,1), (2012,11,1), [60]), # 10 P (SE1 SE2 SE3) NE1 NE2
            ((2012,11,1), (2012,12,1), [66]), # 11 P (SE1 SE2 SE3) NE1 NE2
            ((2012,12,1), (2013,1,1), [72]),  # 12 P (SE1 SE2 SE3) NE1 NE2
        ]

        result = ts.as_dict()
        periods = result[PERIODS]
        for i, period in enumerate(periods):
            expected_start, expected_end, expected_value = expected[i]
            # as_dict() serializes period boundaries as ISO strings
            expected_start = tp_datetime(*expected_start).isoformat()
            expected_end = tp_datetime(*expected_end).isoformat()
            dates = period.get(TIMES)
            assertTrue(isinstance(dates, tuple))
            start, end = dates
            assertEqual(start, expected_start,
                        msg="Period %s start should be %s, but is %s" %
                            (i, expected_start, start))
            assertEqual(end, expected_end,
                        msg="Period %s end should be %s, but is %s" %
                            (i, expected_end, end))
            value = period.get(VALUE)
            assertEqual(value, expected_value,
                        msg="Period %s cumulative sum should be %s, but is %s" %
                            (i, expected_value, value))

    # -------------------------------------------------------------------------
    @staticmethod
    def is_now(dt):
        # Fuzzy "is current time" check: same UTC date and hour, and
        # within 5 minutes (tolerates slow test runs; can still fail
        # if the hour rolls over between dt and the utcnow() call)
        now = datetime.datetime.utcnow()
        if dt.year == now.year and \
           dt.month == now.month and \
           dt.day == now.day and \
           dt.hour == now.hour and \
           abs(dt.minute - now.minute) < 5:
            return True
        else:
            return False
# =============================================================================
class FactParserTests(unittest.TestCase):
    """ Tests for S3TimeSeriesFact parser """

    def testFactExpressionParsing(self):
        """ Test parsing of fact expressions """

        parse = S3TimeSeriesFact.parse

        # (expression, expected) pairs; each expected entry is a tuple of
        # (label, method, base, slope, interval) tuples, one per parsed
        # fact; expected=None means parse() must raise SyntaxError
        expressions = (
            # Isolated selector
            ("id",
             ((None, "count", "id", None, None),
              )
             ),
            # Normal fact expression
            ("count(id)",
             ((None, "count", "id", None, None),
              )
             ),
            # Cumulate with base and slope
            ("cumulate(value,delta,3weeks)",
             ((None, "cumulate", "value", "delta", "3weeks"),
              )
             ),
            # Cumulate without base
            ("cumulate(delta,3weeks)",
             ((None, "cumulate", None, "delta", "3weeks"),
              )
             ),
            # Cumulate without slope
            ("cumulate(value)",
             ((None, "cumulate", "value", None, None),
              )
             ),
            # Normal fact expression, with complex selector
            ("sum(~.value)",
             ((None, "sum", "~.value", None, None),
              )
             ),
            # Normal fact expression, with label
            (("Number of Records", "count(id)"),
             (("Number of Records", "count", "id", None, None),
              )
             ),
            # List of fact expressions
            (["count(id)", "sum(value)"],
             ((None, "count", "id", None, None),
              (None, "sum", "value", None, None),
              )
             ),
            # List of fact expressions, with label
            (["count(id)", ("Example", "min(example)")],
             ((None, "count", "id", None, None),
              ("Example", "min", "example", None, None),
              )
             ),
            # Multiple fact expressions
            ("min(other),avg(other)",
             ((None, "min", "other", None, None),
              (None, "avg", "other", None, None),
              )
             ),
            # Multiple fact expressions, with label
            (("Test", "min(other),avg(other)"),
             (("Test", "min", "other", None, None),
              ("Test", "avg", "other", None, None),
              )
             ),
            # Mixed list and multi-expression
            (["count(id)", "sum(value),avg(other)"],
             ((None, "count", "id", None, None),
              (None, "sum", "value", None, None),
              (None, "avg", "other", None, None),
              )
             ),
            # Empty list
            ([], None),
            # Invalid method
            (["count(id)", "invalid(value)"], None),
            # Missing selector
            ("count(id),count()", None),
        )

        assertRaises = self.assertRaises
        assertEqual = self.assertEqual

        i = 0
        for i, (expression, expected) in enumerate(expressions):
            if expected is None:
                # Invalid expression must raise SyntaxError
                with assertRaises(SyntaxError):
                    facts = parse(expression)
            else:
                facts = parse(expression)
                assertEqual(len(facts), len(expected))
                for j, fact in enumerate(facts):
                    label, method, base, slope, interval = expected[j]
                    assertEqual(fact.label, label,
                                msg = "Expression %s/%s - incorrect label: %s != %s" %
                                      (i, j, fact.label, label))
                    assertEqual(fact.method, method,
                                msg = "Expression %s/%s - incorrect method: %s != %s" %
                                      (i, j, fact.method, method))
                    assertEqual(fact.base, base,
                                msg = "Expression %s/%s - incorrect base: %s != %s" %
                                      (i, j, fact.base, base))
                    assertEqual(fact.slope, slope,
                                msg = "Expression %s/%s - incorrect slope: %s != %s" %
                                      (i, j, fact.slope, slope))
                    assertEqual(fact.interval, interval,
                                msg = "Expression %s/%s - incorrect interval: %s != %s" %
                                      (i, j, fact.interval, interval))
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":

    # Run the complete time-series test suite when executed as a script
    run_suite(
        EventTests,
        PeriodTests,
        PeriodTestsSingleAxis,
        PeriodTestsNoGroups,
        EventFrameTests,
        DtParseTests,
        TimeSeriesTests,
        FactParserTests,
    )

# END ========================================================================
| mit |
mjabri/holoviews | holoviews/element/path.py | 1 | 8204 | """
A Path element is a way of drawing arbitrary shapes that can be
overlayed on top of other elements.
Subclasses of Path are designed to generate certain common shapes
quickly and conveniently. For instance, the Box path is often useful
for marking areas of a raster image.
Contours is also a subclass of Path but in addition to simply
displaying some information, there is a numeric value associated with
each collection of paths.
"""
import numpy as np
import param
from ..core import Dimension, Element2D
class Path(Element2D):
    """
    The Path Element contains a list of Paths stored as Nx2 numpy
    arrays. The data may be supplied in one of the following ways:

    1) A list of Nx2 numpy arrays.
    2) A list of lists containing x/y coordinate tuples.
    3) A tuple containing an array of length N with the x-values and a
       second array of shape NxP, where P is the number of paths.
    4) A list of tuples each containing separate x and y values.
    """

    kdims = param.List(default=[Dimension('x'), Dimension('y')],
                       constant=True, bounds=(2, 2), doc="""
        The label of the x- and y-dimension of the Image in form
        of a string or dimension object.""")

    group = param.String(default="Path", constant=True)

    def __init__(self, data, **params):
        # Normalize all supported input formats to a list of Nx2 arrays
        if isinstance(data, tuple):
            # Format 3: shared x-values plus an NxP array of y-values
            x, y = data
            if y.ndim == 1:
                y = np.atleast_2d(y).T
            if len(x) != y.shape[0]:
                raise ValueError("Path x and y values must be the same length.")
            data = [np.column_stack((x, y[:, i])) for i in range(y.shape[1])]
        elif isinstance(data, list) and all(isinstance(path, tuple) for path in data):
            # Format 4: list of (x-values, y-values) tuples
            data = [np.column_stack(path) for path in data]
        elif len(data) >= 1:
            # Formats 1/2: coerce any non-array paths to arrays
            data = [np.array(p) if not isinstance(p, np.ndarray) else p for p in data]
        super(Path, self).__init__(data, **params)

    def __getitem__(self, key):
        # Dimension name lookup returns the concatenated values
        if key in self.dimensions(): return self.dimension_values(key)
        if not isinstance(key, tuple) or len(key) == 1:
            key = (key, slice(None))
        elif len(key) == 0: return self.clone()
        # Only x/y range slicing is supported; slices merely update the
        # extents, the path data itself is not cropped
        if not all(isinstance(k, slice) for k in key):
            raise IndexError("%s only support slice indexing" %
                             self.__class__.__name__)
        xkey, ykey = key
        xstart, xstop = xkey.start, xkey.stop
        ystart, ystop = ykey.start, ykey.stop
        return self.clone(extents=(xstart, ystart, xstop, ystop))

    @classmethod
    def collapse_data(cls, data_list, function=None, kdims=None, **kwargs):
        # Without a function, collapsing simply concatenates the path lists
        if function is None:
            return [path for paths in data_list for path in paths]
        else:
            # Fixed missing space in the implicitly concatenated message
            # (previously rendered as "...sampled andtherefore...")
            raise Exception("Path types are not uniformly sampled and "
                            "therefore cannot be collapsed with a function.")

    def __len__(self):
        return len(self.data)

    def dimension_values(self, dimension):
        dim_idx = self.get_dimension_index(dimension)
        if dim_idx >= len(self.dimensions()):
            return super(Path, self).dimension_values(dimension)
        # Concatenate the selected column (x or y) across all paths
        values = []
        for contour in self.data:
            values.append(contour[:, dim_idx])
        # NOTE(review): returns an empty list (not an empty array) when
        # there are no paths - callers should tolerate both
        return np.concatenate(values) if values else []
class Contours(Path):
    """
    Contours is a type of Path that is also associated with a value
    (the contour level).
    """

    # Scalar level shared by all paths in this element
    level = param.Number(default=None, doc="""
        Optional level associated with the set of Contours.""")

    value_dimension = param.List(default=[Dimension('Level')], doc="""
        Contours optionally accept a value dimension, corresponding
        to the supplied values.""", bounds=(1,1))

    group = param.String(default='Contours', constant=True)

    def __init__(self, data, **params):
        # Treat None as an empty set of contours
        data = [] if data is None else data
        super(Contours, self).__init__(data, **params)

    def dimension_values(self, dim):
        # The value dimension yields the single contour level; all other
        # dimensions defer to Path (concatenated x/y coordinates)
        dimension = self.get_dimension(dim)
        if dimension in self.vdims:
            return [self.level]
        return super(Contours, self).dimension_values(dim)
class Polygons(Contours):
    """
    Polygons is a Path Element type that may contain any number of
    closed paths with an associated value.
    """

    group = param.String(default="Polygons", constant=True)

    # Overrides the Contours value dimension ('Level') with 'Value'
    vdims = param.List(default=[Dimension('Value')], doc="""
        Polygons optionally accept a value dimension, corresponding
        to the supplied value.""", bounds=(1,1))
class BaseShape(Path):
    """
    A BaseShape is a Path that can be succinctly expressed by a small
    number of parameters instead of a full path specification. For
    instance, a circle may be expressed by the center position and
    radius instead of an explicit list of path coordinates.
    """

    __abstract = True

    def clone(self, *args, **overrides):
        """
        Returns a clone of the object with matching parameter values
        containing the specified args and kwargs.
        """
        # Shapes are parameterized, so cloning copies the parameter
        # values rather than the generated path data
        settings = dict(self.get_param_values(), **overrides)
        return self.__class__(*args, **settings)
class Box(BaseShape):
    """
    Draw a centered box of a given width at the given position with
    the specified aspect ratio (if any).
    """

    x = param.Number(default=0, doc="The x-position of the box center.")

    y = param.Number(default=0, doc="The y-position of the box center.")

    height = param.Number(default=1, doc="The height of the box.")

    # Fixed doc string quoting: was doc="""" which made the doc text
    # start with a stray quote character
    aspect = param.Number(default=1, doc="""
        The aspect ratio of the box if supplied, otherwise an aspect
        of 1.0 is used.""")

    group = param.String(default='Box', constant=True, doc="The assigned group name.")

    def __init__(self, x, y, height, **params):
        super(Box, self).__init__([], x=x, y=y, height=height, **params)
        width = height * self.aspect
        # Use float division throughout: previously the bottom edge used
        # integer division (height/2) while the top used height/2, making
        # the box asymmetric for odd integer heights under Python 2
        (l, b, r, t) = (x - width / 2.0, y - height / 2.0,
                        x + width / 2.0, y + height / 2.0)
        # Closed rectangle: corner sequence ends back at the start point
        self.data = [np.array([(l, b), (l, t), (r, t), (r, b), (l, b)])]
class Ellipse(BaseShape):
    """
    Draw an axis-aligned ellipse at the specified x,y position with
    the given width and aspect ratio. By default draws a circle
    (aspect=1).

    Note that as a subclass of Path, internally an Ellipse is a
    sequence of (x,y) sample positions. Ellipse could also be
    implemented as an annotation that uses a dedicated ellipse artist.
    """

    x = param.Number(default=0, doc="The x-position of the ellipse center.")

    y = param.Number(default=0, doc="The y-position of the ellipse center.")

    height = param.Number(default=1, doc="The height of the ellipse.")

    aspect= param.Number(default=1.0, doc="The aspect ratio of the ellipse.")

    samples = param.Number(default=100, doc="The sample count used to draw the ellipse.")

    group = param.String(default='Ellipse', constant=True, doc="The assigned group name.")

    def __init__(self, x, y, height, **params):
        super(Ellipse, self).__init__([], x=x, y=y, height=height, **params)
        # Sample the ellipse boundary at uniform angular intervals;
        # linspace includes both 0 and 2*pi, so the path is closed
        angles = np.linspace(0, 2*np.pi, self.samples)
        radius = height / 2.0
        # Width is scaled by the aspect ratio relative to the height
        self.data = [np.array(
            list(zip(radius*self.aspect*np.sin(angles)+x,
                     radius*np.cos(angles)+y)))]
class Bounds(BaseShape):
    """
    An arbitrary axis-aligned bounding rectangle defined by the (left,
    bottom, right, top) coordinate positions.

    If supplied a single real number as input, this value will be
    treated as the radius of a square, zero-center box which will be
    used to compute the corresponding lbrt tuple.
    """

    lbrt = param.NumericTuple(default=(-0.5, -0.5, 0.5, 0.5), doc="""
        The (left, bottom, right, top) coordinates of the bounding box.""")

    group = param.String(default='Bounds', constant=True, doc="The assigned group name.")

    def __init__(self, lbrt, **params):
        # A scalar radius expands to a zero-centered square
        if not isinstance(lbrt, tuple):
            lbrt = (-lbrt, -lbrt, lbrt, lbrt)
        super(Bounds, self).__init__([], lbrt=lbrt, **params)
        (l,b,r,t) = self.lbrt
        # Closed rectangle: corner sequence ends back at the start point
        self.data = [np.array([(l, b), (l, t), (r, t), (r, b),(l, b)])]
| bsd-3-clause |
demsey/openembedded | contrib/weekly-changelog-report.py | 7 | 2288 | #!/usr/bin/python
# generates an OE changelog for last weeks activity (Mon-Sun) assuming it is run on
# any day of the following week
# TODO
# - remove patch count as it does not match after we remove "Merge branch" statements
# - add bugzilla info
import datetime
import os
# Python 2 script: builds last week's changelog from git and bugzilla
today = datetime.date.today()

# 0 = Mon, 6 = Sun
today_weekday = today.weekday()

# find Mon of this week; the reporting window is the Mon-Sun before it
end_day = today - datetime.timedelta(today_weekday)
start_day = end_day - datetime.timedelta(7)

print "OE weekly changelog %s to %s\n" % (start_day.isoformat(), end_day.isoformat())

# Commit summary for the window, with merge commits filtered out
# NOTE(review): shells out via os.system - relies on git/wget being on PATH
os.system("git shortlog --since=%s --until=%s | grep -v 'Merge branch' | grep -v 'Merge commit'|sed -e 's/^ //g'|cut -b -78 " % (start_day.isoformat(), end_day.isoformat()))

# Fetch CSV exports from bugzilla: bugs resolved/verified/closed in the
# last 7 days, and bugs newly opened in the last 7 days
os.system("wget 'http://bugs.openembedded.net/buglist.cgi?bug_file_loc=&bug_file_loc_type=allwordssubstr&bug_id=&bug_status=RESOLVED&bug_status=VERIFIED&bug_status=CLOSED&bugidtype=include&chfieldfrom=7d&chfieldto=Now&chfieldvalue=&email1=&email2=&emailassigned_to1=1&emailassigned_to2=1&emailcc2=1&emailqa_contact2=1&emailreporter2=1&emailtype1=substring&emailtype2=substring&field-1-0-0=bug_status&field0-0-0=noop&known_name=1WFixed&long_desc=&long_desc_type=substring&query_format=advanced&remaction=&short_desc=&short_desc_type=allwordssubstr&type-1-0-0=anyexact&type0-0-0=noop&value-1-0-0=RESOLVED%2CVERIFIED%2CCLOSED&value0-0-0=&ctype=csv' -O resolved-bugs.csv >& /dev/null")
os.system("wget 'http://bugs.openembedded.net/buglist.cgi?bug_file_loc=&bug_file_loc_type=allwordssubstr&bug_id=&bug_status=NEW&bugidtype=include&chfield=%5BBug%20creation%5D&chfieldfrom=7d&chfieldto=Now&chfieldvalue=&email1=&email2=&emailassigned_to1=1&emailassigned_to2=1&emailcc2=1&emailqa_contact2=1&emailreporter2=1&emailtype1=substring&emailtype2=substring&field-1-0-0=bug_status&field0-0-0=noop&long_desc=&long_desc_type=substring&query_format=advanced&remaction=&short_desc=&short_desc_type=allwordssubstr&type-1-0-0=anyexact&type0-0-0=noop&value-1-0-0=NEW&value0-0-0=&ctype=csv' -O new-bugs.csv &> /dev/null")

print "Bugs fixed:\n"
# Columns 1/7/8 of the CSV export: bug id, assignee, summary
os.system("cat resolved-bugs.csv | awk -F, '{print $1 \" \" $7 \"\t \" $8}' | sed s:\\\"::g")

print "\nBugs opened:\n"
os.system("cat new-bugs.csv | awk -F, '{print $1 \" \" $7 \"\t \" $8}' | sed s:\\\"::g")
| mit |
miguelfervi/SSBW-Restaurantes | restaurantes/lib/python2.7/site-packages/django/utils/safestring.py | 478 | 4414 | """
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from django.utils import six
from django.utils.functional import Promise, curry
class EscapeData(object):
    """
    Marker base class for strings that still require HTML escaping on output.
    """
    pass
class EscapeBytes(bytes, EscapeData):
    """
    A byte string that should be HTML-escaped when output.
    """
    pass
class EscapeText(six.text_type, EscapeData):
    """
    A unicode string object that should be HTML-escaped when output.
    """
    pass
# On Python 3 the native string type is text; on Python 2 it is bytes
if six.PY3:
    EscapeString = EscapeText
else:
    EscapeString = EscapeBytes
    # backwards compatibility for Python 2
    EscapeUnicode = EscapeText
class SafeData(object):
    """
    Marker base class for strings that are already safe for HTML output.
    """
    def __html__(self):
        """
        Returns the html representation of a string for interoperability.

        This allows other template engines to understand Django's SafeData.
        """
        return self
class SafeBytes(bytes, SafeData):
    """
    A bytes subclass that has been specifically marked as "safe" (requires no
    further escaping) for HTML output purposes.
    """
    def __add__(self, rhs):
        """
        Concatenating a safe byte string with another safe byte string or safe
        unicode string is safe. Otherwise, the result is no longer safe.
        """
        t = super(SafeBytes, self).__add__(rhs)
        if isinstance(rhs, SafeText):
            return SafeText(t)
        elif isinstance(rhs, SafeBytes):
            return SafeBytes(t)
        # rhs was not marked safe, so the result is a plain bytes object
        return t

    def _proxy_method(self, *args, **kwargs):
        """
        Wrap a call to a normal unicode method up so that we return safe
        results. The method that is being wrapped is passed in the 'method'
        argument.
        """
        method = kwargs.pop('method')
        data = method(self, *args, **kwargs)
        # Preserve safeness across the str/bytes boundary
        if isinstance(data, bytes):
            return SafeBytes(data)
        else:
            return SafeText(data)

    # decode() returns SafeText instead of plain text
    decode = curry(_proxy_method, method=bytes.decode)
class SafeText(six.text_type, SafeData):
    """
    A unicode (Python 2) / str (Python 3) subclass that has been specifically
    marked as "safe" for HTML output purposes.
    """
    def __add__(self, rhs):
        """
        Concatenating a safe unicode string with another safe byte string or
        safe unicode string is safe. Otherwise, the result is no longer safe.
        """
        t = super(SafeText, self).__add__(rhs)
        if isinstance(rhs, SafeData):
            return SafeText(t)
        # rhs was not marked safe, so the result is a plain string
        return t

    def _proxy_method(self, *args, **kwargs):
        """
        Wrap a call to a normal unicode method up so that we return safe
        results. The method that is being wrapped is passed in the 'method'
        argument.
        """
        method = kwargs.pop('method')
        data = method(self, *args, **kwargs)
        # Preserve safeness across the str/bytes boundary
        if isinstance(data, bytes):
            return SafeBytes(data)
        else:
            return SafeText(data)

    # encode() returns SafeBytes instead of plain bytes
    encode = curry(_proxy_method, method=six.text_type.encode)
# On Python 3 the native string type is text; on Python 2 it is bytes
if six.PY3:
    SafeString = SafeText
else:
    SafeString = SafeBytes
    # backwards compatibility for Python 2
    SafeUnicode = SafeText
def mark_safe(s):
    """
    Explicitly mark a string as safe for (HTML) output purposes. The returned
    object can be used everywhere a string or unicode object is appropriate.

    Can be called multiple times on a single string.
    """
    # Anything exposing __html__ (incl. SafeData) is already safe
    if hasattr(s, '__html__'):
        return s
    # Lazy (Promise) objects are wrapped according to their eventual type
    if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
        return SafeBytes(s)
    if isinstance(s, (six.text_type, Promise)):
        return SafeText(s)
    # Any other object is stringified first
    return SafeString(str(s))
def mark_for_escaping(s):
    """
    Explicitly mark a string as requiring HTML escaping upon output. Has no
    effect on SafeData subclasses.

    Can be called multiple times on a single string (the resulting escaping is
    only applied once).
    """
    # Safe strings and already-marked strings pass through unchanged.
    if hasattr(s, '__html__') or isinstance(s, EscapeData):
        return s
    wants_bytes = isinstance(s, bytes) or (
        isinstance(s, Promise) and s._delegate_bytes)
    if wants_bytes:
        return EscapeBytes(s)
    if isinstance(s, (six.text_type, Promise)):
        return EscapeText(s)
    return EscapeString(str(s))
| gpl-3.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/bytes/tests/test_bytes_utils.py | 5 | 4420 | import io
import pytest
from dask.bytes.utils import read_block, seek_delimiter, infer_storage_options
def test_read_block():
    """Exercise read_block with and without delimiter snapping."""
    delimiter = b'\n'
    data = delimiter.join([b'123', b'456', b'789'])
    f = io.BytesIO(data)

    # Plain byte-range read, no delimiter handling.
    assert read_block(f, 1, 2) == b'23'

    # With a delimiter, reads are extended/snapped to delimiter boundaries.
    assert read_block(f, 0, 1, delimiter=b'\n') == b'123\n'
    assert read_block(f, 0, 2, delimiter=b'\n') == b'123\n'
    assert read_block(f, 0, 3, delimiter=b'\n') == b'123\n'
    assert read_block(f, 0, 5, delimiter=b'\n') == b'123\n456\n'
    assert read_block(f, 0, 8, delimiter=b'\n') == b'123\n456\n789'
    assert read_block(f, 0, 100, delimiter=b'\n') == b'123\n456\n789'
    assert read_block(f, 1, 1, delimiter=b'\n') == b''
    assert read_block(f, 1, 5, delimiter=b'\n') == b'456\n'
    assert read_block(f, 1, 8, delimiter=b'\n') == b'456\n789'

    # Any partition into (offset, length) windows must reassemble the data.
    for ols in [[(0, 3), (3, 3), (6, 3), (9, 2)],
                [(0, 4), (4, 4), (8, 4)]]:
        out = [read_block(f, o, l, b'\n') for o, l in ols]
        assert b"".join(filter(None, out)) == data
def test_seek_delimiter_endline():
    """seek_delimiter leaves the file positioned just past the next delimiter."""
    f = io.BytesIO(b'123\n456\n789')

    # if at zero, stay at zero
    seek_delimiter(f, b'\n', 5)
    assert f.tell() == 0

    # choose the first block (result must not depend on blocksize)
    for bs in [1, 5, 100]:
        f.seek(1)
        seek_delimiter(f, b'\n', blocksize=bs)
        assert f.tell() == 4

    # handle long delimiters well, even with short blocksizes
    f = io.BytesIO(b'123abc456abc789')
    for bs in [1, 2, 3, 4, 5, 6, 10]:
        f.seek(1)
        seek_delimiter(f, b'abc', blocksize=bs)
        assert f.tell() == 6

    # End at the end when no further delimiter exists
    f = io.BytesIO(b'123\n456')
    f.seek(5)
    seek_delimiter(f, b'\n', 5)
    assert f.tell() == 7
def test_infer_storage_options():
    """infer_storage_options: protocol/path splitting for various URL forms."""
    # Local absolute and relative paths map to the 'file' protocol.
    so = infer_storage_options('/mnt/datasets/test.csv')
    assert so.pop('protocol') == 'file'
    assert so.pop('path') == '/mnt/datasets/test.csv'
    assert not so

    assert infer_storage_options('./test.csv')['path'] == './test.csv'
    assert infer_storage_options('../test.csv')['path'] == '../test.csv'

    # Windows-style paths are still plain 'file' paths.
    so = infer_storage_options('C:\\test.csv')
    assert so.pop('protocol') == 'file'
    assert so.pop('path') == 'C:\\test.csv'
    assert not so

    assert infer_storage_options('d:\\test.csv')['path'] == 'd:\\test.csv'
    assert infer_storage_options('\\test.csv')['path'] == '\\test.csv'
    assert infer_storage_options('.\\test.csv')['path'] == '.\\test.csv'
    assert infer_storage_options('test.csv')['path'] == 'test.csv'

    # Full URL: every component is split out; inherited options merge in.
    so = infer_storage_options(
        'hdfs://username:pwd@Node:123/mnt/datasets/test.csv?q=1#fragm',
        inherit_storage_options={'extra': 'value'})
    assert so.pop('protocol') == 'hdfs'
    assert so.pop('username') == 'username'
    assert so.pop('password') == 'pwd'
    assert so.pop('host') == 'Node'
    assert so.pop('port') == 123
    assert so.pop('path') == '/mnt/datasets/test.csv'
    assert so.pop('url_query') == 'q=1'
    assert so.pop('url_fragment') == 'fragm'
    assert so.pop('extra') == 'value'
    assert not so

    # Hyphens are allowed in usernames and hostnames.
    so = infer_storage_options('hdfs://User-name@Node-name.com/mnt/datasets/test.csv')
    assert so.pop('username') == 'User-name'
    assert so.pop('host') == 'Node-name.com'

    u = 'http://127.0.0.1:8080/test.csv'
    assert infer_storage_options(u) == {'protocol': 'http', 'path': u}

    # For s3 and gcs the netloc is actually the bucket name, so we want to
    # include it in the path. Test that:
    # - Parsing doesn't lowercase the bucket
    # - The bucket is included in path
    for protocol in ['s3', 'gcs', 'gs']:
        options = infer_storage_options('%s://Bucket-name.com/test.csv' % protocol)
        assert options['path'] == 'Bucket-name.com/test.csv'

    # Colliding keys between URL-derived and inherited options raise.
    with pytest.raises(KeyError):
        infer_storage_options('file:///bucket/file.csv', {'path': 'collide'})
    with pytest.raises(KeyError):
        infer_storage_options('hdfs:///bucket/file.csv', {'protocol': 'collide'})
@pytest.mark.parametrize('urlpath, expected_path', (
    (r'c:\foo\bar', r'c:\foo\bar'),
    (r'C:\\foo\bar', r'C:\\foo\bar'),
    (r'c:/foo/bar', r'c:/foo/bar'),
    (r'file:///c|\foo\bar', r'c:\foo\bar'),
    (r'file:///C|/foo/bar', r'C:/foo/bar'),
    (r'file:///C:/foo/bar', r'C:/foo/bar'),
))
def test_infer_storage_options_c(urlpath, expected_path):
    """Windows drive paths (incl. file:// and 'c|' forms) stay protocol 'file'."""
    so = infer_storage_options(urlpath)
    assert so['protocol'] == 'file'
    assert so['path'] == expected_path
| gpl-3.0 |
androidarmv6/android_external_chromium_org | tools/unused-symbols-report.py | 178 | 4793 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints a report of symbols stripped by the linker due to being unused.
To use, build with these linker flags:
-Wl,--gc-sections
-Wl,--print-gc-sections
the first one is the default in Release; search build/common.gypi for it
and to see where to add the other.
Then build, saving the output into a file:
make chrome 2>&1 | tee buildlog
and run this script on it:
./tools/unused-symbols-report.py buildlog > report.html
"""
import cgi
import optparse
import os
import re
import subprocess
import sys
# Lazily-started c++filt subprocess shared by all Demangle() calls.
cppfilt_proc = None


def Demangle(sym):
    """Demangle a C++ symbol by passing it through c++filt."""
    global cppfilt_proc
    if cppfilt_proc is None:
        # Keep one long-lived c++filt process instead of one per symbol.
        cppfilt_proc = subprocess.Popen(['c++filt'], stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE)
    # Python 2 print-to-file syntax: write the symbol, read one line back.
    print >>cppfilt_proc.stdin, sym
    return cppfilt_proc.stdout.readline().strip()
def Unyuck(sym):
    """Attempt to prettify a C++ symbol by some basic heuristics."""
    # Literal spellings of common template expansions, applied in order.
    literal_rewrites = (
        ('std::basic_string<char, std::char_traits<char>, '
         'std::allocator<char> >', 'std::string'),
        ('std::basic_string<wchar_t, std::char_traits<wchar_t>, '
         'std::allocator<wchar_t> >', 'std::wstring'),
        ('std::basic_string<unsigned short, '
         'base::string16_char_traits, '
         'std::allocator<unsigned short> >', 'string16'),
    )
    for ugly, pretty in literal_rewrites:
        sym = sym.replace(ugly, pretty)
    # Drop default allocator arguments from remaining templates.
    return re.sub(r', std::allocator<\S+\s+>', '', sym)
def Parse(input, skip_paths=None, only_paths=None):
    """Parse the --print-gc-sections build output.

    Args:
      input: iterable over the lines of the build output
      skip_paths: if set, skip any path containing this substring
      only_paths: if set, keep only paths containing this substring

    Yields:
      (target name, path to .o file, demangled symbol)
    """
    symbol_re = re.compile(r"'\.text\.(\S+)' in file '(\S+)'$")
    # Expected layout: out/<config>/<subdir>/<target>/<rest-of-path>
    path_re = re.compile(r"^out/[^/]+/[^/]+/([^/]+)/(.*)$")
    for line in input:
        match = symbol_re.search(line)
        if not match:
            continue
        symbol, path = match.groups()
        symbol = Unyuck(Demangle(symbol))
        path = os.path.normpath(path)
        if skip_paths and skip_paths in path:
            continue
        if only_paths and only_paths not in path:
            continue
        match = path_re.match(path)
        if not match:
            # Python 2 print-to-stderr syntax.
            print >>sys.stderr, "Skipping weird path", path
            continue
        target, path = match.groups()
        yield target, path, symbol
# HTML header for our output page.
TEMPLATE_HEADER = """<!DOCTYPE html>
<head>
<style>
body {
font-family: sans-serif;
font-size: 0.8em;
}
h1, h2 {
font-weight: normal;
margin: 0.5em 0;
}
h2 {
margin-top: 1em;
}
tr:hover {
background: #eee;
}
.permalink {
padding-left: 1ex;
font-size: 80%;
text-decoration: none;
color: #ccc;
}
.symbol {
font-family: WebKitWorkAround, monospace;
margin-left: 4ex;
text-indent: -4ex;
padding: 0.5ex 1ex;
}
.file {
padding: 0.5ex 1ex;
padding-left: 2ex;
font-family: WebKitWorkAround, monospace;
font-size: 90%;
color: #777;
}
</style>
</head>
<body>
<h1>chrome symbols deleted at link time</h1>
"""
def Output(iter):
    """Print HTML given an iterable of (target, path, symbol) tuples."""
    # Group symbols by target first so each target gets one table.
    targets = {}
    for target, path, symbol in iter:
        entries = targets.setdefault(target, [])
        entries.append((symbol, path))

    # Python 2 print statements throughout.
    print TEMPLATE_HEADER
    print "<p>jump to target:"
    print "<select onchange='document.location.hash = this.value'>"
    for target in sorted(targets.keys()):
        print "<option>%s</option>" % target
    print "</select></p>"

    for target in sorted(targets.keys()):
        print "<h2>%s" % target
        print "<a class=permalink href='#%s' name='%s'>#</a>" % (target, target)
        print "</h2>"
        print "<table width=100% cellspacing=0>"
        for symbol, path in sorted(targets[target]):
            # Allow line breaks at '::' so long symbols wrap nicely.
            htmlsymbol = cgi.escape(symbol).replace('::', '::<wbr>')
            print "<tr><td><div class=symbol>%s</div></td>" % htmlsymbol
            print "<td valign=top><div class=file>%s</div></td></tr>" % path
        print "</table>"
def main():
    """Parse flags, read the build log, and emit the HTML report to stdout."""
    parser = optparse.OptionParser(usage='%prog [options] buildoutput\n\n' +
                                   __doc__)
    parser.add_option("--skip-paths", metavar="STR", default="third_party",
                      help="skip paths matching STR [default=%default]")
    parser.add_option("--only-paths", metavar="STR",
                      help="only include paths matching STR [default=%default]")
    opts, args = parser.parse_args()

    if len(args) < 1:
        parser.print_help()
        sys.exit(1)

    iter = Parse(open(args[0]),
                 skip_paths=opts.skip_paths,
                 only_paths=opts.only_paths)
    Output(iter)
# Script entry point.
if __name__ == '__main__':
    main()
| bsd-3-clause |
nmartensen/pandas | pandas/tests/scalar/test_interval.py | 7 | 4026 | from __future__ import division
from pandas import Interval
import pytest
import pandas.util.testing as tm
@pytest.fixture
def interval():
    """Default right-closed interval (0, 1] used by most tests."""
    return Interval(0, 1)
class TestInterval(object):
def test_properties(self, interval):
assert interval.closed == 'right'
assert interval.left == 0
assert interval.right == 1
assert interval.mid == 0.5
def test_repr(self, interval):
assert repr(interval) == "Interval(0, 1, closed='right')"
assert str(interval) == "(0, 1]"
interval_left = Interval(0, 1, closed='left')
assert repr(interval_left) == "Interval(0, 1, closed='left')"
assert str(interval_left) == "[0, 1)"
def test_contains(self, interval):
assert 0.5 in interval
assert 1 in interval
assert 0 not in interval
msg = "__contains__ not defined for two intervals"
with tm.assert_raises_regex(TypeError, msg):
interval in interval
interval_both = Interval(0, 1, closed='both')
assert 0 in interval_both
assert 1 in interval_both
interval_neither = Interval(0, 1, closed='neither')
assert 0 not in interval_neither
assert 0.5 in interval_neither
assert 1 not in interval_neither
def test_equal(self):
assert Interval(0, 1) == Interval(0, 1, closed='right')
assert Interval(0, 1) != Interval(0, 1, closed='left')
assert Interval(0, 1) != 0
def test_comparison(self):
with tm.assert_raises_regex(TypeError, 'unorderable types'):
Interval(0, 1) < 2
assert Interval(0, 1) < Interval(1, 2)
assert Interval(0, 1) < Interval(0, 2)
assert Interval(0, 1) < Interval(0.5, 1.5)
assert Interval(0, 1) <= Interval(0, 1)
assert Interval(0, 1) > Interval(-1, 2)
assert Interval(0, 1) >= Interval(0, 1)
def test_hash(self, interval):
# should not raise
hash(interval)
def test_math_add(self, interval):
expected = Interval(1, 2)
actual = interval + 1
assert expected == actual
expected = Interval(1, 2)
actual = 1 + interval
assert expected == actual
actual = interval
actual += 1
assert expected == actual
msg = "unsupported operand type\(s\) for \+"
with tm.assert_raises_regex(TypeError, msg):
interval + Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval + 'foo'
def test_math_sub(self, interval):
expected = Interval(-1, 0)
actual = interval - 1
assert expected == actual
actual = interval
actual -= 1
assert expected == actual
msg = "unsupported operand type\(s\) for -"
with tm.assert_raises_regex(TypeError, msg):
interval - Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval - 'foo'
def test_math_mult(self, interval):
expected = Interval(0, 2)
actual = interval * 2
assert expected == actual
expected = Interval(0, 2)
actual = 2 * interval
assert expected == actual
actual = interval
actual *= 2
assert expected == actual
msg = "unsupported operand type\(s\) for \*"
with tm.assert_raises_regex(TypeError, msg):
interval * Interval(1, 2)
msg = "can\'t multiply sequence by non-int"
with tm.assert_raises_regex(TypeError, msg):
interval * 'foo'
def test_math_div(self, interval):
expected = Interval(0, 0.5)
actual = interval / 2.0
assert expected == actual
actual = interval
actual /= 2.0
assert expected == actual
msg = "unsupported operand type\(s\) for /"
with tm.assert_raises_regex(TypeError, msg):
interval / Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval / 'foo'
| bsd-3-clause |
RobinQuetin/CAIRIS-web | cairis/cairis/tests/GoalTests.py | 1 | 8342 | import logging
from urllib import quote
import jsonpickle
from Goal import Goal
from GoalEnvironmentProperties import GoalEnvironmentProperties
from tests.CairisTests import CairisTests
__author__ = 'Robin Quetin'
class GoalTests(CairisTests):
    """Integration tests for the /api/goals endpoints."""
    # region Class fields
    logger = logging.getLogger(__name__)
    # Fixture values expected to exist in the test database.
    existing_goal_id = 532
    existing_goal_name = 'Multi-Factor Authentication'
    existing_category = 'Maintain'
    existing_environment_name_1 = 'Stroke'
    existing_environment_name_2 = 'Psychosis'
    # Dotted path of the Goal class (module.ClassName).
    goal_class = Goal.__module__+'.'+Goal.__name__
    to_delete_ids = []
    # endregion
    def test_get_all(self):
        """GET /api/goals returns a non-empty dict of goals."""
        method = 'test_get_all'
        rv = self.app.get('/api/goals?session_id=test')
        goals = jsonpickle.decode(rv.data)
        self.assertIsNotNone(goals, 'No results after deserialization')
        self.assertIsInstance(goals, dict, 'The result is not a dictionary as expected')
        self.assertGreater(len(goals), 0, 'No goals in the dictionary')
        self.logger.info('[%s] Goals found: %d', method, len(goals))
        # Python 2: dict.values() is a list, so indexing works.
        goal = goals.values()[0]
        self.logger.info('[%s] First goal: %s [%d]\n', method, goal['theName'], goal['theId'])
    def test_get_by_name(self):
        """GET /api/goals/name/<name> returns the matching goal."""
        method = 'test_get_by_name'
        url = '/api/goals/name/%s?session_id=test' % quote(self.existing_goal_name)
        rv = self.app.get(url)
        self.assertIsNotNone(rv.data, 'No response')
        self.logger.debug('[%s] Response data: %s', method, rv.data)
        goal = jsonpickle.decode(rv.data)
        self.assertIsNotNone(goal, 'No results after deserialization')
        self.logger.info('[%s] Goal: %s [%d]\n', method, goal['theName'], goal['theId'])
    def test_delete(self):
        """DELETE /api/goals/name/<name> removes a goal created for the test."""
        method = 'test_delete'
        url = '/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName)
        new_goal_body = self.prepare_json()

        # Clear any leftover goal with the same name, then create a fresh one.
        self.app.delete(url)
        self.logger.info('[%s] Object to delete: %s', method, new_goal_body)
        self.app.post('/api/goals', content_type='application/json', data=new_goal_body)
        self.logger.info('[%s] URL: %s', method, url)
        rv = self.app.delete(url)
        self.logger.info('[%s] Response data: %s', method, rv.data)
        self.assertIsNotNone(rv.data, 'No response')
        json_resp = jsonpickle.decode(rv.data)
        self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
        message = json_resp.get('message', None)
        self.assertIsNotNone(message, 'No message in response')
        self.logger.info('[%s] Message: %s\n', method, message)
    def test_post(self):
        """POST /api/goals creates a goal and returns its new id."""
        method = 'test_post'
        url = '/api/goals'
        self.logger.info('[%s] URL: %s', method, url)
        new_goal_body = self.prepare_json()

        # Remove any leftover goal with the same name from earlier runs.
        self.app.delete('/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName))
        rv = self.app.post(url, content_type='application/json', data=new_goal_body)
        self.logger.debug('[%s] Response data: %s', method, rv.data)
        json_resp = jsonpickle.decode(rv.data)
        self.assertIsNotNone(json_resp, 'No results after deserialization')
        env_id = json_resp.get('goal_id', None)
        self.assertIsNotNone(env_id, 'No goal ID returned')
        self.assertGreater(env_id, 0, 'Invalid goal ID returned [%d]' % env_id)
        self.logger.info('[%s] Goal ID: %d\n', method, env_id)

        # Clean up the goal created above.
        rv = self.app.delete('/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName))
    def test_put(self):
        """PUT /api/goals/name/<name> updates an existing goal."""
        method = 'test_put'
        url = '/api/goals'
        self.logger.info('[%s] URL: %s', method, url)
        new_goal_body = self.prepare_json()

        # Start from a clean state, then create the goal to be updated.
        rv = self.app.delete('/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName))
        rv = self.app.post(url, content_type='application/json', data=new_goal_body)
        self.logger.debug('[%s] Response data: %s', method, rv.data)
        json_resp = jsonpickle.decode(rv.data)
        self.assertIsNotNone(json_resp, 'No results after deserialization')
        env_id = json_resp.get('goal_id', None)
        self.assertIsNotNone(env_id, 'No goal ID returned')
        self.assertGreater(env_id, 0, 'Invalid goal ID returned [%d]' % env_id)
        self.logger.info('[%s] Goal ID: %d', method, env_id)

        # Rename the goal and PUT it back under its old name.
        goal_to_update = self.prepare_new_goal()
        goal_to_update.theName = 'Edited test goal'
        goal_to_update.theId = env_id
        upd_env_body = self.prepare_json(goal=goal_to_update)
        rv = self.app.put('/api/goals/name/%s?session_id=test' % quote(self.prepare_new_goal().theName), data=upd_env_body, content_type='application/json')
        self.assertIsNotNone(rv.data, 'No response')
        json_resp = jsonpickle.decode(rv.data)
        self.assertIsNotNone(json_resp)
        self.assertIsInstance(json_resp, dict)
        message = json_resp.get('message', None)
        self.assertIsNotNone(message, 'No message in response')
        self.logger.info('[%s] Message: %s', method, message)
        self.assertGreater(message.find('successfully updated'), -1, 'The goal was not successfully updated')

        # The goal must now be retrievable under its new name.
        rv = self.app.get('/api/goals/name/%s?session_id=test' % quote(goal_to_update.theName))
        upd_goal = jsonpickle.decode(rv.data)
        self.assertIsNotNone(upd_goal, 'Unable to decode JSON data')
        self.logger.debug('[%s] Response data: %s', method, rv.data)
        self.logger.info('[%s] Goal: %s [%d]\n', method, upd_goal['theName'], upd_goal['theId'])

        # Clean up.
        rv = self.app.delete('/api/goals/name/%s?session_id=test' % quote(goal_to_update.theName))
    def prepare_new_goal(self):
        """Build a transient Goal (two environment properties) for POST/PUT tests."""
        new_goal_refinements = [
            [
                "PreventUnauthorised Certificate Access",
                "goal",
                "or",
                "No",
                "None"
            ]
        ]
        new_subgoal_refinements = [
            [
                "PreventUnauthorised Certificate Access",
                "goal",
                "or",
                "No",
                "None"
            ]
        ]
        # One property per known environment, sharing the refinements above.
        new_goal_props = [
            GoalEnvironmentProperties(
                environmentName=self.existing_environment_name_1,
                lbl='Test 1',
                definition='This is a first test property',
                category=self.existing_category,
                priority='Medium',
                fitCriterion='None',
                issue='None',
                goalRefinements=new_goal_refinements,
                subGoalRefinements=new_subgoal_refinements,
                concs=[], cas=[]
            ),
            GoalEnvironmentProperties(
                environmentName=self.existing_environment_name_2,
                lbl='Test 2',
                definition='This is a second test property',
                category=self.existing_category,
                priority='Low',
                fitCriterion='None',
                issue='Test issue',
                goalRefinements=new_goal_refinements,
                subGoalRefinements=new_subgoal_refinements,
                concs=[], cas=[]
            )
        ]
        new_goal = Goal(
            goalId=-1,
            goalName='Test goal',
            goalOrig='',
            tags=['test', 'test123'],
            environmentProperties=[]
        )
        new_goal.theEnvironmentProperties = new_goal_props
        # Strip helper dictionaries the API does not expect in the payload.
        new_goal.theEnvironmentDictionary = {}
        new_goal.theGoalPropertyDictionary = {}
        delattr(new_goal, 'theEnvironmentDictionary')
        delattr(new_goal, 'theGoalPropertyDictionary')
        return new_goal
    def prepare_dict(self, goal=None):
        """Wrap *goal* (default: a fresh test goal) in a request payload dict."""
        if goal is None:
            goal = self.prepare_new_goal()
        else:
            assert isinstance(goal, Goal)
        return {
            'session_id': 'test',
            'object': goal,
        }
def prepare_json(self, data_dict=None, goal=None):
if data_dict is None:
data_dict = self.prepare_dict(goal=goal)
else:
assert isinstance(data_dict, dict)
new_goal_body = jsonpickle.encode(data_dict, unpicklable=False)
self.logger.info('JSON data: %s', new_goal_body)
return new_goal_body | apache-2.0 |
omprakasha/odoo | addons/l10n_be_hr_payroll_account/__openerp__.py | 298 | 1626 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Payroll with Accounting',
'category': 'Localization',
'author': 'OpenERP SA',
'depends': ['l10n_be_hr_payroll', 'hr_payroll_account', 'l10n_be'],
'version': '1.0',
'description': """
Accounting Data for Belgian Payroll Rules.
==========================================
""",
'auto_install': True,
'website': 'https://www.odoo.com/page/accounting',
'demo': [],
'data':[
'l10n_be_wizard.yml',
'l10n_be_hr_payroll_account_data.xml',
'data/hr.salary.rule.csv',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tiwillia/openshift-tools | scripts/remote-heal/remote-healer.py | 5 | 6385 | #!/usr/bin/python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Tool to process and take action on incoming zabbix triggers.
'''
# Disabling invalid-name because pylint doesn't like the naming conention we have.
# pylint: disable=invalid-name
import argparse
import ConfigParser
import logging
import os
import re
import shlex
import subprocess
import sys
class RemoteHealer(object):
    """Process zabbix triggers (from CLI or SSH forced command) and run
    the corresponding healing actions on the affected host."""

    def __init__(self):
        self.CONFIG_FILE = '/etc/openshift_tools/remote_healer.conf'
        # Default creds loader; may be overridden by the config file.
        self._creds_prefix = '/usr/local/bin/autokeys_loader'
        self.load_config()
        self._args = self.parse_args()
        self.setup_logging()
        logging.debug("Got args: " + str(self._args))

    @staticmethod
    def run_cmd(cmd):
        """Run passed in command (list-separated arguments).

        Failures to launch are logged, not raised.
        """
        logging.debug("running: %s", ' '.join(cmd))
        try:
            subprocess.call(cmd)
        except OSError:
            logging.info("failed to run: %s", ' '.join(cmd))

    def cmd_builder(self, cmd):
        """Return *cmd* prefixed with the configured creds loader."""
        new_cmd = [self._creds_prefix]
        new_cmd.extend(cmd)
        return new_cmd

    def ossh_cmd(self, host, cmd):
        """Build an 'ossh' invocation that runs *cmd* as root on *host*."""
        ssh_cmd = ['ossh', host, '-l', 'root', '-c', cmd]
        return self.cmd_builder(ssh_cmd)

    @staticmethod
    def parse_args():
        """Parse command line arguments passed in through the
        SSH_ORIGINAL_COMMAND environment variable when READ_SSH is a
        param.
        Also handle when run manually.
        """
        my_args = None
        read_ssh_env = False

        # authorized_keys will force direct our command/argv to be
        # 'remote-healer READ_SSH' with the original params stored
        # in SSH_ORIGINAL_COMMAND
        if "READ_SSH" in sys.argv:
            read_ssh_env = True

        parser = argparse.ArgumentParser(description='Take trigger values ' +
                                         'from command line or ' +
                                         'SSH_ORIGINAL_COMMAND and take ' +
                                         'appropriate healing actions')
        parser.add_argument("--host", required=True)
        parser.add_argument("--trigger", required=True)
        parser.add_argument("--trigger-val", required=True)
        parser.add_argument("--verbose", action="store_true", help='Print to stdout')
        parser.add_argument("--debug", action="store_true", help='Log more details')

        if read_ssh_env:
            cmd = os.environ.get("SSH_ORIGINAL_COMMAND", "")
            # SSH_ORIGINAL_COMMAND will include the command part and not just
            # the args. So drop the first lexical token before calling
            # parse_args()
            my_args = parser.parse_args(shlex.split(cmd)[1:])
        else:
            my_args = parser.parse_args()
        return my_args

    def setup_logging(self):
        """Configure file logging (and optional stdout echo)."""
        LOGFILE = "/var/log/remote-healer.log"

        # Default log level
        log_level = logging.INFO
        if self._args.debug:
            log_level = logging.DEBUG
        logging.basicConfig(filename=LOGFILE, format="%(asctime)s %(message)s",
                            level=log_level)

        if self._args.verbose:
            # Print to stdout in addition to log file
            logging.getLogger().addHandler(logging.StreamHandler())

    def load_config(self):
        """Setup creds prefix to ensure creds are acquired before trying
        to run a healing action."""
        config = ConfigParser.ConfigParser()
        config.read(self.CONFIG_FILE)
        if config.has_option('creds', 'loader'):
            self._creds_prefix = config.get('creds', 'loader')

    def validate_host(self):
        """Make sure host argument is non-malicious (it is interpolated
        into ssh commands)."""
        # Hosts typically have the form of cluster-type-randomid
        # ie. qe-master-a1b2c3 / qe-node-compute-a1b2c3
        # ... there are exceptions: ansible-tower / puppet / use-ctl
        regex = r'^[a-zA-Z0-9]+[a-zA-Z0-9-]*$'
        match = re.search(regex, self._args.host)
        if match is None:
            logging.info("Host: %s doesn't match a know host pattern",
                         self._args.host)
            sys.exit(1)
        self._args.host = match.group(0)

    def main(self):
        """Entry point: dispatch on the trigger name and run the heal."""
        logging.info("host: " + self._args.host + " trigger: " +
                     self._args.trigger + " trigger value: " +
                     self._args.trigger_val)

        # Validate passed in host arg since it will be used for ssh cmds
        self.validate_host()

        #
        # Here we will match on the passed in trigger and take
        # appropriate action.
        # Be sure to have review by Joel Smith when making changes.
        #
        if re.search(r'^\[HEAL\] OVS may not be running on', self._args.trigger):
            logging.info("Restarting OVS on " + self._args.host)
            # Stop OpenShift/docker
            cmd = self.ossh_cmd(self._args.host,
                                'systemctl stop atomic-openshift-node '
                                'atomic-openshift-master docker')
            self.run_cmd(cmd)

            # Restart Open vSwitch
            cmd = self.ossh_cmd(self._args.host, 'systemctl restart openvswitch')
            self.run_cmd(cmd)

            # Start OpenShift/docker
            cmd = self.ossh_cmd(self._args.host,
                                'systemctl start atomic-openshift-master '
                                'atomic-openshift-node docker')
            self.run_cmd(cmd)

            # Start up monitoring
            cmd = self.ossh_cmd(self._args.host,
                                'systemctl start oso-rhel7-host-monitoring')
            self.run_cmd(cmd)

            # Run reporting to quiet down trigger.
            # Bugfix: container name was misspelled 'oso-rhe7-host-monitoring'.
            cmd = self.ossh_cmd(self._args.host,
                                'docker exec oso-rhel7-host-monitoring /usr/bin/cron-send-ovs-stats')
            # Bugfix: this command was previously built but never executed.
            self.run_cmd(cmd)
        else:
            logging.info("No healing action defined for trigger: " + self._args.trigger)
# Script entry point: construct (parses args, loads config) and dispatch.
if __name__ == '__main__':
    rmt_heal = RemoteHealer()
    rmt_heal.main()
| apache-2.0 |
ssbarnea/ansible | lib/ansible/module_utils/facts/network/freebsd.py | 232 | 1190 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class FreeBSDNetwork(GenericBsdIfconfigNetwork):
    """
    This is the FreeBSD Network Class.
    It uses the GenericBsdIfconfigNetwork unchanged.
    """
    # Selects this class when the detected platform is FreeBSD.
    platform = 'FreeBSD'
class FreeBSDNetworkCollector(NetworkCollector):
    """Fact collector wiring FreeBSDNetwork to the 'FreeBSD' platform."""
    _fact_class = FreeBSDNetwork
    _platform = 'FreeBSD'
| gpl-3.0 |
bitmazk/django-people | people/admin.py | 1 | 2123 | """Admin classes for the ``people`` app."""
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from hvad.admin import TranslatableAdmin
from . import models
class NationalityAdmin(TranslatableAdmin):
    """Admin for the ``Nationality`` model."""
    list_display = ['get_name', 'all_translations']
    list_select_related = []

    def get_name(self, obj):
        # Shown as the "Name" list column; reads the translated name field.
        return obj.name
    get_name.short_description = _('Name')
class LinkAdmin(admin.ModelAdmin):
    """Admin for the ``Link`` model."""
    list_display = ['person', 'link_type', 'url', ]
class LinkInline(admin.TabularInline):
    """Inline admin for ``Link`` objects (used inside PersonAdmin)."""
    model = models.Link
class LinkTypeAdmin(TranslatableAdmin):
    """Admin for the ``LinkType`` model."""
    list_display = ['get_name', 'ordering', 'all_translations', ]
    list_select_related = []

    def get_name(self, obj):
        # Shown as the "Name" list column; reads the translated name field.
        return obj.name
    get_name.short_description = _('Name')
class PersonAdmin(TranslatableAdmin):
    """Admin for the ``Person`` model."""
    inlines = [LinkInline, ]
    list_display = [
        'roman_first_name', 'roman_last_name', 'non_roman_first_name_link',
        'non_roman_last_name', 'chosen_name', 'gender', 'title', 'role',
        'phone', 'email', 'ordering', 'all_translations', ]
    list_select_related = []

    def non_roman_first_name_link(self, obj):
        # Renders the non-roman first name as a link to the change page.
        # NOTE(review): obj.non_roman_first_name is interpolated into HTML
        # without escaping; if that field can contain markup, consider
        # django.utils.html.format_html. 'allow_tags' is also deprecated
        # in newer Django versions — confirm target Django release.
        return u'<a href="{0}/">{1}</a>'.format(
            obj.pk, obj.non_roman_first_name)
    non_roman_first_name_link.allow_tags = True
    non_roman_first_name_link.short_description = "Non roman first name"
class RoleAdmin(TranslatableAdmin):
    """Admin for the ``Role`` model."""
    list_display = ['get_name', 'all_translations', ]
    list_select_related = []

    def get_name(self, obj):
        # Shown as the "Name" list column; reads the translated name field.
        return obj.name
    get_name.short_description = _('Name')
# Wire each admin class to its model in the default admin site.
admin.site.register(models.Nationality, NationalityAdmin)
admin.site.register(models.Link, LinkAdmin)
admin.site.register(models.LinkType, LinkTypeAdmin)
admin.site.register(models.Person, PersonAdmin)
admin.site.register(models.Role, RoleAdmin)
wgerlach/pipeline | bin/archive/seq_type_guess.py | 3 | 3744 | #!/usr/bin/env python
import os, sys, math, random, subprocess, gzip
from collections import defaultdict
from optparse import OptionParser
from Bio import SeqIO
from Bio.SeqIO.QualityIO import FastqGeneralIterator
def seq_iter(file_hdl, stype):
    """Return a record iterator for *file_hdl*.

    fastq uses Biopython's fast FastqGeneralIterator (yields plain tuples);
    any other type goes through SeqIO.parse (yields SeqRecord objects).
    """
    if stype == 'fastq':
        return FastqGeneralIterator(file_hdl)
    else:
        return SeqIO.parse(file_hdl, stype)
def split_rec(rec, stype):
    """Normalize a parsed record to (id, uppercase sequence, quality).

    fastq records are (header, seq, qual) tuples; other records are
    SeqRecord-like objects, for which quality is None.
    """
    if stype == 'fastq':
        header, seq, qual = rec
        # The record id is the first whitespace-separated token of the header.
        return header.split()[0], seq.upper(), qual
    return rec.id, str(rec.seq).upper(), None
def countseqs(infile, gzip, stype):
    """Count sequence records in *infile* by grep-counting header lines.

    Args:
        infile: path to the sequence file.
        gzip: truthy if the file is gzip-compressed (note: the parameter
              name shadows the module-level ``gzip`` import, which is
              unused here).
        stype: 'fasta' or 'fastq' ('@' headers for fastq, '>' otherwise).

    Returns the record count; raises IOError if grep fails, and exits the
    process if grep produced no output.
    """
    headchar = '@' if stype == 'fastq' else '>'
    cmd = ['grep', '-c', "^%s" % headchar]
    if gzip:
        # Stream the decompressed file into grep.
        proc_in = subprocess.Popen(['zcat', infile], stdout=subprocess.PIPE)
        proc = subprocess.Popen(cmd, stdin=proc_in.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    else:
        cmd.append(infile)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        # Bugfix: 'cmd' was previously undefined here, so a grep failure
        # raised NameError instead of the intended IOError.
        raise IOError("%s\n%s" % (" ".join(cmd), stderr))
    slen = stdout.strip()
    if not slen:
        sys.stderr.write("%s is invalid %s file\n" % (infile, stype))
        exit(1)
    return int(slen)
def get_seq_type(size, data):
    """Classify a dataset as 'Amplicon' or 'WGS' from its prefix entropies.

    *data* maps ``size``-length sequence prefixes to counts; kset[i] holds
    the entropy of the (i+1)-length prefixes.
    NOTE(review): indexes kset[15] and kset[10] below, so callers must pass
    size >= 16 (main() always uses 16) — confirm before reusing elsewhere.
    """
    kset = []
    total = sum( data.values() )
    for i in range(1, size+1):
        kset.append( sub_kmer(i, total, data) )
    # black box logic: empirically-derived entropy thresholds.
    # Low prefix diversity (amplicons share primers) -> low entropy.
    if (kset[15] < 9.8) and (kset[10] < 6):
        return "Amplicon"
    else:
        return "WGS"
def sub_kmer(pos, total, data):
    """Shannon entropy (in bits) of the ``pos``-length prefix distribution.

    Args:
        pos: prefix length to collapse the k-mers down to.
        total: total count across all k-mers in *data*.
        data: mapping of k-mer string -> occurrence count.

    Returns the entropy as a float (0.0 when a single prefix dominates).
    """
    sub_data = defaultdict(int)
    entropy = 0
    # Collapse full k-mers to their first ``pos`` characters.
    # .items() works on both Python 2 and 3 (the original used the
    # Python-2-only iteritems()).
    for kmer, num in data.items():
        sub_data[kmer[:pos]] += num
    for skmer, snum in sub_data.items():
        sratio = float(snum) / total
        entropy += (-1 * sratio) * math.log(sratio, 2)
    return entropy
def main(args):
    """CLI entry point: sample 16-mer prefixes and guess Amplicon vs WGS."""
    usage = "usage: %prog [options] -i input_fasta"
    parser = OptionParser(usage=usage)
    parser.add_option("-i", "--input", dest="input", default=None, help="Input sequence file")
    parser.add_option("-o", "--output", dest="output", default=None, help="Output guess, if not called prints to STDOUT")
    parser.add_option("-t", "--type", dest="type", default="fasta", help="Input file type. Must be fasta or fastq [default 'fasta']")
    parser.add_option("-z", "--gzip", dest="gzip", default=False, action="store_true", help="Input file is gzipped [default is not]")
    parser.add_option("-m", "--max_seq", dest="max_seq", default=100000, type="int", help="max number of seqs process [default 100000]")

    # check options
    (opts, args) = parser.parse_args()
    if not opts.input:
        parser.error("Missing input file")
    if (opts.type != 'fasta') and (opts.type != 'fastq'):
        parser.error("File type '%s' is invalid" % opts.type)

    # set variables
    if opts.gzip:
        in_hdl = gzip.open(opts.input, "rb")
    else:
        in_hdl = open(opts.input, "rU")
    seqnum = countseqs(opts.input, opts.gzip, opts.type)
    # Probability of keeping any one record, so ~max_seq records get sampled.
    seqper = (opts.max_seq * 1.0) / seqnum
    kmer_len = 16
    prefix_map = defaultdict(int)

    # parse sequences: randomly subsample records and tally 16-mer prefixes
    snum = 0
    for rec in seq_iter(in_hdl, opts.type):
        head, seq, qual = split_rec(rec, opts.type)
        if (len(seq) >= kmer_len) and (seqper >= random.random()):
            prefix_map[ seq[:kmer_len] ] += 1
            snum += 1

    # get stats and emit the guess to stdout or the requested file
    seq_type_guess = get_seq_type(kmer_len, prefix_map)
    if not opts.output:
        sys.stdout.write(seq_type_guess+"\n")
    else:
        out_hdl = open(opts.output, "w")
        out_hdl.write(seq_type_guess+"\n")
        out_hdl.close()
    return 0
# Script entry point.
if __name__ == "__main__":
    sys.exit( main(sys.argv) )
| bsd-2-clause |
defionscode/ansible | lib/ansible/modules/network/aci/aci_switch_leaf_selector.py | 7 | 10169 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <brunocalogero@hotmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata (schema 1.1): preview-status, certified module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
# Module documentation rendered by 'ansible-doc'.  Two fixes versus the
# original: the 'to' option was described as "Start" of the range (copy-paste
# from 'from'), and 'policy_group' claimed a 'name' alias that does not exist
# in the argument_spec below ('name' already aliases 'leaf').
DOCUMENTATION = r'''
---
module: aci_switch_leaf_selector
short_description: Bind leaf selectors to switch policy leaf profiles (infra:LeafS, infra:NodeBlk, infra:RsAccNodePGrep)
description:
- Bind leaf selectors (with node block range and policy group) to switch policy leaf profiles on Cisco ACI fabrics.
notes:
- This module is to be used with M(aci_switch_policy_leaf_profile)
  One first creates a leaf profile (infra:NodeP) and then creates an associated selector (infra:LeafS),
- More information about the internal APIC classes B(infra:LeafS), B(infra:NodeBlk) and B(infra:RsAccNodePGrp) from
  L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
options:
  description:
    description:
    - The description to assign to the C(leaf).
  leaf_profile:
    description:
    - Name of the Leaf Profile to which we add a Selector.
    aliases: [ leaf_profile_name ]
  leaf:
    description:
    - Name of Leaf Selector.
    aliases: [ name, leaf_name, leaf_profile_leaf_name, leaf_selector_name ]
  leaf_node_blk:
    description:
    - Name of Node Block range to be added to Leaf Selector of given Leaf Profile.
    aliases: [ leaf_node_blk_name, node_blk_name ]
  leaf_node_blk_description:
    description:
    - The description to assign to the C(leaf_node_blk)
  from:
    description:
    - Start of Node Block range.
    type: int
    aliases: [ node_blk_range_from, from_range, range_from ]
  to:
    description:
    - End of Node Block range.
    type: int
    aliases: [ node_blk_range_to, to_range, range_to ]
  policy_group:
    description:
    - Name of the Policy Group to be added to Leaf Selector of given Leaf Profile.
    aliases: [ policy_group_name ]
  state:
    description:
    - Use C(present) or C(absent) for adding or removing.
    - Use C(query) for listing an object or multiple objects.
    choices: [ absent, present, query ]
    default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: adding a switch policy leaf profile selector associated Node Block range (w/ policy group)
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
leaf_node_blk: node_blk_name
from: 1011
to: 1011
policy_group: somepolicygroupname
state: present
delegate_to: localhost
- name: adding a switch policy leaf profile selector associated Node Block range (w/o policy group)
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
leaf_node_blk: node_blk_name
from: 1011
to: 1011
state: present
delegate_to: localhost
- name: Removing a switch policy leaf profile selector
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
state: absent
delegate_to: localhost
- name: Querying a switch policy leaf profile selector
aci_switch_leaf_selector:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
leaf: leaf_selector_name
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Module entry point: create, remove or query a leaf selector (infra:LeafS)."""
    argument_spec = aci_argument_spec()
    argument_spec.update({
        'description': dict(type='str'),
        'leaf_profile': dict(type='str', aliases=['leaf_profile_name']),  # Not required for querying all objects
        'leaf': dict(type='str', aliases=['name', 'leaf_name', 'leaf_profile_leaf_name', 'leaf_selector_name']),  # Not required for querying all objects
        'leaf_node_blk': dict(type='str', aliases=['leaf_node_blk_name', 'node_blk_name']),
        'leaf_node_blk_description': dict(type='str'),
        # NOTE: Keyword 'from' is a reserved word in python, so we need it as a string
        'from': dict(type='int', aliases=['node_blk_range_from', 'from_range', 'range_from']),
        'to': dict(type='int', aliases=['node_blk_range_to', 'to_range', 'range_to']),
        'policy_group': dict(type='str', aliases=['policy_group_name']),
        'state': dict(type='str', default='present', choices=['absent', 'present', 'query']),
    })

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['leaf_profile', 'leaf']],
            ['state', 'present', ['leaf_profile', 'leaf', 'leaf_node_blk', 'from', 'to']]
        ]
    )

    description = module.params['description']
    leaf_profile = module.params['leaf_profile']
    leaf = module.params['leaf']
    leaf_node_blk = module.params['leaf_node_blk']
    leaf_node_blk_description = module.params['leaf_node_blk_description']
    # Trailing underscores because 'from' is a keyword and 'to' mirrors it.
    from_ = module.params['from']
    to_ = module.params['to']
    policy_group = module.params['policy_group']
    state = module.params['state']

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='infraNodeP',
            aci_rn='infra/nprof-{0}'.format(leaf_profile),
            module_object=leaf_profile,
            target_filter={'name': leaf_profile},
        ),
        subclass_1=dict(
            aci_class='infraLeafS',
            # NOTE: normal rn: leaves-{name}-typ-{type}, hence here hardcoded to range for purposes of module
            aci_rn='leaves-{0}-typ-range'.format(leaf),
            module_object=leaf,
            target_filter={'name': leaf},
        ),
        # NOTE: infraNodeBlk is not made into a subclass because there is a 1-1 mapping between node block and leaf selector name
        child_classes=['infraNodeBlk', 'infraRsAccNodePGrp'],
    )

    aci.get_existing()

    if state == 'present':
        # The node block child is always pushed.  The policy-group relation is
        # optional (see the "w/o policy group" example above): previously it
        # was sent unconditionally, which posted a bogus relation with
        # tDn '...accnodepgrp-None' whenever policy_group was omitted.
        child_configs = [
            dict(
                infraNodeBlk=dict(
                    attributes=dict(
                        descr=leaf_node_blk_description,
                        name=leaf_node_blk,
                        from_=from_,
                        to_=to_,
                    ),
                ),
            ),
        ]
        if policy_group is not None:
            child_configs.append(dict(
                infraRsAccNodePGrp=dict(
                    attributes=dict(
                        tDn='uni/infra/funcprof/accnodepgrp-{0}'.format(policy_group),
                    ),
                ),
            ))

        aci.payload(
            aci_class='infraLeafS',
            class_config=dict(
                descr=description,
                name=leaf,
            ),
            child_configs=child_configs,
        )

        aci.get_diff(aci_class='infraLeafS')

        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()
if __name__ == "__main__":
    # AnsibleModule handles argument parsing and exit codes inside main().
    main()
| gpl-3.0 |
Kingdread/qutebrowser | qutebrowser/commands/runners.py | 5 | 8580 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Module containing command managers (SearchRunner and CommandRunner)."""
import collections
from PyQt5.QtCore import pyqtSlot, QUrl, QObject
from qutebrowser.config import config, configexc
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.utils import message, log, objreg, qtutils
from qutebrowser.misc import split
# Result of parsing one commandline: the command object (None when parsed with
# fallback=True and the command is unknown), its argument list, and the full
# commandline re-assembled as a list of strings.
ParseResult = collections.namedtuple('ParseResult', 'cmd, args, cmdline')
def replace_variables(win_id, arglist):
    """Utility function to replace variables like {url} in a list of args.

    Args:
        win_id: The window whose tabbed-browser provides the current URL.
        arglist: List of argument strings; every '{url}' entry is substituted.

    Return:
        A new list with substitutions applied.

    Raises:
        cmdexc.CommandError: If the current URL is invalid.
    """
    args = []
    if '{url}' in arglist:
        # Only resolve the tabbed-browser (and current URL) when a {url}
        # placeholder is actually present — the lookup is unused otherwise.
        tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                    window=win_id)
        try:
            url = tabbed_browser.current_url().toString(QUrl.FullyEncoded |
                                                        QUrl.RemovePassword)
        except qtutils.QtValueError as e:
            msg = "Current URL is invalid"
            if e.reason:
                msg += " ({})".format(e.reason)
            msg += "!"
            raise cmdexc.CommandError(msg)
    for arg in arglist:
        if arg == '{url}':
            args.append(url)
        else:
            args.append(arg)
    return args
class CommandRunner(QObject):

    """Parse and run qutebrowser commandline commands.

    Attributes:
        _win_id: The window this CommandRunner is associated with.
    """

    def __init__(self, win_id, parent=None):
        super().__init__(parent)
        self._win_id = win_id

    def _get_alias(self, text):
        """Get an alias from the config.

        Args:
            text: The text to parse.

        Return:
            None if no alias was found.
            The new command string if an alias was found.
        """
        parts = text.strip().split(maxsplit=1)
        try:
            alias = config.get('aliases', parts[0])
        except (configexc.NoOptionError, configexc.NoSectionError):
            return None
        try:
            new_cmd = '{} {}'.format(alias, parts[1])
        except IndexError:
            # No arguments were given after the alias name.
            new_cmd = alias
        if text.endswith(' '):
            # Preserve a trailing space so completion behaves the same.
            new_cmd += ' '
        return new_cmd

    def parse_all(self, text, *args, **kwargs):
        """Split a command on ;; and parse all parts.

        If the first command in the commandline is a non-split one, it only
        returns that.

        Args:
            text: Text to parse.
            *args/**kwargs: Passed to parse().

        Yields:
            ParseResult tuples.
        """
        if ';;' in text:
            # Get the first command and check if it doesn't want to have ;;
            # split.
            first = text.split(';;')[0]
            result = self.parse(first, *args, **kwargs)
            if result.cmd.no_cmd_split:
                sub_texts = [text]
            else:
                sub_texts = [e.strip() for e in text.split(';;')]
        else:
            sub_texts = [text]
        for sub in sub_texts:
            yield self.parse(sub, *args, **kwargs)

    def parse(self, text, *, aliases=True, fallback=False, keep=False):
        """Split the commandline text into command and arguments.

        Args:
            text: Text to parse.
            aliases: Whether to handle aliases.
            fallback: Whether to do a fallback splitting when the command was
                      unknown.
            keep: Whether to keep special chars and whitespace

        Return:
            A (cmd, args, cmdline) ParseResult tuple.
        """
        cmdstr, sep, argstr = text.partition(' ')
        if not cmdstr and not fallback:
            raise cmdexc.NoSuchCommandError("No command given")
        if aliases:
            new_cmd = self._get_alias(text)
            if new_cmd is not None:
                log.commands.debug("Re-parsing with '{}'.".format(new_cmd))
                # Re-parse with aliases disabled so the expansion is applied
                # only once.
                return self.parse(new_cmd, aliases=False, fallback=fallback,
                                  keep=keep)
        try:
            cmd = cmdutils.cmd_dict[cmdstr]
        except KeyError:
            if fallback:
                # Unknown command: return cmd/args as None, only a cmdline.
                cmd = None
                args = None
                if keep:
                    cmdstr, sep, argstr = text.partition(' ')
                    cmdline = [cmdstr, sep] + argstr.split()
                else:
                    cmdline = text.split()
            else:
                raise cmdexc.NoSuchCommandError('{}: no such command'.format(
                    cmdstr))
        else:
            args = self._split_args(cmd, argstr, keep)
            if keep and args:
                # The separator is glued to the first argument so that
                # joining cmdline reproduces the original text.
                cmdline = [cmdstr, sep + args[0]] + args[1:]
            elif keep:
                cmdline = [cmdstr, sep]
            else:
                cmdline = [cmdstr] + args[:]
        return ParseResult(cmd=cmd, args=args, cmdline=cmdline)

    def _split_args(self, cmd, argstr, keep):
        """Split the arguments from an arg string.

        Args:
            cmd: The command we're currently handling.
            argstr: An argument string.
            keep: Whether to keep special chars and whitespace

        Return:
            A list containing the splitted strings.
        """
        if not argstr:
            return []
        elif cmd.maxsplit is None:
            return split.split(argstr, keep=keep)
        else:
            # If split=False, we still want to split the flags, but not
            # everything after that.
            # We first split the arg string and check the index of the first
            # non-flag args, then we re-split again properly.
            # example:
            #
            # input: "--foo -v bar baz"
            # first split: ['--foo', '-v', 'bar', 'baz']
            #                 0        1     2      3
            # second split: ['--foo', '-v', 'bar baz']
            # (maxsplit=2)
            split_args = split.simple_split(argstr, keep=keep)
            flag_arg_count = 0
            for i, arg in enumerate(split_args):
                arg = arg.strip()
                if arg.startswith('-'):
                    if arg in cmd.flags_with_args:
                        flag_arg_count += 1
                else:
                    maxsplit = i + cmd.maxsplit + flag_arg_count
                    return split.simple_split(argstr, keep=keep,
                                              maxsplit=maxsplit)
            else:  # pylint: disable=useless-else-on-loop
                # If there are only flags, we got it right on the first try
                # already.
                return split_args

    def run(self, text, count=None):
        """Parse a command from a line of text and run it.

        Args:
            text: The text to parse.
            count: The count to pass to the command.
        """
        for result in self.parse_all(text):
            args = replace_variables(self._win_id, result.args)
            if count is not None:
                result.cmd.run(self._win_id, args, count=count)
            else:
                result.cmd.run(self._win_id, args)

    @pyqtSlot(str, int)
    def run_safely(self, text, count=None):
        """Run a command and display exceptions in the statusbar."""
        try:
            self.run(text, count)
        except (cmdexc.CommandMetaError, cmdexc.CommandError) as e:
            message.error(self._win_id, e, immediately=True)

    @pyqtSlot(str, int)
    def run_safely_init(self, text, count=None):
        """Run a command and display exceptions in the statusbar.

        Contrary to run_safely, error messages are queued so this is more
        suitable to use while initializing."""
        try:
            self.run(text, count)
        except (cmdexc.CommandMetaError, cmdexc.CommandError) as e:
            message.error(self._win_id, e)
| gpl-3.0 |
with-git/tensorflow | tensorflow/python/kernel_tests/cholesky_op_test.py | 4 | 14711 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Different gradient implementations for benchmark purposes
def SpecializedGrad(l, grad):
  """Cholesky gradient computed by the dedicated CholeskyGrad kernel."""
  result = gen_linalg_ops.cholesky_grad(l, grad)
  return result
def _GradWithInverseL(l, l_inverse, grad):
  """Shared core of the composite Cholesky gradients below.

  Given the Cholesky factor l, its (possibly approximate) inverse, and the
  incoming gradient, builds the symmetrized gradient w.r.t. the input matrix.
  """
  # middle = l^H @ grad, then keep only the lower triangle with the
  # diagonal halved (the Cholesky factor only determines that part).
  middle = math_ops.matmul(l, grad, adjoint_a=True)
  middle = array_ops.matrix_set_diag(middle,
                                     0.5 * array_ops.matrix_diag_part(middle))
  middle = array_ops.matrix_band_part(middle, -1, 0)
  # grad_a = l^{-H} @ middle @ l^{-1}
  grad_a = math_ops.matmul(
      math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
  # Symmetrize: (grad_a + grad_a^H) / 2.
  grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
  return grad_a * 0.5
def TriAngSolveCompositeGrad(l, grad):
  """Cholesky gradient via two triangular solves (no explicit inverse)."""
  # Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}

  # Compute ((l^{H} @ grad) * (tril(ones)-1/2*eye)) = middle
  middle = math_ops.matmul(l, grad, adjoint_a=True)
  middle = array_ops.matrix_set_diag(middle,
                                     0.5 * array_ops.matrix_diag_part(middle))
  middle = array_ops.matrix_band_part(middle, -1, 0)

  # Compute l^{-H} @ middle = z
  l_inverse_middle = linalg_ops.matrix_triangular_solve(l, middle, adjoint=True)

  # We need to compute z @ l^{-1}. With matrix_triangular_solve we
  # actually compute l^{-H} @ z^{H} = grad. Since we later add grad^{H}
  # we can omit the conjugate transpose here.
  z_h = math_ops.conj(array_ops.matrix_transpose(l_inverse_middle))
  grad_a = linalg_ops.matrix_triangular_solve(l, z_h, adjoint=True)
  # Symmetrize: (grad_a + grad_a^H) / 2.
  grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))

  return grad_a * 0.5
def MatrixInverseCompositeGrad(l, grad):
  """Cholesky gradient computed via a full matrix inverse of l."""
  return _GradWithInverseL(l, linalg_ops.matrix_inverse(l), grad)
def TriAngInvCompositeGrad(l, grad):
  """Cholesky gradient via a triangular solve against the identity.

  Computes l^{-1} explicitly with matrix_triangular_solve (exploiting that l
  is lower triangular) and delegates to _GradWithInverseL.
  """
  num_rows = array_ops.shape(l)[-1]
  batch_shape = array_ops.shape(l)[:-2]
  # Solve l @ X = I batch-wise to obtain l^{-1}.
  l_inverse = linalg_ops.matrix_triangular_solve(l,
                                                 linalg_ops.eye(
                                                     num_rows,
                                                     batch_shape=batch_shape,
                                                     dtype=l.dtype))
  return _GradWithInverseL(l, l_inverse, grad)
class CholeskyOpTest(test.TestCase):
  """Forward-pass tests: cholesky(x) must be lower triangular with a positive
  diagonal, and reconstruct x as L @ L^H."""

  def _verifyCholeskyBase(self, sess, x, chol, verification):
    # verification is the graph node recomputing chol @ chol^H.
    chol_np, verification_np = sess.run([chol, verification])
    self.assertAllClose(x, verification_np)
    self.assertShapeEqual(x, chol)
    # Check that the cholesky is lower triangular, and has positive diagonal
    # elements.
    if chol_np.shape[-1] > 0:
      chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
                                           chol_np.shape[-1]))
      for chol_matrix in chol_reshaped:
        self.assertAllClose(chol_matrix, np.tril(chol_matrix))
        self.assertTrue((np.diag(chol_matrix) > 0.0).all())

  def _verifyCholesky(self, x):
    # Verify that LL^T == x.
    with self.test_session(use_gpu=True) as sess:
      chol = linalg_ops.cholesky(x)
      verification = math_ops.matmul(chol, chol, adjoint_b=True)
      self._verifyCholeskyBase(sess, x, chol, verification)

  def testBasic(self):
    """A fixed 3x3 SPD matrix in all four supported dtypes."""
    data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
    for dtype in (np.float32, np.float64):
      self._verifyCholesky(data.astype(dtype))
    for dtype in (np.complex64, np.complex128):
      # Make the matrix Hermitian by adding conjugate off-diagonal parts.
      complex_data = np.tril(1j * data, -1).astype(dtype)
      complex_data += np.triu(-1j * data, 1).astype(dtype)
      complex_data += data
      self._verifyCholesky(complex_data)

  def testBatch(self):
    """Batched inputs, including random real and complex PSD matrices."""
    simple_array = np.array([[[1., 0.], [0., 5.]]])  # shape (1, 2, 2)
    self._verifyCholesky(simple_array)
    self._verifyCholesky(np.vstack((simple_array, simple_array)))
    odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
    self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))

    # Generate random positive-definite matrices.
    matrices = np.random.rand(10, 5, 5)
    for i in xrange(10):
      matrices[i] = np.dot(matrices[i].T, matrices[i])
    self._verifyCholesky(matrices)

    # Generate random complex valued positive-definite matrices.
    matrices = np.random.rand(10, 5, 5) + 1j * np.random.rand(10, 5, 5)
    for i in xrange(10):
      matrices[i] = np.dot(matrices[i].T.conj(), matrices[i])
    self._verifyCholesky(matrices)

  def testNonSquareMatrix(self):
    """Non-square inputs must be rejected at graph-construction time."""
    with self.assertRaises(ValueError):
      linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
    with self.assertRaises(ValueError):
      linalg_ops.cholesky(
          np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
                   ]))

  def testWrongDimensions(self):
    """Rank-1 inputs must be rejected."""
    tensor3 = constant_op.constant([1., 2.])
    with self.assertRaises(ValueError):
      linalg_ops.cholesky(tensor3)
    # NOTE(review): this second block is an exact duplicate of the first —
    # likely a copy-paste leftover meant to cover a different input.
    with self.assertRaises(ValueError):
      linalg_ops.cholesky(tensor3)

  def testNotInvertibleCPU(self):
    # The input should be invertible.
    with self.test_session(use_gpu=False):
      with self.assertRaisesOpError(
          "Cholesky decomposition was not successful. The"
          " input might not be valid."):
        # All rows of the matrix below add to zero
        self._verifyCholesky(
            np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]]))

  def testEmpty(self):
    """Empty batches and 0x0 matrices are valid inputs."""
    self._verifyCholesky(np.empty([0, 2, 2]))
    self._verifyCholesky(np.empty([2, 0, 0]))
class CholeskyGradTest(test.TestCase):
  """Gradient tests: numeric finite differences vs. the registered gradient."""

  # Block size used by the backprop kernel; shapes are chosen around it to
  # exercise the single-block and multi-block code paths.
  _backprop_block_size = 32

  def getShapes(self, shapeList):
    # Yields (rows, cols) pairs with cols ~ 1.2 * rows.
    return ((elem, int(np.floor(1.2 * elem))) for elem in shapeList)

  def testSmallMatrices(self):
    np.random.seed(0)
    shapes = self.getShapes([1, 2, 10])
    self.runFiniteDifferences(
        shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64))

  # TODO(eriche): investigate why this test fails only in opensource
  # ubuntu gpu python3
  # def testSmallMatricesComplex(self):
  #   np.random.seed(0)
  #   shapes = self.getShapes([1, 2, 10])
  #   self.runFiniteDifferences(
  #       shapes, dtypes=(dtypes_lib.complex64, dtypes_lib.complex128))

  def testOneBlockMatrices(self):
    np.random.seed(0)
    shapes = self.getShapes([self._backprop_block_size + 1])
    self.runFiniteDifferences(
        shapes,
        dtypes=(dtypes_lib.float32, dtypes_lib.float64),
        scalarTest=True)

  def testTwoBlockMatrixFloat(self):
    np.random.seed(0)
    shapes = self.getShapes([2 * self._backprop_block_size + 1])
    self.runFiniteDifferences(
        shapes, dtypes=(dtypes_lib.float32,), scalarTest=True)

  def testTwoBlockMatrixDouble(self):
    np.random.seed(0)
    shapes = self.getShapes([2 * self._backprop_block_size + 1])
    self.runFiniteDifferences(
        shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)

  def testTwoBlockMatrixComplexFloat(self):
    np.random.seed(0)
    shapes = self.getShapes([2 * self._backprop_block_size + 1])
    self.runFiniteDifferences(
        shapes, dtypes=(dtypes_lib.complex64,), scalarTest=True)

  def testTwoBlockMatrixComplexDouble(self):
    np.random.seed(0)
    shapes = self.getShapes([2 * self._backprop_block_size + 1])
    self.runFiniteDifferences(
        shapes, dtypes=(dtypes_lib.complex128,), scalarTest=True)

  def testAgainstSpecialized(self):
    """The composite gradient must match the dedicated CholeskyGrad kernel."""
    np.random.seed(0)
    data = np.random.randn(33, 33).astype(np.float32)
    data = np.matmul(data, data.T)
    grad_data = np.random.randn(*data.shape).astype(np.float32)

    with ops.Graph().as_default(), self.test_session(use_gpu=False) as s:
      x = constant_op.constant(data, dtypes_lib.float32)
      chol = linalg_ops.cholesky(x)
      composite_grad = gradients_impl.gradients(chol, x, grad_data)[0]
      specialized_grad = SpecializedGrad(chol, grad_data)
      reference, actual = s.run([specialized_grad, composite_grad])
      self.assertAllClose(reference, actual)

  def runFiniteDifferences(self,
                           shapes,
                           dtypes=(dtypes_lib.float32, dtypes_lib.float64,
                                   dtypes_lib.complex64, dtypes_lib.complex128),
                           scalarTest=False):
    """Checks the analytic gradient against finite differences.

    With scalarTest=True the input is a scalar scaled into a matrix and the
    output reduced to a scalar mean, keeping the Jacobian small for large
    shapes.
    """
    with self.test_session(use_gpu=True):
      for shape in shapes:
        for batch in False, True:
          for dtype in dtypes:
            if not scalarTest:
              data = np.random.randn(shape[0], shape[1])
              if dtype.is_complex:
                data = data.astype(np.complex64)
                data += 1j * np.random.randn(shape[0], shape[1])
              x = constant_op.constant(data, dtype)
              # x @ x^H / n is positive definite (up to rank deficiency).
              tensor = math_ops.matmul(
                  x, math_ops.conj(array_ops.transpose(x))) / shape[0]
            else:
              # This is designed to be a faster test for larger matrices.
              data = np.random.randn()
              if dtype.is_complex:
                data = np.complex64(data)
                data += 1j * np.random.randn()
              x = constant_op.constant(data, dtype)
              R = constant_op.constant(
                  np.random.randn(shape[0], shape[1]), dtype)
              e = math_ops.multiply(R, x)
              tensor = math_ops.matmul(
                  e, math_ops.conj(array_ops.transpose(e))) / shape[0]

            # Inner-most matrices in tensor are positive definite.
            if batch:
              tensor = array_ops.tile(
                  array_ops.expand_dims(tensor, 0), [4, 1, 1])
            y = linalg_ops.cholesky(tensor)
            if scalarTest:
              y = math_ops.reduce_mean(y)
            error = gradient_checker.compute_gradient_error(
                x, x._shape_as_list(), y, y._shape_as_list())
            tf_logging.info("error = %f", error)
            # Tolerances loosen with dtype precision.
            if dtype == dtypes_lib.float64:
              self.assertLess(error, 1e-5)
            elif dtype == dtypes_lib.complex128:
              self.assertLess(error, 5e-5)
            else:
              self.assertLess(error, 5e-3)
class CholeskyBenchmark(test.Benchmark):
  """Benchmarks for the Cholesky op and its gradient variants on CPU/GPU."""

  # (rows, cols) and batched (batch..., rows, cols) shapes to time.
  shapes = [
      (4, 4),
      (10, 10),
      (16, 16),
      (101, 101),
      (256, 256),
      (1000, 1000),
      (1024, 1024),
      (2048, 2048),
      (513, 2, 2),
      (513, 8, 8),
      (513, 256, 256),
      (4, 513, 2, 2),
  ]

  def _GenerateMatrix(self, shape):
    # Diagonally dominant SPD matrix: ones/(2n) plus the identity, tiled over
    # any leading batch dimensions.
    batch_shape = shape[:-2]
    shape = shape[-2:]
    assert shape[0] == shape[1]
    n = shape[0]
    matrix = np.ones(shape).astype(np.float32) / (
        2.0 * n) + np.diag(np.ones(n).astype(np.float32))
    return np.tile(matrix, batch_shape + (1, 1))

  def benchmarkCholeskyOp(self):
    for shape in self.shapes:
      with ops.Graph().as_default(), \
          session.Session() as sess, \
          ops.device("/cpu:0"):
        matrix = variables.Variable(self._GenerateMatrix(shape))
        l = linalg_ops.cholesky(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(
                l,),
            min_iters=25,
            name="cholesky_cpu_{shape}".format(shape=shape))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session() as sess, \
            ops.device("/device:GPU:0"):
          matrix = variables.Variable(self._GenerateMatrix(shape))
          l = linalg_ops.cholesky(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(
                  l,),
              min_iters=25,
              name="cholesky_gpu_{shape}".format(shape=shape))

  def benchmarkGradVariants(self):

    def _BenchmarkGrad(grad_fn, name, device):
      # Times one gradient implementation over all shapes on one device.
      for shape in self.shapes:
        matrix = self._GenerateMatrix(shape)
        with ops.Graph().as_default(), \
            session.Session() as sess, \
            ops.device(device):
          l = variables.Variable(np.linalg.cholesky(matrix))
          grad_matrix = variables.Variable(
              np.random.randn(*matrix.shape).astype(np.float32))
          grad = grad_fn(l, grad_matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(
                  grad,),
              min_iters=25,
              name="{name}_{dev}_{shape}".format(
                  name=name, dev=grad.device, shape=shape))

    if test.is_gpu_available(True):
      _BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
                     "/device:GPU:0")
      _BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
                     "/device:GPU:0")
      _BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
                     "/device:GPU:0")

    _BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
                   "/cpu:0")
    _BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
                   "/cpu:0")
    _BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
                   "/cpu:0")
    # The specialized kernel has no GPU registration here, so CPU only.
    _BenchmarkGrad(SpecializedGrad, "specialized", "/cpu:0")
if __name__ == "__main__":
  # Runs the tests (and benchmarks when --benchmarks is passed).
  test.main()
| apache-2.0 |
mikeing2001/LoopDetection | pox/forwarding/hub.py | 32 | 1184 | # Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Turns your complex OpenFlow switches into stupid hubs.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpidToStr
# Module-level POX logger for this component.
log = core.getLogger()
def _handle_ConnectionUp (event):
  """Install a single catch-all flood rule on each switch that connects."""
  flood_rule = of.ofp_flow_mod()
  flood_action = of.ofp_action_output(port = of.OFPP_FLOOD)
  flood_rule.actions.append(flood_action)
  event.connection.send(flood_rule)
  log.info("Hubifying %s", dpidToStr(event.dpid))
def launch ():
  """POX entry point: hook ConnectionUp so every new switch becomes a hub."""
  core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
  log.info("Hub running.")
| gpl-3.0 |
madelynfreed/rlundo | venv/lib/python2.7/site-packages/yaml/resolver.py | 474 | 8972 |
__all__ = ['BaseResolver', 'Resolver']
from error import *
from nodes import *
import re
class ResolverError(YAMLError):
    """Raised when an invalid path resolver is registered (see add_path_resolver)."""
    pass
class BaseResolver(object):
DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {}
yaml_path_resolvers = {}
def __init__(self):
self.resolver_exact_paths = []
self.resolver_prefix_paths = []
def add_implicit_resolver(cls, tag, regexp, first):
if not 'yaml_implicit_resolvers' in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
add_implicit_resolver = classmethod(add_implicit_resolver)
def add_path_resolver(cls, tag, path, kind=None):
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
# tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
# a mapping value that corresponds to a scalar key which content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, basestring) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (basestring, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
add_path_resolver = classmethod(add_path_resolver)
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
    """Pop the path-resolver state pushed by ``descend_resolver``."""
    if self.yaml_path_resolvers:
        self.resolver_exact_paths.pop()
        self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind,
        current_node, current_index):
    """Return True if ``path[depth-1]`` matches the current position.

    ``node_check`` constrains the parent node (by tag string or by node
    class); ``index_check`` constrains how the child is reached: a string
    matches a mapping key's scalar value, an int matches a sequence
    position, True means "any mapping key", False/None mean "any
    sequence item".
    """
    node_check, index_check = path[depth-1]
    if isinstance(node_check, basestring):
        # A string node check is compared against the node's tag.
        if current_node.tag != node_check:
            return
    elif node_check is not None:
        # Otherwise it is a node class (Scalar/Sequence/MappingNode).
        if not isinstance(current_node, node_check):
            return
    # index_check True means "any mapping key": a positional (sequence)
    # index therefore does not match.
    if index_check is True and current_index is not None:
        return
    # False/None mean "any sequence item": a missing index fails.
    if (index_check is False or index_check is None) \
            and current_index is None:
        return
    if isinstance(index_check, basestring):
        # Match a specific mapping key by its scalar value.
        if not (isinstance(current_index, ScalarNode)
                and index_check == current_index.value):
            return
    elif isinstance(index_check, int) and not isinstance(index_check, bool):
        # Match a specific sequence position (bool is excluded because
        # True/False are ints but carry wildcard semantics above).
        if index_check != current_index:
            return
    return True
def resolve(self, kind, value, implicit):
    """Resolve the tag for a node of the given kind and string value.

    Plain (implicit) scalars are matched against the registered implicit
    resolvers first; then path resolvers are consulted; finally the
    per-kind default tag is returned.
    """
    if kind is ScalarNode and implicit[0]:
        # Candidate resolvers are indexed by the first character of the
        # value; resolvers registered under None match any first char.
        if value == u'':
            resolvers = self.yaml_implicit_resolvers.get(u'', [])
        else:
            resolvers = self.yaml_implicit_resolvers.get(value[0], [])
        resolvers += self.yaml_implicit_resolvers.get(None, [])
        for tag, regexp in resolvers:
            if regexp.match(value):
                return tag
        # NOTE(review): `implicit` is reassigned but not read again below;
        # kept as-is to preserve the original behavior.
        implicit = implicit[1]
    if self.yaml_path_resolvers:
        # Path resolvers: prefer an exact kind match, then the wildcard.
        exact_paths = self.resolver_exact_paths[-1]
        if kind in exact_paths:
            return exact_paths[kind]
        if None in exact_paths:
            return exact_paths[None]
    if kind is ScalarNode:
        return self.DEFAULT_SCALAR_TAG
    elif kind is SequenceNode:
        return self.DEFAULT_SEQUENCE_TAG
    elif kind is MappingNode:
        return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
    # Concrete resolver class; the module-level calls below preload it
    # with the standard YAML 1.1 implicit scalar tags (bool, float, int,
    # merge, null, timestamp, value).
    pass
# Register the standard YAML 1.1 implicit scalar resolvers on the concrete
# Resolver class. The last argument lists the possible first characters of
# a matching plain scalar, used to index resolver lookup by first char.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:bool',
        re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
                    |true|True|TRUE|false|False|FALSE
                    |on|On|ON|off|Off|OFF)$''', re.X),
        list(u'yYnNtTfFoO'))

Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:float',
        re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
                    |\.[0-9_]+(?:[eE][-+][0-9]+)?
                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
                    |[-+]?\.(?:inf|Inf|INF)
                    |\.(?:nan|NaN|NAN))$''', re.X),
        list(u'-+0123456789.'))

Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:int',
        re.compile(ur'''^(?:[-+]?0b[0-1_]+
                    |[-+]?0[0-7_]+
                    |[-+]?(?:0|[1-9][0-9_]*)
                    |[-+]?0x[0-9a-fA-F_]+
                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
        list(u'-+0123456789'))

Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:merge',
        re.compile(ur'^(?:<<)$'),
        [u'<'])

Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:null',
        re.compile(ur'''^(?: ~
                    |null|Null|NULL
                    | )$''', re.X),
        [u'~', u'n', u'N', u''])

Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:timestamp',
        re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
                     (?:[Tt]|[ \t]+)[0-9][0-9]?
                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
        list(u'0123456789'))

Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:value',
        re.compile(ur'^(?:=)$'),
        [u'='])

# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:yaml',
        re.compile(ur'^(?:!|&|\*)$'),
        list(u'!&*'))
| gpl-3.0 |
jocave/snapcraft | integration_tests/test_plainbox_provider_plugin.py | 8 | 1165 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import FileExists
import integration_tests
class PlainboxProviderPluginTestCase(integration_tests.TestCase):

    def test_snap_simple_provider(self):
        """Staging a simple plainbox provider produces the .provider file."""
        project_dir = 'simple-plainbox-provider'
        self.run_snapcraft('stage', project_dir)

        provider_file = os.path.join(
            project_dir, 'stage', 'providers', 'simple-plainbox-provider',
            'plainbox-provider-simple.provider')
        self.assertThat(provider_file, FileExists())
| gpl-3.0 |
Onirik79/aaritmud | src/controllers/game_connection_comet.py | 1 | 1690 | # -*- coding: utf-8 -*-
"""
Modulo per la gestione della connessione al gioco con tecnologia simil-comet.
"""
#= IMPORT ======================================================================
from twisted.web.server import NOT_DONE_YET
from src.web_resource import WebResource
#= CLASSI ======================================================================
class GameConnectionCometPage(WebResource):
    """
    Controller for the comet-style game connection.
    """
    TITLE = "__game_connection_comet__"

    # Both GET and POST require an authenticated account with an
    # existing player character.
    ACCOUNT_MUST_EXIST_IN_GET = True
    ACCOUNT_MUST_EXIST_IN_POST = True

    PLAYER_MUST_EXIST_IN_GET = True
    PLAYER_MUST_EXIST_IN_POST = True

    def create_header(self, request, conn):
        # The game page is rendered bare, without the standard chrome.
        return ""
    #- End of method -

    def create_menu(self, request, conn):
        return ""
    #- End of method -

    def create_square(self, request, conn):
        return ""
    #- End of method -

    def create_footer(self, request, conn):
        return ""
    #- End of method -

    def render_GET(self, request, conn):
        """Attach the long-lived comet request and put the player in game."""
        conn.player.game_request = request

        # Prepare the deferred fired when the request terminates.
        conn.defer_exit_from_game = request.notifyFinish()
        # The normal callback never fires in practice: when a player closes
        # the page, the event raises an exception that triggers the errback
        # instead, which is why both point at the same handler.
        conn.defer_exit_from_game.addCallback(conn.player.exit_from_game)
        conn.defer_exit_from_game.addErrback(conn.player.exit_from_game)

        conn.player.enter_in_game()
        conn.player.send_prompt()

        return NOT_DONE_YET
    #- End of method -
| gpl-2.0 |
DirkHoffmann/indico | indico/modules/events/notes/models/notes_test.py | 4 | 6197 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from sqlalchemy import inspect
from sqlalchemy.exc import IntegrityError
from indico.modules.events.notes.models.notes import EventNote, EventNoteRevision, RenderMode
@pytest.fixture
def note(db, dummy_event):
    """Return an ``EventNote`` linked to the dummy event, detached from the session."""
    note = EventNote(object=dummy_event)
    db.session.expunge(note)  # keep it out of the SA session (linking it to the event adds it)
    return note
@pytest.mark.parametrize('deleted', (True, False))
def test_create_revision_previously_deleted(db, note, dummy_user, deleted):
    """Creating a revision un-deletes the note, whatever its prior state."""
    note.is_deleted = deleted
    note.create_revision(RenderMode.html, 'revision', dummy_user)
    assert not note.is_deleted
def test_revisions(db, note, dummy_user):
    """Each new revision becomes the current one and older ones are kept."""
    rev1 = note.create_revision(RenderMode.html, 'first', dummy_user)
    db.session.add(note)
    db.session.flush()
    assert rev1 == note.current_revision
    assert note.current_revision.html == note.html == 'first'
    assert set(note.revisions) == {rev1}
    rev2 = note.create_revision(RenderMode.html, 'second', dummy_user)
    db.session.flush()
    assert rev2 == note.current_revision
    assert note.current_revision.html == note.html == 'second'
    assert set(note.revisions) == {rev2, rev1}  # order_by is only applied when loading so just check the contents here
def test_change_current_revision(db, note, dummy_user):
    """The current revision can be switched back to an older one."""
    rev1 = note.create_revision(RenderMode.html, 'first', dummy_user)
    rev2 = note.create_revision(RenderMode.html, 'second', dummy_user)
    assert note.current_revision == rev2
    db.session.flush()
    note.current_revision = rev1
    db.session.flush()
    assert note.current_revision.html == note.html == 'first'
    assert set(note.revisions) == {rev2, rev1}
def test_clear_current_revision(note, dummy_user):
    """Unsetting the current revision is rejected with a ValueError."""
    note.create_revision(RenderMode.html, 'first', dummy_user)
    with pytest.raises(ValueError):
        note.current_revision = None
def test_delete_current_revision(db, note, dummy_user):
    """Removing the current revision violates the FK constraint on flush."""
    note.create_revision(RenderMode.html, 'first', dummy_user)
    rev = note.create_revision(RenderMode.html, 'second', dummy_user)
    db.session.add(note)
    db.session.flush()
    note.revisions.remove(rev)
    with pytest.raises(IntegrityError):
        db.session.flush()
def test_create_same_revision(db, create_user, note, dummy_user):
    """Re-creating an identical revision is a no-op keeping the original author."""
    user = create_user(123)
    note.create_revision(RenderMode.html, 'test', dummy_user)
    note.create_revision(RenderMode.html, 'test', user)
    db.session.add(note)
    db.session.flush()
    assert len(note.revisions) == 1
    assert note.current_revision.user == dummy_user
def test_delete_other_revision(db, note, dummy_user):
    """A non-current revision can be removed and is deleted from the DB."""
    rev1 = note.create_revision(RenderMode.html, 'first', dummy_user)
    rev2 = note.create_revision(RenderMode.html, 'second', dummy_user)
    db.session.add(note)
    db.session.flush()
    note.revisions.remove(rev1)
    db.session.flush()
    assert set(note.revisions) == {rev2}
    assert EventNoteRevision.query.count() == 1
def test_modify_old_revision_source(db, note, dummy_user):
    """Changing an old revision's source does not affect the rendered note."""
    rev1 = note.create_revision(RenderMode.html, 'first', dummy_user)
    rev2 = note.create_revision(RenderMode.html, 'second', dummy_user)
    db.session.add(note)
    db.session.flush()
    rev1.source = 'rewritten history'
    assert note.html == rev2.html == 'second'
def test_modify_current_revision_source(db, note, dummy_user):
    """Changing the current revision's source changes the rendered note."""
    note.create_revision(RenderMode.html, 'first', dummy_user)
    rev = note.create_revision(RenderMode.html, 'second', dummy_user)
    db.session.add(note)
    db.session.flush()
    note.current_revision.source = 'rewritten history'
    assert note.html == rev.html == 'rewritten history'
def test_render_html(note, dummy_user):
    """HTML source is rendered verbatim."""
    note.create_revision(RenderMode.html, '<strong>test</strong>', dummy_user)
    assert note.html == note.current_revision.html == '<strong>test</strong>'
def test_render_markdown(note, dummy_user):
    """Markdown source is converted to HTML when rendered."""
    note.create_revision(RenderMode.markdown, '**test**\n*foo*', dummy_user)
    assert note.html == note.current_revision.html == '<p><strong>test</strong><br>\n<em>foo</em></p>'
def test_get_for_linked_object(note, dummy_user, create_event):
    """Lookup returns the note for its object and None for other objects."""
    note.create_revision(RenderMode.html, 'test', dummy_user)
    assert EventNote.get_for_linked_object(note.object) == note
    assert EventNote.get_for_linked_object(create_event(123)) is None
@pytest.mark.parametrize('preload', (True, False))
def test_get_for_linked_object_preload(note, dummy_user, count_queries, preload):
    """With ``preload_event`` a later lookup needs no additional queries."""
    note.create_revision(RenderMode.html, 'test', dummy_user)
    assert EventNote.get_for_linked_object(note.object, preload_event=preload)
    with count_queries() as cnt:
        EventNote.get_for_linked_object(note.object)
    assert (cnt() == 0) == preload
def test_get_for_linked_object_deleted(note, dummy_user):
    """Deleted notes are excluded from lookup."""
    note.create_revision(RenderMode.html, 'test', dummy_user)
    note.is_deleted = True
    assert EventNote.get_for_linked_object(note.object) is None
def test_get_or_create(db, dummy_user, dummy_event, create_event):
    """``get_or_create`` reuses existing (even deleted) notes per object."""
    note = EventNote.get_or_create(dummy_event)
    assert note is not None
    assert not inspect(note).persistent  # new object
    note.create_revision(RenderMode.html, 'test', dummy_user)
    note.is_deleted = True
    db.session.flush()
    # get deleted one
    assert EventNote.get_or_create(dummy_event) == note
    assert inspect(note).persistent
    note.is_deleted = False
    db.session.flush()
    # same if it's not deleted
    assert EventNote.get_or_create(dummy_event) == note
    assert inspect(note).persistent
    # other event should create a new one
    other = EventNote.get_or_create(create_event(123))
    other.create_revision(RenderMode.html, 'test', dummy_user)
    assert other != note
    assert not inspect(other).persistent
def test_delete(note, dummy_user):
    """Deleting a note adds an empty revision and marks it deleted."""
    note.create_revision(RenderMode.html, 'test', dummy_user)
    note.delete(dummy_user)
    assert len(note.revisions) == 2
    assert note.html == ''
    assert note.is_deleted
| gpl-3.0 |
sssemil/cjdns | node_build/dependencies/libuv/build/gyp/test/win/gyptest-cl-optimizations.py | 247 | 3416 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure optimization settings are extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['ninja'])

  CHDIR = 'compiler-flags'
  test.run_gyp('optimizations.gyp', chdir=CHDIR)

  # It's hard to map flags to output contents in a non-fragile way (especially
  # handling both 2008/2010), so just verify the correct ninja command line
  # contents. Each entry maps an object name to (should_be_present, flag)
  # pairs that are checked against its generated .ninja file, in order.
  expectations = [
      ('test_opt_off', [(True, 'cflags = /Od')]),
      ('test_opt_lev_size', [(True, 'cflags = /O1')]),
      ('test_opt_lev_speed', [(True, 'cflags = /O2')]),
      ('test_opt_lev_max', [(True, 'cflags = /Ox')]),
      # /O2 is set by default if no optimization level is specified.
      ('test_opt_unset',
       [(False, '/Od'), (False, '/O1'), (False, '/Ox'), (True, '/O2')]),
      ('test_opt_fpo', [(True, '/Oy'), (False, '/Oy-')]),
      ('test_opt_fpo_off', [(True, '/Oy-')]),
      ('test_opt_intrinsic', [(True, '/Oi'), (False, '/Oi-')]),
      ('test_opt_intrinsic_off', [(True, '/Oi-')]),
      ('test_opt_inline_off', [(True, '/Ob0')]),
      ('test_opt_inline_manual', [(True, '/Ob1')]),
      ('test_opt_inline_auto', [(True, '/Ob2')]),
      ('test_opt_neither', [(False, '/Os'), (False, '/Ot')]),
      ('test_opt_size', [(True, '/Os')]),
      ('test_opt_speed', [(True, '/Ot')]),
      ('test_opt_wpo', [(True, '/GL')]),
      ('test_opt_sp', [(True, '/GF')]),
      ('test_opt_sp_off', [(False, '/GF')]),
      ('test_opt_fso', [(True, '/GT')]),
      ('test_opt_fso_off', [(False, '/GT')]),
  ]
  for obj_name, checks in expectations:
    ninja_file = test.built_file_path('obj/%s.ninja' % obj_name, chdir=CHDIR)
    for present, flag in checks:
      if present:
        test.must_contain(ninja_file, flag)
      else:
        test.must_not_contain(ninja_file, flag)

  test.pass_test()
| gpl-3.0 |
terbolous/SickRage | lib/guessit/rules/__init__.py | 20 | 2370 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Rebulk object default builder
"""
from rebulk import Rebulk
from .markers.path import path
from .markers.groups import groups
from .properties.episodes import episodes
from .properties.container import container
from .properties.format import format_
from .properties.video_codec import video_codec
from .properties.audio_codec import audio_codec
from .properties.screen_size import screen_size
from .properties.website import website
from .properties.date import date
from .properties.title import title
from .properties.episode_title import episode_title
from .properties.language import language
from .properties.country import country
from .properties.release_group import release_group
from .properties.other import other
from .properties.edition import edition
from .properties.cds import cds
from .properties.bonus import bonus
from .properties.film import film
from .properties.part import part
from .properties.crc import crc
from .properties.mimetype import mimetype
from .properties.type import type_
from .processors import processors
def rebulk_builder():
    """
    Default builder for main Rebulk object used by api.
    :return: Main Rebulk object
    :rtype: Rebulk
    """
    rebulk = Rebulk()

    # Registration order matters: markers first, then properties, then
    # post-processors and type detection.
    builders = (
        path, groups,
        episodes, container, format_, video_codec, audio_codec, screen_size,
        website, date, title, episode_title, language, country,
        release_group, other, edition, cds, bonus, film, part, crc,
        processors, mimetype, type_,
    )
    for builder in builders:
        rebulk.rebulk(builder())

    def customize_properties(properties):
        """
        Customize default rebulk properties
        """
        count = properties['count']
        del properties['count']

        properties['season_count'] = count
        properties['episode_count'] = count

        return properties

    rebulk.customize_properties = customize_properties

    return rebulk
| gpl-3.0 |
coldmind/django | django/utils/cache.py | 99 | 11172 | """
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
from __future__ import unicode_literals
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import caches
from django.utils.encoding import force_bytes, force_text, iri_to_uri
from django.utils.http import http_date
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
    """
    This function patches the Cache-Control header by adding all
    keyword arguments to it. The transformation is as follows:

    * All keyword parameter names are turned to lowercase, and underscores
      are converted to hyphens.
    * If the value of a parameter is True (exactly True, not just a
      true value), only the parameter name is added to the header.
    * All other parameters are added with their value, after applying
      str() to it.
    """
    def dictvalue(t):
        # Serialize one (name, value) pair back into a header directive.
        if t[1] is True:
            return t[0]
        else:
            return '%s=%s' % (t[0], t[1])

    if response.has_header('Cache-Control'):
        # _to_tuple() implements exactly the "name[=value]" parsing this
        # function previously duplicated inline; reuse it so the parsing
        # logic matches get_max_age().
        cc = dict(_to_tuple(el) for el in
                  cc_delim_re.split(response['Cache-Control']))
    else:
        cc = {}

    # If there's already a max-age header but we're being asked to set a new
    # max-age, use the minimum of the two ages. In practice this happens when
    # a decorator and a piece of middleware both operate on a given view.
    if 'max-age' in cc and 'max_age' in kwargs:
        kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])

    # Allow overriding private caching and vice versa
    if 'private' in cc and 'public' in kwargs:
        del cc['private']
    elif 'public' in cc and 'private' in kwargs:
        del cc['public']

    for (k, v) in kwargs.items():
        cc[k.replace('_', '-')] = v
    cc = ', '.join(dictvalue(el) for el in cc.items())
    response['Cache-Control'] = cc
def get_max_age(response):
    """
    Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
    """
    if not response.has_header('Cache-Control'):
        return
    cc = dict(_to_tuple(el) for el in
              cc_delim_re.split(response['Cache-Control']))
    if 'max-age' in cc:
        try:
            return int(cc['max-age'])
        except (ValueError, TypeError):
            # Malformed value (e.g. "max-age=abc"): treat as absent.
            pass
def _set_response_etag(response):
    """Set the ETag header to the MD5 of the response content."""
    # Streaming responses have no in-memory content to hash, so they are
    # left untouched.
    if not response.streaming:
        response['ETag'] = '"%s"' % hashlib.md5(response.content).hexdigest()
    return response
def patch_response_headers(response, cache_timeout=None):
    """
    Adds some useful headers to the given HttpResponse object:
        ETag, Last-Modified, Expires and Cache-Control

    Each header is only added if it isn't already set.

    cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
    by default.
    """
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    if cache_timeout < 0:
        cache_timeout = 0  # Can't have max-age negative
    if settings.USE_ETAGS and not response.has_header('ETag'):
        if hasattr(response, 'render') and callable(response.render):
            # Unrendered (template) response: the content doesn't exist yet,
            # so compute the ETag only after rendering.
            response.add_post_render_callback(_set_response_etag)
        else:
            response = _set_response_etag(response)
    if not response.has_header('Last-Modified'):
        response['Last-Modified'] = http_date()
    if not response.has_header('Expires'):
        response['Expires'] = http_date(time.time() + cache_timeout)
    patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
    """
    Adds headers to a response to indicate that a page should never be cached.
    """
    # cache_timeout=-1 is clamped to 0, producing an already-expired
    # Expires header and max-age=0.
    patch_response_headers(response, cache_timeout=-1)
    patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
def patch_vary_headers(response, newheaders):
    """
    Adds (or updates) the "Vary" header in the given HttpResponse object.
    newheaders is a list of header names that should be in "Vary". Existing
    headers in "Vary" aren't removed.
    """
    # Note that we need to keep the original order intact, because cache
    # implementations may rely on the order of the Vary contents in, say,
    # computing an MD5 hash.
    if response.has_header('Vary'):
        vary_headers = cc_delim_re.split(response['Vary'])
    else:
        vary_headers = []
    # Header comparison is case-insensitive, so compare lowercased names.
    existing = set()
    for header in vary_headers:
        existing.add(header.lower())
    combined = list(vary_headers)
    for newheader in newheaders:
        if newheader.lower() not in existing:
            combined.append(newheader)
    response['Vary'] = ', '.join(combined)
def has_vary_header(response, header_query):
    """
    Checks to see if the response has a given header name in its Vary header.
    """
    if not response.has_header('Vary'):
        return False
    # Header names are case-insensitive, so compare lowercased.
    existing = {header.lower() for header in cc_delim_re.split(response['Vary'])}
    return header_query.lower() in existing
def _i18n_cache_key_suffix(request, cache_key):
    """If necessary, adds the current locale or time zone to the cache key."""
    if settings.USE_I18N or settings.USE_L10N:
        # first check if LocaleMiddleware or another middleware added
        # LANGUAGE_CODE to request, then fall back to the active language
        # which in turn can also fall back to settings.LANGUAGE_CODE
        cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
    if settings.USE_TZ:
        # The datetime module doesn't restrict the output of tzname().
        # Windows is known to use non-standard, locale-dependent names.
        # User-defined tzinfo classes may return absolutely anything.
        # Hence this paranoid conversion to create a valid cache key.
        tz_name = force_text(get_current_timezone_name(), errors='ignore')
        cache_key += '.%s' % tz_name.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
    return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
    """Returns a cache key from the headers given in the header list."""
    # Hash the values of all Vary'd request headers so that requests that
    # differ in any of them get distinct cache keys.
    ctx = hashlib.md5()
    for header in headerlist:
        value = request.META.get(header)
        if value is not None:
            ctx.update(force_bytes(value))
    url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
    cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
        key_prefix, method, url.hexdigest(), ctx.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
    """Returns a cache key for the header cache."""
    # The header cache stores, per URL, the list of request headers that
    # participate in the page cache key (derived from the Vary header).
    url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
    cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
        key_prefix, url.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
    """
    Returns a cache key based on the request URL and query. It can be used
    in the request phase because it pulls the list of headers to take into
    account from the global URL registry and uses those to build a cache key
    to check against.

    If there is no headerlist stored, the page needs to be rebuilt, so this
    function returns None.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    cache_key = _generate_cache_header_key(key_prefix, request)
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    headerlist = cache.get(cache_key)
    if headerlist is not None:
        return _generate_cache_key(request, method, headerlist, key_prefix)
    else:
        # No headerlist yet: the response has never been cached for this URL.
        return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
    """
    Learns what headers to take into account for some request URL from the
    response object. It stores those headers in a global URL registry so that
    later access to that URL will know what headers to take into account
    without building the response object itself. The headers are named in the
    Vary header of the response, but we want to prevent response generation.

    The list of headers to use for cache key generation is stored in the same
    cache as the pages themselves. If the cache ages some data out of the
    cache, this just means that we have to build the response once to get at
    the Vary header and so at the list of headers to use for the cache key.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    cache_key = _generate_cache_header_key(key_prefix, request)
    if cache is None:
        cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
    if response.has_header('Vary'):
        is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
        # If i18n or l10n are used, the generated cache key will be suffixed
        # with the current locale. Adding the raw value of Accept-Language is
        # redundant in that case and would result in storing the same content
        # under multiple keys in the cache. See #18191 for details.
        headerlist = []
        for header in cc_delim_re.split(response['Vary']):
            # Convert header names to the WSGI environ form, e.g.
            # "Accept-Encoding" -> "HTTP_ACCEPT_ENCODING".
            header = header.upper().replace('-', '_')
            if header == 'ACCEPT_LANGUAGE' and is_accept_language_redundant:
                continue
            headerlist.append('HTTP_' + header)
        headerlist.sort()
        cache.set(cache_key, headerlist, cache_timeout)
        return _generate_cache_key(request, request.method, headerlist, key_prefix)
    else:
        # if there is no Vary header, we still need a cache key
        # for the request.build_absolute_uri()
        cache.set(cache_key, [], cache_timeout)
        return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
| bsd-3-clause |
Workday/OpenFrame | tools/telemetry/third_party/gsutilz/third_party/boto/tests/unit/ec2/autoscale/test_group.py | 90 | 36594 | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import base64
from datetime import datetime
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.ec2.autoscale import AutoScaleConnection
from boto.ec2.autoscale.group import AutoScalingGroup
from boto.ec2.autoscale.policy import ScalingPolicy
from boto.ec2.autoscale.tag import Tag
from boto.ec2.blockdevicemapping import EBSBlockDeviceType, BlockDeviceMapping
from boto.ec2.autoscale import launchconfig, LaunchConfiguration
class TestAutoScaleGroup(AWSMockServiceTestCase):
    """Request-building tests for CreateAutoScalingGroup."""
    connection_class = AutoScaleConnection

    def setUp(self):
        super(TestAutoScaleGroup, self).setUp()

    def default_body(self):
        # Canned XML returned by the mocked service. The tests below only
        # inspect the outgoing request parameters, so the body content
        # (which names a different API action) is never parsed.
        return b"""
            <CreateLaunchConfigurationResponse>
              <ResponseMetadata>
                <RequestId>requestid</RequestId>
              </ResponseMetadata>
            </CreateLaunchConfigurationResponse>
        """

    def test_autoscaling_group_with_termination_policies(self):
        """Termination policies serialize as indexed .member.N parameters."""
        self.set_http_response(status_code=200)
        autoscale = AutoScalingGroup(
            name='foo', launch_config='lauch_config',
            min_size=1, max_size=2,
            termination_policies=['OldestInstance', 'OldestLaunchConfiguration'],
            instance_id='test-id')
        self.service_connection.create_auto_scaling_group(autoscale)
        self.assert_request_parameters({
            'Action': 'CreateAutoScalingGroup',
            'AutoScalingGroupName': 'foo',
            'LaunchConfigurationName': 'lauch_config',
            'MaxSize': 2,
            'MinSize': 1,
            'TerminationPolicies.member.1': 'OldestInstance',
            'TerminationPolicies.member.2': 'OldestLaunchConfiguration',
            'InstanceId': 'test-id',
        }, ignore_params_values=['Version'])

    def test_autoscaling_group_single_vpc_zone_identifier(self):
        """A single subnet string is passed through unchanged."""
        self.set_http_response(status_code=200)
        autoscale = AutoScalingGroup(
            name='foo',
            vpc_zone_identifier='vpc_zone_1')
        self.service_connection.create_auto_scaling_group(autoscale)
        self.assert_request_parameters({
            'Action': 'CreateAutoScalingGroup',
            'AutoScalingGroupName': 'foo',
            'VPCZoneIdentifier': 'vpc_zone_1',
        }, ignore_params_values=['MaxSize', 'MinSize', 'LaunchConfigurationName', 'Version'])

    def test_autoscaling_group_vpc_zone_identifier_list(self):
        """A list of subnets is joined into a comma-separated string."""
        self.set_http_response(status_code=200)
        autoscale = AutoScalingGroup(
            name='foo',
            vpc_zone_identifier=['vpc_zone_1', 'vpc_zone_2'])
        self.service_connection.create_auto_scaling_group(autoscale)
        self.assert_request_parameters({
            'Action': 'CreateAutoScalingGroup',
            'AutoScalingGroupName': 'foo',
            'VPCZoneIdentifier': 'vpc_zone_1,vpc_zone_2',
        }, ignore_params_values=['MaxSize', 'MinSize', 'LaunchConfigurationName', 'Version'])

    def test_autoscaling_group_vpc_zone_identifier_multi(self):
        """An already comma-separated subnet string is passed through."""
        self.set_http_response(status_code=200)
        autoscale = AutoScalingGroup(
            name='foo',
            vpc_zone_identifier='vpc_zone_1,vpc_zone_2')
        self.service_connection.create_auto_scaling_group(autoscale)
        self.assert_request_parameters({
            'Action': 'CreateAutoScalingGroup',
            'AutoScalingGroupName': 'foo',
            'VPCZoneIdentifier': 'vpc_zone_1,vpc_zone_2',
        }, ignore_params_values=['MaxSize', 'MinSize', 'LaunchConfigurationName', 'Version'])
class TestAutoScaleGroupHonorCooldown(AWSMockServiceTestCase):
    """Request-building test for SetDesiredCapacity's HonorCooldown flag."""
    connection_class = AutoScaleConnection

    def default_body(self):
        # Canned success response for the mocked SetDesiredCapacity call.
        return b"""
            <SetDesiredCapacityResponse>
              <ResponseMetadata>
                <RequestId>9fb7e2db-6998-11e2-a985-57c82EXAMPLE</RequestId>
              </ResponseMetadata>
            </SetDesiredCapacityResponse>
        """

    def test_honor_cooldown(self):
        """honor_cooldown=True is serialized as the string 'true'."""
        self.set_http_response(status_code=200)
        self.service_connection.set_desired_capacity('foo', 10, True)
        self.assert_request_parameters({
            'Action': 'SetDesiredCapacity',
            'AutoScalingGroupName': 'foo',
            'DesiredCapacity': 10,
            'HonorCooldown': 'true',
        }, ignore_params_values=['Version'])
class TestScheduledGroup(AWSMockServiceTestCase):
    """Tests PutScheduledUpdateGroupAction request serialization."""

    connection_class = AutoScaleConnection

    # NOTE(review): the original class defined a setUp() that only called
    # super().setUp(); that no-op override has been removed.

    def default_body(self):
        # Canned XML returned by the mocked service.
        return b"""
            <PutScheduledUpdateGroupActionResponse>
              <ResponseMetadata>
                <RequestId>requestid</RequestId>
              </ResponseMetadata>
            </PutScheduledUpdateGroupActionResponse>
        """

    def test_scheduled_group_creation(self):
        """Start/end times serialize as ISO-8601; sizes stay integers."""
        self.set_http_response(status_code=200)
        self.service_connection.create_scheduled_group_action(
            'foo',
            'scheduled-foo',
            desired_capacity=1,
            start_time=datetime(2013, 1, 1, 22, 55, 31),
            end_time=datetime(2013, 2, 1, 22, 55, 31),
            min_size=1,
            max_size=2,
            recurrence='0 10 * * *')
        self.assert_request_parameters({
            'Action': 'PutScheduledUpdateGroupAction',
            'AutoScalingGroupName': 'foo',
            'ScheduledActionName': 'scheduled-foo',
            'MaxSize': 2,
            'MinSize': 1,
            'DesiredCapacity': 1,
            'EndTime': '2013-02-01T22:55:31',
            'StartTime': '2013-01-01T22:55:31',
            'Recurrence': '0 10 * * *',
        }, ignore_params_values=['Version'])
class TestParseAutoScaleGroupResponse(AWSMockServiceTestCase):
    """Verifies DescribeAutoScalingGroups XML parses into AutoScalingGroup."""

    connection_class = AutoScaleConnection

    def default_body(self):
        # One group with one instance, two AZs and two termination policies.
        return b"""
            <DescribeAutoScalingGroupsResult>
              <AutoScalingGroups>
                <member>
                  <Tags/>
                  <SuspendedProcesses/>
                  <AutoScalingGroupName>test_group</AutoScalingGroupName>
                  <HealthCheckType>EC2</HealthCheckType>
                  <CreatedTime>2012-09-27T20:19:47.082Z</CreatedTime>
                  <EnabledMetrics/>
                  <LaunchConfigurationName>test_launchconfig</LaunchConfigurationName>
                  <Instances>
                    <member>
                      <HealthStatus>Healthy</HealthStatus>
                      <AvailabilityZone>us-east-1a</AvailabilityZone>
                      <InstanceId>i-z118d054</InstanceId>
                      <LaunchConfigurationName>test_launchconfig</LaunchConfigurationName>
                      <LifecycleState>InService</LifecycleState>
                    </member>
                  </Instances>
                  <DesiredCapacity>1</DesiredCapacity>
                  <AvailabilityZones>
                    <member>us-east-1c</member>
                    <member>us-east-1a</member>
                  </AvailabilityZones>
                  <LoadBalancerNames/>
                  <MinSize>1</MinSize>
                  <VPCZoneIdentifier/>
                  <HealthCheckGracePeriod>0</HealthCheckGracePeriod>
                  <DefaultCooldown>300</DefaultCooldown>
                  <AutoScalingGroupARN>myarn</AutoScalingGroupARN>
                  <TerminationPolicies>
                    <member>OldestInstance</member>
                    <member>OldestLaunchConfiguration</member>
                  </TerminationPolicies>
                  <MaxSize>2</MaxSize>
                  <InstanceId>Something</InstanceId>
                </member>
              </AutoScalingGroups>
            </DescribeAutoScalingGroupsResult>
        """

    def test_get_all_groups_is_parsed_correctly(self):
        """Every scalar and list attribute of the parsed group is populated."""
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_groups(names=['test_group'])
        self.assertEqual(len(response), 1, response)
        as_group = response[0]
        self.assertEqual(as_group.availability_zones, ['us-east-1c', 'us-east-1a'])
        self.assertEqual(as_group.default_cooldown, 300)
        self.assertEqual(as_group.desired_capacity, 1)
        self.assertEqual(as_group.enabled_metrics, [])
        self.assertEqual(as_group.health_check_period, 0)
        self.assertEqual(as_group.health_check_type, 'EC2')
        self.assertEqual(as_group.launch_config_name, 'test_launchconfig')
        self.assertEqual(as_group.load_balancers, [])
        self.assertEqual(as_group.min_size, 1)
        self.assertEqual(as_group.max_size, 2)
        self.assertEqual(as_group.name, 'test_group')
        self.assertEqual(as_group.suspended_processes, [])
        self.assertEqual(as_group.tags, [])
        self.assertEqual(as_group.termination_policies,
                         ['OldestInstance', 'OldestLaunchConfiguration'])
        self.assertEqual(as_group.instance_id, 'Something')
class TestDescribeTerminationPolicies(AWSMockServiceTestCase):
    """Tests parsing of DescribeTerminationPolicyTypes responses."""

    connection_class = AutoScaleConnection

    def default_body(self):
        return b"""
            <DescribeTerminationPolicyTypesResponse>
              <DescribeTerminationPolicyTypesResult>
                <TerminationPolicyTypes>
                  <member>ClosestToNextInstanceHour</member>
                  <member>Default</member>
                  <member>NewestInstance</member>
                  <member>OldestInstance</member>
                  <member>OldestLaunchConfiguration</member>
                </TerminationPolicyTypes>
              </DescribeTerminationPolicyTypesResult>
              <ResponseMetadata>
                <RequestId>requestid</RequestId>
              </ResponseMetadata>
            </DescribeTerminationPolicyTypesResponse>
        """

    def test_autoscaling_group_with_termination_policies(self):
        """The member names come back as a flat list of strings, in order."""
        self.set_http_response(status_code=200)
        response = self.service_connection.get_termination_policies()
        self.assertListEqual(
            response,
            ['ClosestToNextInstanceHour', 'Default',
             'NewestInstance', 'OldestInstance', 'OldestLaunchConfiguration'])
class TestLaunchConfigurationDescribe(AWSMockServiceTestCase):
    """Tests DescribeLaunchConfigurations parsing and request parameters."""

    connection_class = AutoScaleConnection

    def default_body(self):
        # This is a dummy response
        return b"""
            <DescribeLaunchConfigurationsResponse>
              <DescribeLaunchConfigurationsResult>
                <LaunchConfigurations>
                  <member>
                    <AssociatePublicIpAddress>true</AssociatePublicIpAddress>
                    <SecurityGroups/>
                    <CreatedTime>2013-01-21T23:04:42.200Z</CreatedTime>
                    <KernelId/>
                    <LaunchConfigurationName>my-test-lc</LaunchConfigurationName>
                    <UserData/>
                    <InstanceType>m1.small</InstanceType>
                    <LaunchConfigurationARN>arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc</LaunchConfigurationARN>
                    <BlockDeviceMappings/>
                    <ImageId>ami-514ac838</ImageId>
                    <KeyName/>
                    <RamdiskId/>
                    <InstanceMonitoring>
                      <Enabled>true</Enabled>
                    </InstanceMonitoring>
                    <EbsOptimized>false</EbsOptimized>
                    <ClassicLinkVPCId>vpc-12345</ClassicLinkVPCId>
                    <ClassicLinkVPCSecurityGroups>
                      <member>sg-1234</member>
                    </ClassicLinkVPCSecurityGroups>
                  </member>
                </LaunchConfigurations>
              </DescribeLaunchConfigurationsResult>
              <ResponseMetadata>
                <RequestId>d05a22f8-b690-11e2-bf8e-2113fEXAMPLE</RequestId>
              </ResponseMetadata>
            </DescribeLaunchConfigurationsResponse>
        """

    def test_get_all_launch_configurations(self):
        """All scalar fields of the parsed LaunchConfiguration are checked."""
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_launch_configurations()
        self.assertTrue(isinstance(response, list))
        self.assertEqual(len(response), 1)
        self.assertTrue(isinstance(response[0], LaunchConfiguration))
        self.assertEqual(response[0].associate_public_ip_address, True)
        self.assertEqual(response[0].name, "my-test-lc")
        self.assertEqual(response[0].instance_type, "m1.small")
        self.assertEqual(response[0].launch_configuration_arn, "arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc")
        self.assertEqual(response[0].image_id, "ami-514ac838")
        self.assertTrue(isinstance(response[0].instance_monitoring, launchconfig.InstanceMonitoring))
        # NOTE: InstanceMonitoring.enabled is kept as the raw string 'true'.
        self.assertEqual(response[0].instance_monitoring.enabled, 'true')
        self.assertEqual(response[0].ebs_optimized, False)
        self.assertEqual(response[0].block_device_mappings, [])
        self.assertEqual(response[0].classic_link_vpc_id, 'vpc-12345')
        self.assertEqual(response[0].classic_link_vpc_security_groups,
                         ['sg-1234'])
        self.assert_request_parameters({
            'Action': 'DescribeLaunchConfigurations',
        }, ignore_params_values=['Version'])

    def test_get_all_configuration_limited(self):
        """max_records and names serialize as MaxRecords plus member params."""
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_launch_configurations(max_records=10, names=["my-test1", "my-test2"])
        self.assert_request_parameters({
            'Action': 'DescribeLaunchConfigurations',
            'MaxRecords': 10,
            'LaunchConfigurationNames.member.1': 'my-test1',
            'LaunchConfigurationNames.member.2': 'my-test2'
        }, ignore_params_values=['Version'])
class TestLaunchConfiguration(AWSMockServiceTestCase):
    """Tests CreateLaunchConfiguration request serialization."""

    connection_class = AutoScaleConnection

    def default_body(self):
        # This is a dummy response
        return b"""
            <DescribeLaunchConfigurationsResponse>
            </DescribeLaunchConfigurationsResponse>
        """

    def test_launch_config(self):
        # This unit test is based on #753 and #1343
        self.set_http_response(status_code=200)
        dev_sdf = EBSBlockDeviceType(snapshot_id='snap-12345')
        bdm = BlockDeviceMapping()
        bdm['/dev/sdf'] = dev_sdf
        lc = launchconfig.LaunchConfiguration(
            connection=self.service_connection,
            name='launch_config',
            image_id='123456',
            instance_type='m1.large',
            user_data='#!/bin/bash',
            security_groups=['group1'],
            spot_price='price',
            block_device_mappings=[bdm],
            associate_public_ip_address=True,
            volume_type='atype',
            delete_on_termination=False,
            iops=3000,
            classic_link_vpc_id='vpc-1234',
            classic_link_vpc_security_groups=['classic_link_group']
        )
        response = self.service_connection.create_launch_configuration(lc)
        self.assert_request_parameters({
            'Action': 'CreateLaunchConfiguration',
            'BlockDeviceMappings.member.1.DeviceName': '/dev/sdf',
            'BlockDeviceMappings.member.1.Ebs.DeleteOnTermination': 'false',
            'BlockDeviceMappings.member.1.Ebs.SnapshotId': 'snap-12345',
            'EbsOptimized': 'false',
            'LaunchConfigurationName': 'launch_config',
            'ImageId': '123456',
            # user_data is base64-encoded on the wire.
            'UserData': base64.b64encode(b'#!/bin/bash').decode('utf-8'),
            'InstanceMonitoring.Enabled': 'false',
            'InstanceType': 'm1.large',
            'SecurityGroups.member.1': 'group1',
            'SpotPrice': 'price',
            'AssociatePublicIpAddress': 'true',
            'VolumeType': 'atype',
            'DeleteOnTermination': 'false',
            'Iops': 3000,
            'ClassicLinkVPCId': 'vpc-1234',
            'ClassicLinkVPCSecurityGroups.member.1': 'classic_link_group'
        }, ignore_params_values=['Version'])
class TestCreateAutoScalePolicy(AWSMockServiceTestCase):
    """Tests PutScalingPolicy request serialization for ScalingPolicy."""

    connection_class = AutoScaleConnection

    # NOTE(review): the original class defined a setUp() that only called
    # super().setUp(); that no-op override has been removed.

    def default_body(self):
        # The backslash line-continuations inside the bytes literal are
        # intentional: they keep the long ARN/URL unbroken in the payload.
        return b"""
          <PutScalingPolicyResponse xmlns="http://autoscaling.amazonaws.com\
/doc/2011-01-01/">
            <PutScalingPolicyResult>
              <PolicyARN>arn:aws:autoscaling:us-east-1:803981987763:scaling\
Policy:b0dcf5e8
-02e6-4e31-9719-0675d0dc31ae:autoScalingGroupName/my-test-asg:\
policyName/my-scal
eout-policy</PolicyARN>
            </PutScalingPolicyResult>
            <ResponseMetadata>
              <RequestId>3cfc6fef-c08b-11e2-a697-2922EXAMPLE</RequestId>
            </ResponseMetadata>
          </PutScalingPolicyResponse>
        """

    def test_scaling_policy_with_min_adjustment_step(self):
        """MinAdjustmentStep is sent for PercentChangeInCapacity policies."""
        self.set_http_response(status_code=200)
        policy = ScalingPolicy(
            name='foo', as_name='bar',
            adjustment_type='PercentChangeInCapacity', scaling_adjustment=50,
            min_adjustment_step=30)
        self.service_connection.create_scaling_policy(policy)
        self.assert_request_parameters({
            'Action': 'PutScalingPolicy',
            'PolicyName': 'foo',
            'AutoScalingGroupName': 'bar',
            'AdjustmentType': 'PercentChangeInCapacity',
            'ScalingAdjustment': 50,
            'MinAdjustmentStep': 30
        }, ignore_params_values=['Version'])

    def test_scaling_policy_with_wrong_adjustment_type(self):
        """min_adjustment_step is dropped for non-percentage adjustment types."""
        self.set_http_response(status_code=200)
        policy = ScalingPolicy(
            name='foo', as_name='bar',
            adjustment_type='ChangeInCapacity', scaling_adjustment=50,
            min_adjustment_step=30)
        self.service_connection.create_scaling_policy(policy)
        self.assert_request_parameters({
            'Action': 'PutScalingPolicy',
            'PolicyName': 'foo',
            'AutoScalingGroupName': 'bar',
            'AdjustmentType': 'ChangeInCapacity',
            'ScalingAdjustment': 50
        }, ignore_params_values=['Version'])

    def test_scaling_policy_without_min_adjustment_step(self):
        """Omitting min_adjustment_step simply omits the parameter."""
        self.set_http_response(status_code=200)
        policy = ScalingPolicy(
            name='foo', as_name='bar',
            adjustment_type='PercentChangeInCapacity', scaling_adjustment=50)
        self.service_connection.create_scaling_policy(policy)
        self.assert_request_parameters({
            'Action': 'PutScalingPolicy',
            'PolicyName': 'foo',
            'AutoScalingGroupName': 'bar',
            'AdjustmentType': 'PercentChangeInCapacity',
            'ScalingAdjustment': 50
        }, ignore_params_values=['Version'])
class TestPutNotificationConfiguration(AWSMockServiceTestCase):
    """Tests PutNotificationConfiguration request serialization."""

    connection_class = AutoScaleConnection

    # NOTE(review): the original class defined a setUp() that only called
    # super().setUp(); that no-op override has been removed.

    def default_body(self):
        return b"""
            <PutNotificationConfigurationResponse>
              <ResponseMetadata>
                <RequestId>requestid</RequestId>
              </ResponseMetadata>
            </PutNotificationConfigurationResponse>
        """

    def test_autoscaling_group_put_notification_configuration(self):
        """Topic ARN and notification types map to TopicARN / member params."""
        self.set_http_response(status_code=200)
        autoscale = AutoScalingGroup(
            name='ana', launch_config='lauch_config',
            min_size=1, max_size=2,
            termination_policies=['OldestInstance', 'OldestLaunchConfiguration'])
        self.service_connection.put_notification_configuration(autoscale, 'arn:aws:sns:us-east-1:19890506:AutoScaling-Up', ['autoscaling:EC2_INSTANCE_LAUNCH'])
        self.assert_request_parameters({
            'Action': 'PutNotificationConfiguration',
            'AutoScalingGroupName': 'ana',
            'NotificationTypes.member.1': 'autoscaling:EC2_INSTANCE_LAUNCH',
            'TopicARN': 'arn:aws:sns:us-east-1:19890506:AutoScaling-Up',
        }, ignore_params_values=['Version'])
class TestDeleteNotificationConfiguration(AWSMockServiceTestCase):
    """Tests DeleteNotificationConfiguration request serialization."""

    connection_class = AutoScaleConnection

    # NOTE(review): the original class defined a setUp() that only called
    # super().setUp(); that no-op override has been removed.

    def default_body(self):
        return b"""
            <DeleteNotificationConfigurationResponse>
              <ResponseMetadata>
                <RequestId>requestid</RequestId>
              </ResponseMetadata>
            </DeleteNotificationConfigurationResponse>
        """

    def test_autoscaling_group_delete_notification_configuration(self):
        # Renamed from test_autoscaling_group_put_notification_configuration:
        # the original name was copy-pasted from the Put test but this method
        # exercises delete_notification_configuration.
        self.set_http_response(status_code=200)
        autoscale = AutoScalingGroup(
            name='ana', launch_config='lauch_config',
            min_size=1, max_size=2,
            termination_policies=['OldestInstance', 'OldestLaunchConfiguration'])
        self.service_connection.delete_notification_configuration(autoscale, 'arn:aws:sns:us-east-1:19890506:AutoScaling-Up')
        self.assert_request_parameters({
            'Action': 'DeleteNotificationConfiguration',
            'AutoScalingGroupName': 'ana',
            'TopicARN': 'arn:aws:sns:us-east-1:19890506:AutoScaling-Up',
        }, ignore_params_values=['Version'])
class TestAutoScalingTag(AWSMockServiceTestCase):
    """Tests Tag serialization (CreateOrUpdateTags) and XML parsing."""

    connection_class = AutoScaleConnection

    def default_body(self):
        return b"""
            <CreateOrUpdateTagsResponse>
              <ResponseMetadata>
                <RequestId>requestId</RequestId>
              </ResponseMetadata>
            </CreateOrUpdateTagsResponse>
        """

    def test_create_or_update_tags(self):
        """Each Tag becomes a Tags.member.N.* parameter group."""
        self.set_http_response(status_code=200)
        tags = [
            Tag(
                connection=self.service_connection,
                key='alpha',
                value='tango',
                resource_id='sg-00000000',
                resource_type='auto-scaling-group',
                propagate_at_launch=True
            ),
            Tag(
                connection=self.service_connection,
                key='bravo',
                value='sierra',
                resource_id='sg-00000000',
                resource_type='auto-scaling-group',
                propagate_at_launch=False
            )]
        response = self.service_connection.create_or_update_tags(tags)
        self.assert_request_parameters({
            'Action': 'CreateOrUpdateTags',
            'Tags.member.1.ResourceType': 'auto-scaling-group',
            'Tags.member.1.ResourceId': 'sg-00000000',
            'Tags.member.1.Key': 'alpha',
            'Tags.member.1.Value': 'tango',
            'Tags.member.1.PropagateAtLaunch': 'true',
            'Tags.member.2.ResourceType': 'auto-scaling-group',
            'Tags.member.2.ResourceId': 'sg-00000000',
            'Tags.member.2.Key': 'bravo',
            'Tags.member.2.Value': 'sierra',
            'Tags.member.2.PropagateAtLaunch': 'false'
        }, ignore_params_values=['Version'])

    def test_endElement(self):
        """endElement maps each XML element name to the right attribute."""
        # (element name, raw value, expected attribute) triples.
        for i in [
                ('Key', 'mykey', 'key'),
                ('Value', 'myvalue', 'value'),
                ('ResourceType', 'auto-scaling-group', 'resource_type'),
                ('ResourceId', 'sg-01234567', 'resource_id'),
                ('PropagateAtLaunch', 'true', 'propagate_at_launch')]:
            self.check_tag_attributes_set(i[0], i[1], i[2])

    def check_tag_attributes_set(self, name, value, attr):
        # Helper: feed one element into endElement and verify the attribute.
        # The string 'true' is expected to be coerced to the boolean True.
        tag = Tag()
        tag.endElement(name, value, None)
        if value == 'true':
            self.assertEqual(getattr(tag, attr), True)
        else:
            self.assertEqual(getattr(tag, attr), value)
class TestAttachInstances(AWSMockServiceTestCase):
    """Tests AttachInstances request serialization."""

    connection_class = AutoScaleConnection

    # NOTE(review): the original class defined a setUp() that only called
    # super().setUp(); that no-op override has been removed.

    def default_body(self):
        return b"""
            <AttachInstancesResponse>
              <ResponseMetadata>
                <RequestId>requestid</RequestId>
              </ResponseMetadata>
            </AttachInstancesResponse>
        """

    def test_attach_instances(self):
        """Instance ids serialize as InstanceIds.member.N in input order."""
        self.set_http_response(status_code=200)
        self.service_connection.attach_instances(
            'autoscale',
            ['inst2', 'inst1', 'inst4']
        )
        self.assert_request_parameters({
            'Action': 'AttachInstances',
            'AutoScalingGroupName': 'autoscale',
            'InstanceIds.member.1': 'inst2',
            'InstanceIds.member.2': 'inst1',
            'InstanceIds.member.3': 'inst4',
        }, ignore_params_values=['Version'])
class TestDetachInstances(AWSMockServiceTestCase):
    """Tests DetachInstances request serialization."""

    connection_class = AutoScaleConnection

    # NOTE(review): the original class defined a setUp() that only called
    # super().setUp(); that no-op override has been removed.

    def default_body(self):
        return b"""
            <DetachInstancesResponse>
              <ResponseMetadata>
                <RequestId>requestid</RequestId>
              </ResponseMetadata>
            </DetachInstancesResponse>
        """

    def test_detach_instances(self):
        # When the decrement flag is not given, desired capacity is
        # decremented by default ('true' on the wire).
        self.set_http_response(status_code=200)
        self.service_connection.detach_instances(
            'autoscale',
            ['inst2', 'inst1', 'inst4']
        )
        self.assert_request_parameters({
            'Action': 'DetachInstances',
            'AutoScalingGroupName': 'autoscale',
            'InstanceIds.member.1': 'inst2',
            'InstanceIds.member.2': 'inst1',
            'InstanceIds.member.3': 'inst4',
            'ShouldDecrementDesiredCapacity': 'true',
        }, ignore_params_values=['Version'])

    def test_detach_instances_with_decrement_desired_capacity(self):
        """Explicit True serializes ShouldDecrementDesiredCapacity='true'."""
        self.set_http_response(status_code=200)
        self.service_connection.detach_instances(
            'autoscale',
            ['inst2', 'inst1', 'inst4'],
            True
        )
        self.assert_request_parameters({
            'Action': 'DetachInstances',
            'AutoScalingGroupName': 'autoscale',
            'InstanceIds.member.1': 'inst2',
            'InstanceIds.member.2': 'inst1',
            'InstanceIds.member.3': 'inst4',
            'ShouldDecrementDesiredCapacity': 'true',
        }, ignore_params_values=['Version'])

    def test_detach_instances_without_decrement_desired_capacity(self):
        """Explicit False serializes ShouldDecrementDesiredCapacity='false'."""
        self.set_http_response(status_code=200)
        self.service_connection.detach_instances(
            'autoscale',
            ['inst2', 'inst1', 'inst4'],
            False
        )
        self.assert_request_parameters({
            'Action': 'DetachInstances',
            'AutoScalingGroupName': 'autoscale',
            'InstanceIds.member.1': 'inst2',
            'InstanceIds.member.2': 'inst1',
            'InstanceIds.member.3': 'inst4',
            'ShouldDecrementDesiredCapacity': 'false',
        }, ignore_params_values=['Version'])
class TestGetAccountLimits(AWSMockServiceTestCase):
    """Tests DescribeAccountLimits request and response parsing."""

    connection_class = AutoScaleConnection

    # NOTE(review): the original class defined a setUp() that only called
    # super().setUp(); that no-op override has been removed.

    def default_body(self):
        return b"""
            <DescribeAccountLimitsAnswer>
              <MaxNumberOfAutoScalingGroups>6</MaxNumberOfAutoScalingGroups>
              <MaxNumberOfLaunchConfigurations>3</MaxNumberOfLaunchConfigurations>
              <ResponseMetadata>
                <RequestId>requestid</RequestId>
              </ResponseMetadata>
            </DescribeAccountLimitsAnswer>
        """

    def test_get_account_limits(self):
        # Renamed from test_autoscaling_group_put_notification_configuration:
        # the original name was copy-pasted from an unrelated test; this
        # method exercises get_account_limits().
        self.set_http_response(status_code=200)
        limits = self.service_connection.get_account_limits()
        self.assert_request_parameters({
            'Action': 'DescribeAccountLimits',
        }, ignore_params_values=['Version'])
        self.assertEqual(limits.max_autoscaling_groups, 6)
        self.assertEqual(limits.max_launch_configurations, 3)
class TestGetAdjustmentTypes(AWSMockServiceTestCase):
    """Tests DescribeAdjustmentTypes request and response parsing."""

    connection_class = AutoScaleConnection

    # NOTE(review): the original class defined a setUp() that only called
    # super().setUp(); that no-op override has been removed.

    def default_body(self):
        return b"""
            <DescribeAdjustmentTypesResponse xmlns="http://autoscaling.amazonaws.com/doc/201-01-01/">
              <DescribeAdjustmentTypesResult>
                <AdjustmentTypes>
                  <member>
                    <AdjustmentType>ChangeInCapacity</AdjustmentType>
                  </member>
                  <member>
                    <AdjustmentType>ExactCapacity</AdjustmentType>
                  </member>
                  <member>
                    <AdjustmentType>PercentChangeInCapacity</AdjustmentType>
                  </member>
                </AdjustmentTypes>
              </DescribeAdjustmentTypesResult>
              <ResponseMetadata>
                <RequestId>requestId</RequestId>
              </ResponseMetadata>
            </DescribeAdjustmentTypesResponse>
        """

    def test_autoscaling_adjustment_types(self):
        """Each member parses into an object with an adjustment_type attr."""
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_adjustment_types()
        self.assert_request_parameters({
            'Action': 'DescribeAdjustmentTypes'
        }, ignore_params_values=['Version'])
        self.assertTrue(isinstance(response, list))
        self.assertEqual(response[0].adjustment_type, "ChangeInCapacity")
        self.assertEqual(response[1].adjustment_type, "ExactCapacity")
        self.assertEqual(response[2].adjustment_type, "PercentChangeInCapacity")
class TestLaunchConfigurationDescribeWithBlockDeviceTypes(AWSMockServiceTestCase):
    """Same as TestLaunchConfigurationDescribe, but with
    use_block_device_types enabled so mappings parse into a dict of
    device-name -> block device type objects."""

    connection_class = AutoScaleConnection

    def default_body(self):
        # This is a dummy response
        return b"""
            <DescribeLaunchConfigurationsResponse>
              <DescribeLaunchConfigurationsResult>
                <LaunchConfigurations>
                  <member>
                    <AssociatePublicIpAddress>true</AssociatePublicIpAddress>
                    <SecurityGroups/>
                    <CreatedTime>2013-01-21T23:04:42.200Z</CreatedTime>
                    <KernelId/>
                    <LaunchConfigurationName>my-test-lc</LaunchConfigurationName>
                    <UserData/>
                    <InstanceType>m1.small</InstanceType>
                    <LaunchConfigurationARN>arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc</LaunchConfigurationARN>
                    <BlockDeviceMappings>
                      <member>
                        <DeviceName>/dev/xvdp</DeviceName>
                        <Ebs>
                          <SnapshotId>snap-1234abcd</SnapshotId>
                          <Iops>1000</Iops>
                          <DeleteOnTermination>true</DeleteOnTermination>
                          <VolumeType>io1</VolumeType>
                          <VolumeSize>100</VolumeSize>
                        </Ebs>
                      </member>
                      <member>
                        <VirtualName>ephemeral1</VirtualName>
                        <DeviceName>/dev/xvdc</DeviceName>
                      </member>
                      <member>
                        <VirtualName>ephemeral0</VirtualName>
                        <DeviceName>/dev/xvdb</DeviceName>
                      </member>
                      <member>
                        <DeviceName>/dev/xvdh</DeviceName>
                        <Ebs>
                          <Iops>2000</Iops>
                          <DeleteOnTermination>false</DeleteOnTermination>
                          <VolumeType>io1</VolumeType>
                          <VolumeSize>200</VolumeSize>
                        </Ebs>
                      </member>
                    </BlockDeviceMappings>
                    <ImageId>ami-514ac838</ImageId>
                    <KeyName/>
                    <RamdiskId/>
                    <InstanceMonitoring>
                      <Enabled>true</Enabled>
                    </InstanceMonitoring>
                    <EbsOptimized>false</EbsOptimized>
                  </member>
                </LaunchConfigurations>
              </DescribeLaunchConfigurationsResult>
              <ResponseMetadata>
                <RequestId>d05a22f8-b690-11e2-bf8e-2113fEXAMPLE</RequestId>
              </ResponseMetadata>
            </DescribeLaunchConfigurationsResponse>
        """

    def test_get_all_launch_configurations_with_block_device_types(self):
        """Ephemeral and EBS mappings are keyed by device name."""
        self.set_http_response(status_code=200)
        # Opt in to the dict-style block device mapping parsing.
        self.service_connection.use_block_device_types = True
        response = self.service_connection.get_all_launch_configurations()
        self.assertTrue(isinstance(response, list))
        self.assertEqual(len(response), 1)
        self.assertTrue(isinstance(response[0], LaunchConfiguration))
        self.assertEqual(response[0].associate_public_ip_address, True)
        self.assertEqual(response[0].name, "my-test-lc")
        self.assertEqual(response[0].instance_type, "m1.small")
        self.assertEqual(response[0].launch_configuration_arn, "arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc")
        self.assertEqual(response[0].image_id, "ami-514ac838")
        self.assertTrue(isinstance(response[0].instance_monitoring, launchconfig.InstanceMonitoring))
        self.assertEqual(response[0].instance_monitoring.enabled, 'true')
        self.assertEqual(response[0].ebs_optimized, False)
        self.assertEqual(response[0].block_device_mappings['/dev/xvdb'].ephemeral_name, 'ephemeral0')
        self.assertEqual(response[0].block_device_mappings['/dev/xvdc'].ephemeral_name, 'ephemeral1')
        self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].snapshot_id, 'snap-1234abcd')
        self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].delete_on_termination, True)
        self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].iops, 1000)
        self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].size, 100)
        self.assertEqual(response[0].block_device_mappings['/dev/xvdp'].volume_type, 'io1')
        self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].delete_on_termination, False)
        self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].iops, 2000)
        self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].size, 200)
        self.assertEqual(response[0].block_device_mappings['/dev/xvdh'].volume_type, 'io1')
        self.assert_request_parameters({
            'Action': 'DescribeLaunchConfigurations',
        }, ignore_params_values=['Version'])

    def test_get_all_configuration_limited(self):
        """max_records and names serialize as MaxRecords plus member params."""
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_launch_configurations(max_records=10, names=["my-test1", "my-test2"])
        self.assert_request_parameters({
            'Action': 'DescribeLaunchConfigurations',
            'MaxRecords': 10,
            'LaunchConfigurationNames.member.1': 'my-test1',
            'LaunchConfigurationNames.member.2': 'my-test2'
        }, ignore_params_values=['Version'])
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| bsd-3-clause |
ncoghlan/pip | setup.py | 5 | 2853 | import codecs
import os
import re
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
here = os.path.abspath(os.path.dirname(__file__))
class PyTest(TestCommand):
    """setuptools 'test' command that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Run the full default pytest collection with no extra args.
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        sys.exit(pytest.main(self.test_args))
def read(*parts):
    """Return the text of the file at *parts*, joined relative to setup.py.

    # intentionally *not* adding an encoding option to open, See:
    # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original left the handle open until garbage collection.
    with codecs.open(os.path.join(here, *parts), 'r') as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract the ``__version__`` string from the given source file.

    Raises RuntimeError if no assignment is found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Long description shown on PyPI, taken from the README.
long_description = read('README.rst')

tests_require = ['pytest', 'virtualenv>=1.10', 'scripttest>=1.3', 'mock']

setup(
    name="pip",
    version=find_version("pip", "__init__.py"),
    description="The PyPA recommended tool for installing Python packages.",
    long_description=long_description,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Topic :: Software Development :: Build Tools",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: Implementation :: PyPy"
    ],
    keywords='easy_install distutils setuptools egg virtualenv',
    author='The pip developers',
    author_email='python-virtualenv@groups.google.com',
    url='https://pip.pypa.io/',
    license='MIT',
    packages=find_packages(exclude=["contrib", "docs", "tests*", "tasks"]),
    package_data={
        "pip._vendor.certifi": ["*.pem"],
        "pip._vendor.requests": ["*.pem"],
        "pip._vendor.distlib._backport": ["sysconfig.cfg"],
        "pip._vendor.distlib": ["t32.exe", "t64.exe", "w32.exe", "w64.exe"],
    },
    entry_points={
        "console_scripts": [
            # Provide pip, pipX and pipX.Y console scripts.  Built from
            # sys.version_info rather than string-slicing sys.version:
            # sys.version[:3] yields the wrong value ("3.1") on
            # Python 3.10+, while version_info is always correct and
            # produces identical names on the versions supported here.
            "pip=pip:main",
            "pip%d=pip:main" % sys.version_info[0],
            "pip%d.%d=pip:main" % sys.version_info[:2],
        ],
    },
    tests_require=tests_require,
    zip_safe=False,
    extras_require={
        'testing': tests_require,
    },
    cmdclass={'test': PyTest},
)
| mit |
caoning1985/sqt | src/strategy_py/google/protobuf/json_format.py | 18 | 27443 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in JSON format.
Simple usage example:
# Create a proto object and serialize it to a json format string.
message = my_proto_pb2.MyMessage(foo='bar')
json_string = json_format.MessageToJson(message)
# Parse a json format string to proto object.
message = json_format.Parse(json_string, my_proto_pb2.MyMessage())
"""
__author__ = 'jieluo@google.com (Jie Luo)'
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict #PY26
import base64
import json
import math
import re
import six
import sys
from operator import methodcaller
from google.protobuf import descriptor
from google.protobuf import symbol_database
_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S'
_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32,
descriptor.FieldDescriptor.CPPTYPE_UINT32,
descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64])
_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64])
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
_INFINITY = 'Infinity'
_NEG_INFINITY = '-Infinity'
_NAN = 'NaN'
_UNPAIRED_SURROGATE_PATTERN = re.compile(six.u(
r'[\ud800-\udbff](?![\udc00-\udfff])|(?<![\ud800-\udbff])[\udc00-\udfff]'
))
class Error(Exception):
  """Top-level module error for json_format."""
  # Base class for SerializeToJsonError and ParseError below.
class SerializeToJsonError(Error):
  """Thrown if serialization to JSON fails."""
class ParseError(Error):
  """Thrown in case of parsing error."""
def MessageToJson(message,
                  including_default_value_fields=False,
                  preserving_proto_field_name=False):
  """Serializes a protobuf message into a JSON-formatted string.

  Args:
    message: The protocol buffers message instance to serialize.
    including_default_value_fields: If True, singular primitive fields,
        repeated fields, and map fields will always be serialized.  If
        False, only serialize non-empty fields.  Singular message fields
        and oneof fields are not affected by this option.
    preserving_proto_field_name: If True, use the original proto field
        names as defined in the .proto file.  If False, convert the field
        names to lowerCamelCase.

  Returns:
    A string containing the JSON formatted protocol buffer message.
  """
  return _Printer(including_default_value_fields,
                  preserving_proto_field_name).ToJsonString(message)
def MessageToDict(message,
                  including_default_value_fields=False,
                  preserving_proto_field_name=False):
  """Serializes a protobuf message into a JSON-compatible dictionary.

  Args:
    message: The protocol buffers message instance to serialize.
    including_default_value_fields: If True, singular primitive fields,
        repeated fields, and map fields will always be serialized.  If
        False, only serialize non-empty fields.  Singular message fields
        and oneof fields are not affected by this option.
    preserving_proto_field_name: If True, use the original proto field
        names as defined in the .proto file.  If False, convert the field
        names to lowerCamelCase.

  Returns:
    A dict representation of the JSON formatted protocol buffer message.
  """
  printer = _Printer(including_default_value_fields,
                     preserving_proto_field_name)
  # pylint: disable=protected-access
  return printer._MessageToJsonObject(message)
def _IsMapEntry(field):
  """Returns True if *field* is the synthesized entry type of a map field."""
  if field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:
    return False
  entry_type = field.message_type
  return entry_type.has_options and entry_type.GetOptions().map_entry
class _Printer(object):
"""JSON format printer for protocol message."""
  def __init__(self,
               including_default_value_fields=False,
               preserving_proto_field_name=False):
    # See MessageToJson() for the meaning of these two flags.
    self.including_default_value_fields = including_default_value_fields
    self.preserving_proto_field_name = preserving_proto_field_name
  def ToJsonString(self, message):
    """Converts *message* to its JSON object form and dumps it, indented."""
    js = self._MessageToJsonObject(message)
    return json.dumps(js, indent=2)
  def _MessageToJsonObject(self, message):
    """Converts message to an object according to Proto3 JSON Specification."""
    message_descriptor = message.DESCRIPTOR
    full_name = message_descriptor.full_name
    # Wrapper types (e.g. google.protobuf.Int32Value) serialize as their
    # bare value rather than as an object.
    if _IsWrapperMessage(message_descriptor):
      return self._WrapperMessageToJsonObject(message)
    # Well-known types dispatch to their dedicated serializer method.
    if full_name in _WKTJSONMETHODS:
      return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self)
    js = {}
    return self._RegularMessageToJsonObject(message, js)
def _RegularMessageToJsonObject(self, message, js):
"""Converts normal message according to Proto3 JSON Specification."""
fields = message.ListFields()
try:
for field, value in fields:
if self.preserving_proto_field_name:
name = field.name
else:
name = field.json_name
if _IsMapEntry(field):
# Convert a map field.
v_field = field.message_type.fields_by_name['value']
js_map = {}
for key in value:
if isinstance(key, bool):
if key:
recorded_key = 'true'
else:
recorded_key = 'false'
else:
recorded_key = key
js_map[recorded_key] = self._FieldToJsonObject(
v_field, value[key])
js[name] = js_map
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
# Convert a repeated field.
js[name] = [self._FieldToJsonObject(field, k)
for k in value]
else:
js[name] = self._FieldToJsonObject(field, value)
# Serialize default value if including_default_value_fields is True.
if self.including_default_value_fields:
message_descriptor = message.DESCRIPTOR
for field in message_descriptor.fields:
# Singular message fields and oneof fields will not be affected.
if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and
field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or
field.containing_oneof):
continue
if self.preserving_proto_field_name:
name = field.name
else:
name = field.json_name
if name in js:
# Skip the field which has been serailized already.
continue
if _IsMapEntry(field):
js[name] = {}
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
js[name] = []
else:
js[name] = self._FieldToJsonObject(field, field.default_value)
except ValueError as e:
raise SerializeToJsonError(
'Failed to serialize {0} field: {1}.'.format(field.name, e))
return js
def _FieldToJsonObject(self, field, value):
"""Converts field value according to Proto3 JSON Specification."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
return self._MessageToJsonObject(value)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
enum_value = field.enum_type.values_by_number.get(value, None)
if enum_value is not None:
return enum_value.name
else:
raise SerializeToJsonError('Enum field contains an integer value '
'which can not mapped to an enum value.')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
# Use base64 Data encoding for bytes
return base64.b64encode(value).decode('utf-8')
else:
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
return bool(value)
elif field.cpp_type in _INT64_TYPES:
return str(value)
elif field.cpp_type in _FLOAT_TYPES:
if math.isinf(value):
if value < 0.0:
return _NEG_INFINITY
else:
return _INFINITY
if math.isnan(value):
return _NAN
return value
def _AnyMessageToJsonObject(self, message):
"""Converts Any message according to Proto3 JSON Specification."""
if not message.ListFields():
return {}
# Must print @type first, use OrderedDict instead of {}
js = OrderedDict()
type_url = message.type_url
js['@type'] = type_url
sub_message = _CreateMessageFromTypeUrl(type_url)
sub_message.ParseFromString(message.value)
message_descriptor = sub_message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
js['value'] = self._WrapperMessageToJsonObject(sub_message)
return js
if full_name in _WKTJSONMETHODS:
js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0],
sub_message)(self)
return js
return self._RegularMessageToJsonObject(sub_message, js)
def _GenericMessageToJsonObject(self, message):
"""Converts message according to Proto3 JSON Specification."""
# Duration, Timestamp and FieldMask have ToJsonString method to do the
# convert. Users can also call the method directly.
return message.ToJsonString()
def _ValueMessageToJsonObject(self, message):
"""Converts Value message according to Proto3 JSON Specification."""
which = message.WhichOneof('kind')
# If the Value message is not set treat as null_value when serialize
# to JSON. The parse back result will be different from original message.
if which is None or which == 'null_value':
return None
if which == 'list_value':
return self._ListValueMessageToJsonObject(message.list_value)
if which == 'struct_value':
value = message.struct_value
else:
value = getattr(message, which)
oneof_descriptor = message.DESCRIPTOR.fields_by_name[which]
return self._FieldToJsonObject(oneof_descriptor, value)
def _ListValueMessageToJsonObject(self, message):
"""Converts ListValue message according to Proto3 JSON Specification."""
return [self._ValueMessageToJsonObject(value)
for value in message.values]
def _StructMessageToJsonObject(self, message):
"""Converts Struct message according to Proto3 JSON Specification."""
fields = message.fields
ret = {}
for key in fields:
ret[key] = self._ValueMessageToJsonObject(fields[key])
return ret
def _WrapperMessageToJsonObject(self, message):
return self._FieldToJsonObject(
message.DESCRIPTOR.fields_by_name['value'], message.value)
def _IsWrapperMessage(message_descriptor):
return message_descriptor.file.name == 'google/protobuf/wrappers.proto'
def _DuplicateChecker(js):
result = {}
for name, value in js:
if name in result:
raise ParseError('Failed to load JSON: duplicate key {0}.'.format(name))
result[name] = value
return result
def _CreateMessageFromTypeUrl(type_url):
  """Returns a new, empty message instance for the type named by *type_url*.

  Raises:
    TypeError: If the type is not registered in the default symbol database.
  """
  # TODO(jieluo): Should add a way that users can register the type resolver
  # instead of the default one.
  db = symbol_database.Default()
  # The fully-qualified type name is the last path segment of the URL,
  # e.g. 'type.googleapis.com/google.protobuf.Duration'.
  type_name = type_url.split('/')[-1]
  try:
    message_descriptor = db.pool.FindMessageTypeByName(type_name)
  except KeyError:
    raise TypeError(
        'Can not find message descriptor by type_url: {0}.'.format(type_url))
  message_class = db.GetPrototype(message_descriptor)
  return message_class()
def Parse(text, message, ignore_unknown_fields=False):
  """Parses a JSON representation of a protocol message into a message.

  Args:
    text: Message JSON representation.
    message: A protocol buffer message to merge into.
    ignore_unknown_fields: If True, do not raise errors for unknown fields.

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On JSON parsing problems.
  """
  if not isinstance(text, six.text_type):
    text = text.decode('utf-8')
  # Duplicate-key detection requires object_pairs_hook, which json.loads
  # only supports from Python 2.7 onwards.
  supports_pairs_hook = sys.version_info >= (2, 7)
  try:
    if supports_pairs_hook:
      js = json.loads(text, object_pairs_hook=_DuplicateChecker)
    else:
      js = json.loads(text)
  except ValueError as e:
    raise ParseError('Failed to load JSON: {0}.'.format(str(e)))
  return ParseDict(js, message, ignore_unknown_fields)
def ParseDict(js_dict, message, ignore_unknown_fields=False):
  """Parses a JSON dictionary representation into a message.

  Args:
    js_dict: Dict representation of a JSON message.
    message: A protocol buffer message to merge into.
    ignore_unknown_fields: If True, do not raise errors for unknown fields.

  Returns:
    The same message passed as argument.
  """
  _Parser(ignore_unknown_fields).ConvertMessage(js_dict, message)
  return message
_INT_OR_FLOAT = six.integer_types + (float,)
class _Parser(object):
  """JSON format parser for protocol message."""
  def __init__(self,
               ignore_unknown_fields):
    # If True, unknown JSON keys are silently skipped instead of raising.
    self.ignore_unknown_fields = ignore_unknown_fields
  def ConvertMessage(self, value, message):
    """Convert a JSON object into a message.
    Args:
      value: A JSON object.
      message: A WKT or regular protocol message to record the data.
    Raises:
      ParseError: In case of convert problems.
    """
    message_descriptor = message.DESCRIPTOR
    full_name = message_descriptor.full_name
    # Well-known types get dedicated converters; everything else is parsed
    # field-by-field as a regular message.
    if _IsWrapperMessage(message_descriptor):
      self._ConvertWrapperMessage(value, message)
    elif full_name in _WKTJSONMETHODS:
      methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)
    else:
      self._ConvertFieldValuePair(value, message)
  def _ConvertFieldValuePair(self, js, message):
    """Convert field value pairs into regular message.
    Args:
      js: A JSON object to convert the field value pairs.
      message: A regular protocol message to record the data.
    Raises:
      ParseError: In case of problems converting.
    """
    names = []
    message_descriptor = message.DESCRIPTOR
    fields_by_json_name = dict((f.json_name, f)
                               for f in message_descriptor.fields)
    for name in js:
      try:
        # Accept both the lowerCamelCase JSON name and the original proto
        # field name.
        field = fields_by_json_name.get(name, None)
        if not field:
          field = message_descriptor.fields_by_name.get(name, None)
        if not field:
          if self.ignore_unknown_fields:
            continue
          raise ParseError(
              'Message type "{0}" has no field named "{1}".'.format(
                  message_descriptor.full_name, name))
        if name in names:
          raise ParseError('Message type "{0}" should not have multiple '
                           '"{1}" fields.'.format(
                               message.DESCRIPTOR.full_name, name))
        names.append(name)
        # Check no other oneof field is parsed.
        if field.containing_oneof is not None:
          oneof_name = field.containing_oneof.name
          if oneof_name in names:
            raise ParseError('Message type "{0}" should not have multiple '
                             '"{1}" oneof fields.'.format(
                                 message.DESCRIPTOR.full_name, oneof_name))
          names.append(oneof_name)
        value = js[name]
        if value is None:
          # JSON null clears the field, except for google.protobuf.Value
          # where null is itself a legal value (null_value).
          if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE
              and field.message_type.full_name == 'google.protobuf.Value'):
            sub_message = getattr(message, field.name)
            sub_message.null_value = 0
          else:
            message.ClearField(field.name)
          continue
        # Parse field value.
        if _IsMapEntry(field):
          message.ClearField(field.name)
          self._ConvertMapFieldValue(value, message, field)
        elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          message.ClearField(field.name)
          if not isinstance(value, list):
            raise ParseError('repeated field {0} must be in [] which is '
                             '{1}.'.format(name, value))
          if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
            # Repeated message field.
            for item in value:
              sub_message = getattr(message, field.name).add()
              # None is a null_value in Value.
              if (item is None and
                  sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'):
                raise ParseError('null is not allowed to be used as an element'
                                 ' in a repeated field.')
              self.ConvertMessage(item, sub_message)
          else:
            # Repeated scalar field.
            for item in value:
              if item is None:
                raise ParseError('null is not allowed to be used as an element'
                                 ' in a repeated field.')
              getattr(message, field.name).append(
                  _ConvertScalarFieldValue(item, field))
        elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
          sub_message = getattr(message, field.name)
          # SetInParent marks the (possibly empty) submessage as present.
          sub_message.SetInParent()
          self.ConvertMessage(value, sub_message)
        else:
          setattr(message, field.name, _ConvertScalarFieldValue(value, field))
      except ParseError as e:
        # Nested ParseErrors from oneof members already carry field context.
        if field and field.containing_oneof is None:
          raise ParseError('Failed to parse {0} field: {1}'.format(name, e))
        else:
          raise ParseError(str(e))
      except ValueError as e:
        raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
      except TypeError as e:
        raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
  def _ConvertAnyMessage(self, value, message):
    """Convert a JSON representation into Any message."""
    if isinstance(value, dict) and not value:
      return
    try:
      type_url = value['@type']
    except KeyError:
      raise ParseError('@type is missing when parsing any message.')
    sub_message = _CreateMessageFromTypeUrl(type_url)
    message_descriptor = sub_message.DESCRIPTOR
    full_name = message_descriptor.full_name
    # Well-known payloads are nested under 'value'; regular messages are
    # flattened next to '@type'.
    if _IsWrapperMessage(message_descriptor):
      self._ConvertWrapperMessage(value['value'], sub_message)
    elif full_name in _WKTJSONMETHODS:
      methodcaller(
          _WKTJSONMETHODS[full_name][1], value['value'], sub_message)(self)
    else:
      del value['@type']
      self._ConvertFieldValuePair(value, sub_message)
    # Sets Any message
    message.value = sub_message.SerializeToString()
    message.type_url = type_url
  def _ConvertGenericMessage(self, value, message):
    """Convert a JSON representation into message with FromJsonString."""
    # Duration, Timestamp, FieldMask have FromJsonString method to do the
    # convert. Users can also call the method directly.
    message.FromJsonString(value)
  def _ConvertValueMessage(self, value, message):
    """Convert a JSON representation into Value message."""
    if isinstance(value, dict):
      self._ConvertStructMessage(value, message.struct_value)
    elif isinstance(value, list):
      self._ConvertListValueMessage(value, message.list_value)
    elif value is None:
      message.null_value = 0
    elif isinstance(value, bool):
      # bool must be tested before the numeric types: bool is an int subclass.
      message.bool_value = value
    elif isinstance(value, six.string_types):
      message.string_value = value
    elif isinstance(value, _INT_OR_FLOAT):
      message.number_value = value
    else:
      raise ParseError('Unexpected type for Value message.')
  def _ConvertListValueMessage(self, value, message):
    """Convert a JSON representation into ListValue message."""
    if not isinstance(value, list):
      raise ParseError(
          'ListValue must be in [] which is {0}.'.format(value))
    message.ClearField('values')
    for item in value:
      self._ConvertValueMessage(item, message.values.add())
  def _ConvertStructMessage(self, value, message):
    """Convert a JSON representation into Struct message."""
    if not isinstance(value, dict):
      raise ParseError(
          'Struct must be in a dict which is {0}.'.format(value))
    for key in value:
      self._ConvertValueMessage(value[key], message.fields[key])
    return
  def _ConvertWrapperMessage(self, value, message):
    """Convert a JSON representation into Wrapper message."""
    field = message.DESCRIPTOR.fields_by_name['value']
    setattr(message, 'value', _ConvertScalarFieldValue(value, field))
  def _ConvertMapFieldValue(self, value, message, field):
    """Convert map field value for a message map field.
    Args:
      value: A JSON object to convert the map field value.
      message: A protocol message to record the converted data.
      field: The descriptor of the map field to be converted.
    Raises:
      ParseError: In case of convert problems.
    """
    if not isinstance(value, dict):
      raise ParseError(
          'Map field {0} must be in a dict which is {1}.'.format(
              field.name, value))
    key_field = field.message_type.fields_by_name['key']
    value_field = field.message_type.fields_by_name['value']
    for key in value:
      # JSON object keys are always strings; require_str=True converts them
      # back to the declared scalar key type.
      key_value = _ConvertScalarFieldValue(key, key_field, True)
      if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
        self.ConvertMessage(value[key], getattr(
            message, field.name)[key_value])
      else:
        getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(
            value[key], value_field)
def _ConvertScalarFieldValue(value, field, require_str=False):
  """Convert a single scalar field value.
  Args:
    value: A scalar value to convert the scalar field value.
    field: The descriptor of the field to convert.
    require_str: If True, the field value must be a str.
  Returns:
    The converted scalar field value
  Raises:
    ParseError: In case of convert problems.
  """
  if field.cpp_type in _INT_TYPES:
    return _ConvertInteger(value)
  elif field.cpp_type in _FLOAT_TYPES:
    return _ConvertFloat(value)
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
    return _ConvertBool(value, require_str)
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
    if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
      return base64.b64decode(value)
    else:
      # Checking for unpaired surrogates appears to be unreliable,
      # depending on the specific Python version, so we check manually.
      if _UNPAIRED_SURROGATE_PATTERN.search(value):
        raise ParseError('Unpaired surrogate')
      return value
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
    # Convert an enum value.
    enum_value = field.enum_type.values_by_name.get(value, None)
    if enum_value is None:
      # Fall back to accepting the numeric form of the enum value.
      try:
        number = int(value)
        enum_value = field.enum_type.values_by_number.get(number, None)
      except ValueError:
        raise ParseError('Invalid enum value {0} for enum type {1}.'.format(
            value, field.enum_type.full_name))
      if enum_value is None:
        raise ParseError('Invalid enum value {0} for enum type {1}.'.format(
            value, field.enum_type.full_name))
    return enum_value.number
def _ConvertInteger(value):
  """Convert an integer.

  Args:
    value: A scalar value to convert.

  Returns:
    The integer value.

  Raises:
    ParseError: If an integer couldn't be consumed.
  """
  # Reject fractional floats (e.g. 1.5) but accept exact ones (e.g. 2.0).
  if isinstance(value, float) and not value.is_integer():
    raise ParseError('Couldn\'t parse integer: {0}.'.format(value))
  # Reject strings with embedded spaces, which int() would otherwise strip.
  if isinstance(value, six.text_type) and ' ' in value:
    raise ParseError('Couldn\'t parse integer: "{0}".'.format(value))
  return int(value)
def _ConvertFloat(value):
"""Convert an floating point number."""
if value == 'nan':
raise ParseError('Couldn\'t parse float "nan", use "NaN" instead.')
try:
# Assume Python compatible syntax.
return float(value)
except ValueError:
# Check alternative spellings.
if value == _NEG_INFINITY:
return float('-inf')
elif value == _INFINITY:
return float('inf')
elif value == _NAN:
return float('nan')
else:
raise ParseError('Couldn\'t parse float: {0}.'.format(value))
def _ConvertBool(value, require_str):
"""Convert a boolean value.
Args:
value: A scalar value to convert.
require_str: If True, value must be a str.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if require_str:
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ParseError('Expected "true" or "false", not {0}.'.format(value))
if not isinstance(value, bool):
raise ParseError('Expected true or false without quotes.')
return value
# Maps a well-known type's full name to the [serializer, parser] method names
# on _Printer / _Parser respectively, dispatched via operator.methodcaller.
_WKTJSONMETHODS = {
    'google.protobuf.Any': ['_AnyMessageToJsonObject',
                            '_ConvertAnyMessage'],
    'google.protobuf.Duration': ['_GenericMessageToJsonObject',
                                 '_ConvertGenericMessage'],
    'google.protobuf.FieldMask': ['_GenericMessageToJsonObject',
                                  '_ConvertGenericMessage'],
    'google.protobuf.ListValue': ['_ListValueMessageToJsonObject',
                                  '_ConvertListValueMessage'],
    'google.protobuf.Struct': ['_StructMessageToJsonObject',
                               '_ConvertStructMessage'],
    'google.protobuf.Timestamp': ['_GenericMessageToJsonObject',
                                  '_ConvertGenericMessage'],
    'google.protobuf.Value': ['_ValueMessageToJsonObject',
                              '_ConvertValueMessage']
}
| apache-2.0 |
mrunalp/docker | hack/infrastructure/docker-ci/deployment.py | 4 | 6191 | #!/usr/bin/env python
import os, sys, re, json, requests, base64
from subprocess import call
from fabric import api
from fabric.api import cd, run, put, sudo
from os import environ as env
from datetime import datetime
from time import sleep
# --- Configuration ------------------------------------------------------
# All settings come from the CONFIG_JSON environment variable (a JSON blob).
# The multi-line SSH private key is stripped out before json.loads and then
# re-extracted with a regex, because it needs more processing.
# Remove SSH private key as it needs more processing
CONFIG = json.loads(re.sub(r'("DOCKER_CI_KEY".+?"(.+?)",)','',
    env['CONFIG_JSON'], flags=re.DOTALL))
# Populate environment variables
for key in CONFIG:
    env[key] = CONFIG[key]
# Load SSH private key
env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1',
    env['CONFIG_JSON'],flags=re.DOTALL)
DROPLET_NAME = env.get('DROPLET_NAME','docker-ci')
TIMEOUT = 120            # Seconds before timeout droplet creation
IMAGE_ID = 1004145       # Docker on Ubuntu 13.04
REGION_ID = 4            # New York 2
SIZE_ID = 62             # memory 2GB
DO_IMAGE_USER = 'root'   # Image user on Digital Ocean
API_URL = 'https://api.digitalocean.com/'
DOCKER_PATH = '/go/src/github.com/dotcloud/docker'
DOCKER_CI_PATH = '/docker-ci'
CFG_PATH = '{}/buildbot'.format(DOCKER_CI_PATH)
class DigitalOcean():
    """Minimal client for the Digital Ocean v1 HTTP API."""

    def __init__(self, key, client):
        '''Set default API parameters'''
        self.key = key
        self.client = client
        self.api_url = API_URL

    def api(self, cmd_path, api_arg=None):
        '''Make api call'''
        # Bug fix: the original used a mutable default (api_arg={}) and
        # mutated it with .update(), leaking api_key/client_id into the
        # shared default dict across calls. Copy the caller's dict instead.
        params = dict(api_arg) if api_arg else {}
        params.update({'api_key': self.key, 'client_id': self.client})
        resp = requests.get(self.api_url + cmd_path, params=params).text
        resp = json.loads(resp)
        if resp['status'] != 'OK':
            raise Exception(resp['error_message'])
        return resp

    def droplet_data(self, name):
        '''Get droplet data'''
        # Returns the first droplet matching *name*, or {} if none exists.
        data = self.api('droplets')
        data = [droplet for droplet in data['droplets']
                if droplet['name'] == name]
        return data[0] if data else {}
def json_fmt(data):
    """Pretty-print *data* as sorted, 2-space-indented JSON text."""
    return json.dumps(data, sort_keys=True, indent=2)
# --- Main deployment script (Python 2: print statements, 0600 octal) ----
do = DigitalOcean(env['DO_API_KEY'], env['DO_CLIENT_ID'])
# Get DROPLET_NAME data
data = do.droplet_data(DROPLET_NAME)
# Stop processing if DROPLET_NAME exists on Digital Ocean
if data:
    print ('Droplet: {} already deployed. Not further processing.'
        .format(DROPLET_NAME))
    exit(1)
# Create droplet
do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID,
    'image_id':IMAGE_ID, 'size_id':SIZE_ID,
    'ssh_key_ids':[env['DOCKER_KEY_ID']]})
# Wait for droplet to be created.
# Poll the API every 3s until the droplet reports 'active' or TIMEOUT elapses.
start_time = datetime.now()
while (data.get('status','') != 'active' and (
 datetime.now()-start_time).seconds < TIMEOUT):
    data = do.droplet_data(DROPLET_NAME)
    print data['status']
    sleep(3)
# Wait for the machine to boot
sleep(15)
# Get droplet IP
ip = str(data['ip_address'])
print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip)
# Create docker-ci ssh private key so docker-ci docker container can communicate
# with its EC2 instance
os.makedirs('/root/.ssh')
open('/root/.ssh/id_rsa','w').write(env['DOCKER_CI_KEY'])
os.chmod('/root/.ssh/id_rsa',0600)
open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n')
# Point Fabric at the freshly created droplet for all sudo()/run() calls.
api.env.host_string = ip
api.env.user = DO_IMAGE_USER
api.env.key_filename = '/root/.ssh/id_rsa'
# Correct timezone
sudo('echo "America/Los_Angeles" >/etc/timezone')
sudo('dpkg-reconfigure --frontend noninteractive tzdata')
# Load public docker-ci key
sudo("echo '{}' >> /root/.ssh/authorized_keys".format(env['DOCKER_CI_PUB']))
# Create docker nightly release credentials file
# NOTE: base64 only obfuscates; the file content is not encrypted.
credentials = {
    'AWS_ACCESS_KEY': env['PKG_ACCESS_KEY'],
    'AWS_SECRET_KEY': env['PKG_SECRET_KEY'],
    'GPG_PASSPHRASE': env['PKG_GPG_PASSPHRASE']}
open(DOCKER_CI_PATH + '/nightlyrelease/release_credentials.json', 'w').write(
    base64.b64encode(json.dumps(credentials)))
# Transfer docker
sudo('mkdir -p ' + DOCKER_CI_PATH)
sudo('chown {}.{} {}'.format(DO_IMAGE_USER, DO_IMAGE_USER, DOCKER_CI_PATH))
call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip,
    os.path.dirname(DOCKER_CI_PATH)), shell=True)
# Install Docker and Buildbot dependencies
sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker')
sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -')
sudo('echo deb https://get.docker.io/ubuntu docker main >'
    ' /etc/apt/sources.list.d/docker.list')
sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'
    'deb http://us.archive.ubuntu.com/ubuntu/ raring-security main universe\n"'
    ' > /etc/apt/sources.list; apt-get update')
sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev'
    ' python-pip supervisor git mercurial linux-image-extra-$(uname -r)'
    ' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev')
sudo('wget -O - https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | '
    'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go')
sudo('GOPATH=/go go get -d github.com/dotcloud/docker')
sudo('pip install -r {}/requirements.txt'.format(CFG_PATH))
# Install docker and testing dependencies
sudo('apt-get install -y -q lxc-docker')
sudo('curl -s https://phantomjs.googlecode.com/files/'
    'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin'
    ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs')
# Build docker-ci containers
sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH))
sudo('cd {}; docker build -t docker-ci .'.format(DOCKER_CI_PATH))
sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format(
    DOCKER_CI_PATH))
sudo('cd {}/registry-coverage; docker build -t registry_coverage .'.format(
    DOCKER_CI_PATH))
# Download docker-ci testing container
sudo('docker pull mzdaniel/test_docker')
# Setup buildbot
sudo('mkdir /data')
sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'
    ' {11} {12}'.format(CFG_PATH, DOCKER_PATH, env['BUILDBOT_PWD'],
    env['IRC_PWD'], env['IRC_CHANNEL'], env['SMTP_USER'],
    env['SMTP_PWD'], env['EMAIL_RCP'], env['REGISTRY_USER'],
    env['REGISTRY_PWD'], env['REGISTRY_BUCKET'], env['REGISTRY_ACCESS_KEY'],
    env['REGISTRY_SECRET_KEY']))
# Preventively reboot docker-ci daily
sudo('ln -s /sbin/reboot /etc/cron.daily')
| apache-2.0 |
lbishal/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
    """Unpacks the row and column arrays and checks their shape.

    Parameters
    ----------
    a, b : (rows, columns) tuples
        Row and column indicator collections for two sets of biclusters.

    Returns
    -------
    a_rows, a_cols, b_rows, b_cols : validated arrays.
    """
    check_consistent_length(*a)
    check_consistent_length(*b)

    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    def checks(x):
        # Indicators may be 1-D, so don't force 2-D here.
        return check_array(x, ensure_2d=False)

    a_rows, a_cols = map(checks, a)
    b_rows, b_cols = map(checks, b)
    return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
    """Computes pairwise similarity matrix.

    result[i, j] is the Jaccard coefficient of a's bicluster i and b's
    bicluster j.
    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    n_a = a_rows.shape[0]
    n_b = b_rows.shape[0]
    rows = [[similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j])
             for j in range(n_b)]
            for i in range(n_a)]
    return np.array(rows)
def consensus_score(a, b, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Similarity between individual biclusters is computed. Then the
    best matching between sets is found using the Hungarian algorithm.
    The final score is the sum of similarities divided by the size of
    the larger set.

    Read more in the :ref:`User Guide <biclustering>`.

    Parameters
    ----------
    a : (rows, columns)
        Tuple of row and column indicators for a set of biclusters.

    b : (rows, columns)
        Another set of biclusters like ``a``.

    similarity : string or function, optional, default: "jaccard"
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).

    References
    ----------

    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.

    """
    sim_fn = _jaccard if similarity == "jaccard" else similarity
    matrix = _pairwise_similarity(a, b, sim_fn)
    # The Hungarian algorithm minimizes cost, so match on (1 - similarity).
    indices = linear_assignment(1. - matrix)
    best_total = matrix[indices[:, 0], indices[:, 1]].sum()
    larger_set = max(len(a[0]), len(b[0]))
    return best_total / larger_set
| bsd-3-clause |
sebastianlan/wedfairy-api | poll/models.py | 1 | 1124 | from django.db import models
class Poll(models.Model):
    # Explicit auto-increment primary key (Django would add one implicitly).
    id = models.AutoField(primary_key=True)
    title = models.CharField(blank=True, null=True, max_length=255)
    message = models.TextField(blank=True, null=True)
    # Integer setting for the poll's selection behavior -- presumably how many
    # options a voter may choose; TODO confirm against the view layer.
    select = models.IntegerField(blank=True, null=True)
    # Poll type discriminator; the meaning of each code is not visible here --
    # verify against client code.
    type = models.IntegerField(blank=True, null=True)
    deadline = models.DateField(blank=True, null=True)
    # Audit dates: created_date is set once on insert, changed_date is
    # refreshed on every save.
    created_date = models.DateField(auto_now_add=True)
    changed_date = models.DateField(auto_now=True)
class Option(models.Model):
    id = models.AutoField(primary_key=True)
    poll = models.ForeignKey(Poll)
    # Display position of the option within its poll; default query ordering
    # below sorts by it.
    pos = models.IntegerField()
    content = models.CharField(blank=True, null=True, max_length=255)
    class Meta:
        # Return options in display order and index (poll, pos) lookups.
        ordering = ['pos']
        index_together = (('poll', 'pos'),)
class Vote(models.Model):
    id = models.AutoField(primary_key=True)
    poll = models.ForeignKey(Poll)
    option = models.ForeignKey(Option)
    # Voter identity is stored denormalized (id, avatar URL, display name)
    # rather than as a ForeignKey -- presumably users live outside this app;
    # verify before relying on referential integrity.
    user_id = models.IntegerField()
    avatar = models.CharField(max_length=255)
    name = models.CharField(max_length=50)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.