repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
Anlim/decode-Django | Django-1.5.1/django/core/serializers/__init__.py | 113 | 4000 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_query_set)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv" : "path.to.csv.serializer",
"txt" : "path.to.txt.serializer",
}
"""
from django.conf import settings
from django.utils import importlib
from django.utils import six
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
# Maps a format name to the dotted path of the module implementing it; the
# module is imported lazily by _load_serializers().
BUILTIN_SERIALIZERS = {
    "xml" : "django.core.serializers.xml_serializer",
    "python" : "django.core.serializers.python",
    "json" : "django.core.serializers.json",
}

# Check for PyYaml and register the serializer if it's available.
try:
    import yaml
    BUILTIN_SERIALIZERS["yaml"] = "django.core.serializers.pyyaml"
except ImportError:
    pass

# Global registry of loaded serializer modules, keyed by format name.
# Populated lazily by _load_serializers(); empty means "not yet loaded".
_serializers = {}
def register_serializer(format, serializer_module, serializers=None):
    """Register a new serializer.

    ``serializer_module`` should be the fully qualified module name
    for the serializer.

    If ``serializers`` is provided, the registration will be added
    to the provided dictionary.

    If ``serializers`` is not provided, the registration will be made
    directly into the global register of serializers. Adding serializers
    directly is not a thread-safe operation.
    """
    registering_globally = serializers is None
    # Ensure the global registry is initialized before mutating it.
    if registering_globally and not _serializers:
        _load_serializers()
    module = importlib.import_module(serializer_module)
    target = _serializers if registering_globally else serializers
    target[format] = module
def unregister_serializer(format):
    "Unregister a given serializer. This is not a thread-safe operation."
    if not _serializers:
        _load_serializers()
    # Remove the entry, translating an unknown format into the
    # framework-specific exception callers expect.
    try:
        del _serializers[format]
    except KeyError:
        raise SerializerDoesNotExist(format)
def get_serializer(format):
    """Return the Serializer class registered for ``format``.

    Raises SerializerDoesNotExist for an unknown format.
    """
    if not _serializers:
        _load_serializers()
    try:
        module = _serializers[format]
    except KeyError:
        raise SerializerDoesNotExist(format)
    return module.Serializer
def get_serializer_formats():
    """Return the names of every registered serialization format."""
    if not _serializers:
        _load_serializers()
    return [fmt for fmt in _serializers]
def get_public_serializer_formats():
    """Return format names whose Serializer is not flagged internal-only."""
    if not _serializers:
        _load_serializers()
    public = []
    for fmt, module in six.iteritems(_serializers):
        if not module.Serializer.internal_use_only:
            public.append(fmt)
    return public
def get_deserializer(format):
    """Return the Deserializer callable registered for ``format``.

    Raises SerializerDoesNotExist for an unknown format.
    """
    if not _serializers:
        _load_serializers()
    try:
        module = _serializers[format]
    except KeyError:
        raise SerializerDoesNotExist(format)
    return module.Deserializer
def serialize(format, queryset, **options):
    """
    Serialize a queryset (or any iterator that returns database objects) using
    a certain serializer.
    """
    serializer = get_serializer(format)()
    serializer.serialize(queryset, **options)
    return serializer.getvalue()
def deserialize(format, stream_or_string, **options):
    """
    Deserialize a stream or a string. Returns an iterator that yields ``(obj,
    m2m_relation_dict)``, where ``obj`` is a instantiated -- but *unsaved* --
    object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
    list_of_related_objects}``.
    """
    deserializer = get_deserializer(format)
    return deserializer(stream_or_string, **options)
def _load_serializers():
    """
    Register built-in and settings-defined serializers. This is done lazily so
    that user code has a chance to (e.g.) set up custom settings without
    needing to be careful of import order.
    """
    global _serializers
    # Build into a private dict first, then swap it in, so a partially
    # populated registry is never observable through the module global.
    loaded = {}
    for format, module_name in BUILTIN_SERIALIZERS.items():
        register_serializer(format, module_name, loaded)
    if hasattr(settings, "SERIALIZATION_MODULES"):
        for format, module_name in settings.SERIALIZATION_MODULES.items():
            register_serializer(format, module_name, loaded)
    _serializers = loaded
| gpl-2.0 |
mtndesign/myVim | myvim/pyflakes-pathogen/ftplugin/python/pyflakes/setup.py | 37 | 1027 | #!/usr/bin/python
# (c) 2005-2009 Divmod, Inc. See LICENSE file for details
from distutils.core import setup
# Distutils package metadata for pyflakes 0.4.0 (circa 2009; predates the
# project's move to setuptools).
setup(
    name="pyflakes",
    license="MIT",
    version="0.4.0",
    description="passive checker of Python programs",
    author="Phil Frost",
    maintainer="Moe Aboulkheir",
    maintainer_email="moe@divmod.com",
    url="http://www.divmod.org/trac/wiki/DivmodPyflakes",
    # Ship the package, its CLI helpers, and its test suite.
    packages=["pyflakes", "pyflakes.scripts", "pyflakes.test"],
    scripts=["bin/pyflakes"],
    long_description="""Pyflakes is program to analyze Python programs and detect various errors. It
works by parsing the source file, not importing it, so it is safe to use on
modules with side effects. It's also much faster.""",
    classifiers=[
        "Development Status :: 6 - Mature",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Topic :: Software Development",
        "Topic :: Utilities",
    ])
| mit |
imply/chuu | tools/win/split_link/graph_dependencies.py | 145 | 2291 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import subprocess
import sys
# Absolute directory containing this script; used below to locate the bundled
# viz.js renderer.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def main():
    # Generates an HTML report of undesirable gyp dependency paths
    # (chrome browser -> webcore) rendered as a graphviz SVG via viz.js.
    # Python 2 script; expects to be run from a Chromium checkout root.
    if len(sys.argv) != 2:
        print 'usage: %s <output.html>' % sys.argv[0]
        return 1

    env = os.environ.copy()
    env['GYP_GENERATORS'] = 'dump_dependency_json'
    print 'Dumping dependencies...'
    # NOTE(review): a list argv combined with shell=True only behaves as
    # intended on Windows (this lives under tools/win); on POSIX the extra
    # list items would be passed to the shell, not the script — confirm.
    popen = subprocess.Popen(
        ['python', 'build/gyp_chromium'],
        shell=True, env=env)
    popen.communicate()
    if popen.returncode != 0:
        return popen.returncode

    print 'Finding problems...'
    popen = subprocess.Popen(
        ['python', 'tools/gyp-explain.py', '--dot',
         'chrome.gyp:browser#', 'core.gyp:webcore#'],
        stdout=subprocess.PIPE,
        shell=True)
    out, _ = popen.communicate()
    if popen.returncode != 0:
        return popen.returncode

    # Break into pairs to uniq to make graph less of a mess.
    print 'Simplifying...'
    deduplicated = set()
    # First two lines and the trailing line of the dot output are scaffolding,
    # not edge paths.
    lines = out.splitlines()[2:-1]
    for line in lines:
        line = line.strip('\r\n ;')
        pairs = line.split(' -> ')
        for i in range(len(pairs) - 1):
            deduplicated.add('%s -> %s;' % (pairs[i], pairs[i + 1]))
    graph = 'strict digraph {\n' + '\n'.join(sorted(deduplicated)) + '\n}'

    print 'Writing report to %s...' % sys.argv[1]
    path_count = len(out.splitlines())
    # Inline the bundled viz.js source so the report is a single standalone file.
    with open(os.path.join(BASE_DIR, 'viz.js', 'viz.js')) as f:
        viz_js = f.read()
    with open(sys.argv[1], 'w') as f:
        f.write(PREFIX % path_count)
        f.write(graph)
        f.write(SUFFIX % viz_js)
    print 'Done.'
# HTML emitted before the graphviz source; the %d placeholder receives the
# dependency-path count.
PREFIX = r'''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Undesirable Dependencies</title>
</head>
<body>
<h1>Undesirable Dependencies</h1>
<h2>browser → webcore</h2>
<h3>%d paths</h3>
<script type="text/vnd.graphviz" id="graph">
'''
# HTML emitted after the graphviz source; the %s placeholder receives the
# inlined viz.js source, which renders the graph to SVG client-side.
SUFFIX = r'''
</script>
<script>%s</script>
<div id="output">Rendering...</div>
<script>
setTimeout(function() {
document.getElementById("output").innerHTML =
Viz(document.getElementById("graph").innerHTML, "svg");
}, 1);
</script>
</body>
</html>
'''

if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause |
bvcompsci/cemetery-map | migrations/env.py | 557 | 2883 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# Flask-Migrate integration: pull the DB URL and metadata from the running
# Flask app instead of hard-coding them in alembic.ini.
from flask import current_app
config.set_main_option('sqlalchemy.url',
                       current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the context with just a URL and not an Engine, so no DBAPI
    needs to be available. Calls to context.execute() emit the given SQL
    to the script output instead of a live connection.
    """
    context.configure(url=config.get_main_option("sqlalchemy.url"))
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """

    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                # Dropping all directives suppresses the empty revision file.
                directives[:] = []
                logger.info('No changes in schema detected.')

    # NullPool: each migration run opens exactly one short-lived connection.
    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      process_revision_directives=process_revision_directives,
                      **current_app.extensions['migrate'].configure_args)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even if a migration fails.
        connection.close()
# Entry point: Alembic sets offline/online mode from the CLI invocation
# (e.g. `alembic upgrade --sql` runs offline).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| mit |
kanteshraj/ansible | lib/ansible/template/__init__.py | 18 | 19620 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import contextlib
import os
import re
from six import string_types, text_type, binary_type, StringIO
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.utils import concat as j2_concat
from jinja2.runtime import StrictUndefined
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable
from ansible.plugins import filter_loader, lookup_loader, test_loader
from ansible.template.safe_eval import safe_eval
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.debug import debug
from numbers import Number
__all__ = ['Templar']

# A regex for checking to see if a variable we're trying to
# expand is just a single variable name.

# Primitive Types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = ( bool, Number )

# Template header prefix used to override jinja2 options per-template.
JINJA2_OVERRIDE = '#jinja2:'
def _escape_backslashes(data, jinja_env):
    """Double backslashes within jinja2 expressions

    A user may enter something like this in a playbook::

      debug:
        msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"

    The text inside "{{ }}" is interpreted by yaml, then python, then jinja2,
    so backslash escapes get unescaped one extra time compared to text outside
    the expression. Rather than forcing playbook authors to remember different
    escaping rules inside vs. outside expressions, this doubles backslashes in
    string tokens that occur inside a jinja2 variable expression.
    """
    if '\\' in data and '{{' in data:
        pieces = []
        inside_expression = False
        for token in jinja_env.lex(jinja_env.preprocess(data)):
            kind, text = token[1], token[2]
            if kind == 'variable_begin':
                inside_expression = True
                pieces.append(text)
            elif kind == 'variable_end':
                inside_expression = False
                pieces.append(text)
            elif inside_expression and kind == 'string':
                # Double backslashes only if we're inside of a jinja2 variable
                pieces.append(text.replace('\\', '\\\\'))
            else:
                pieces.append(text)
        data = ''.join(pieces)

    return data
def _count_newlines_from_end(in_str):
'''
Counts the number of newlines at the end of a string. This is used during
the jinja2 templating to ensure the count matches the input, since some newlines
may be thrown away during the templating.
'''
try:
i = len(in_str)
j = i -1
while in_str[j] == '\n':
j -= 1
return i - 1 - j
except IndexError:
# Uncommon cases: zero length string and string containing only newlines
return i
class Templar:

    '''
    The main class for templating, with the main entry-point of template().
    '''

    def __init__(self, loader, shared_loader_obj=None, variables=dict()):
        # NOTE(review): ``variables=dict()`` is a shared mutable default; it
        # is only read/copied here, but confirm callers always pass their own.
        self._loader = loader
        self._filters = None
        self._tests = None
        self._available_variables = variables

        if loader:
            self._basedir = loader.get_basedir()
        else:
            self._basedir = './'

        if shared_loader_obj:
            self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
            self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
        else:
            self._filter_loader = filter_loader
            self._lookup_loader = lookup_loader

        # flags to determine whether certain failures during templating
        # should result in fatal errors being raised
        self._fail_on_lookup_errors = True
        self._fail_on_filter_errors = True
        self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR

        self.environment = Environment(
            trim_blocks=True,
            undefined=StrictUndefined,
            extensions=self._get_extensions(),
            finalize=self._finalize,
            loader=FileSystemLoader(self._basedir),
        )
        self.environment.template_class = AnsibleJ2Template

        # Matches a string that is nothing but a single "{{ name }}" expression.
        self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))

        self.block_start = self.environment.block_start_string
        self.block_end = self.environment.block_end_string
        self.variable_start = self.environment.variable_start_string
        self.variable_end = self.environment.variable_end_string
        # Matches opening/closing jinja2 tag markers for _clean_data().
        self._clean_regex = re.compile(r'(?:%s[%s%s]|[%s%s]%s)' % (self.variable_start[0], self.variable_start[1], self.block_start[1], self.block_end[0], self.variable_end[0], self.variable_end[1]))

    def _get_filters(self):
        '''
        Returns filter plugins, after loading and caching them if need be
        '''

        if self._filters is not None:
            return self._filters.copy()

        plugins = [x for x in self._filter_loader.all()]

        self._filters = dict()
        for fp in plugins:
            self._filters.update(fp.filters())
        # Tests are merged in so they are also callable in filter position.
        self._filters.update(self._get_tests())

        return self._filters.copy()

    def _get_tests(self):
        '''
        Returns tests plugins, after loading and caching them if need be
        '''

        if self._tests is not None:
            return self._tests.copy()

        plugins = [x for x in test_loader.all()]

        self._tests = dict()
        for fp in plugins:
            self._tests.update(fp.tests())

        return self._tests.copy()

    def _get_extensions(self):
        '''
        Return jinja2 extensions to load.

        If some extensions are set via jinja_extensions in ansible.cfg, we try
        to load them with the jinja environment.
        '''
        jinja_exts = []
        if C.DEFAULT_JINJA2_EXTENSIONS:
            # make sure the configuration directive doesn't contain spaces
            # and split extensions in an array
            jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
        return jinja_exts

    def _clean_data(self, orig_data):
        ''' remove jinja2 template tags from a string '''

        if not isinstance(orig_data, string_types):
            return orig_data

        with contextlib.closing(StringIO(orig_data)) as data:
            # these variables keep track of opening block locations, as we only
            # want to replace matched pairs of print/block tags
            print_openings = []
            block_openings = []
            for mo in self._clean_regex.finditer(orig_data):
                token = mo.group(0)
                token_start = mo.start(0)

                if token[0] == self.variable_start[0]:
                    if token == self.block_start:
                        block_openings.append(token_start)
                    elif token == self.variable_start:
                        print_openings.append(token_start)

                elif token[1] == self.variable_end[1]:
                    prev_idx = None
                    if token == '%}' and block_openings:
                        prev_idx = block_openings.pop()
                    elif token == '}}' and print_openings:
                        prev_idx = print_openings.pop()

                    if prev_idx is not None:
                        # replace the opening
                        data.seek(prev_idx, os.SEEK_SET)
                        data.write('{#')
                        # replace the closing
                        data.seek(token_start, os.SEEK_SET)
                        data.write('#}')

                else:
                    raise AnsibleError("Error while cleaning data for safety: unhandled regex match")

            return data.getvalue()

    def set_available_variables(self, variables):
        '''
        Sets the list of template variables this Templar instance will use
        to template things, so we don't have to pass them around between
        internal methods.
        '''

        assert isinstance(variables, dict)
        self._available_variables = variables.copy()

    def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, convert_data=True):
        '''
        Templates (possibly recursively) any given data as input. If convert_bare is
        set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
        before being sent through the template engine.
        '''

        # Don't template unsafe variables, instead drop them back down to
        # their constituent type.
        if hasattr(variable, '__UNSAFE__'):
            if isinstance(variable, text_type):
                return self._clean_data(text_type(variable))
            elif isinstance(variable, binary_type):
                return self._clean_data(bytes(variable))
            else:
                return self._clean_data(variable._obj)

        try:
            if convert_bare:
                variable = self._convert_bare_variable(variable)

            if isinstance(variable, string_types):
                result = variable
                if self._contains_vars(variable):

                    # Check to see if the string we are trying to render is just referencing a single
                    # var. In this case we don't want to accidentally change the type of the variable
                    # to a string by using the jinja template renderer. We just want to pass it.
                    only_one = self.SINGLE_VAR.match(variable)
                    if only_one:
                        var_name = only_one.group(1)
                        if var_name in self._available_variables:
                            resolved_val = self._available_variables[var_name]
                            if isinstance(resolved_val, NON_TEMPLATED_TYPES):
                                return resolved_val
                            elif resolved_val is None:
                                return C.DEFAULT_NULL_REPRESENTATION

                    result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides)
                    if convert_data:
                        # if this looks like a dictionary or list, convert it to such using the safe_eval method
                        if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
                           result.startswith("[") or result in ("True", "False"):
                            eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
                            if eval_results[1] is None:
                                result = eval_results[0]
                            else:
                                # FIXME: if the safe_eval raised an error, should we do something with it?
                                pass

                #return self._clean_data(result)
                return result

            elif isinstance(variable, (list, tuple)):
                return [self.template(v, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) for v in variable]
            elif isinstance(variable, dict):
                d = {}
                # we don't use iteritems() here to avoid problems if the underlying dict
                # changes sizes due to the templating, which can happen with hostvars
                for k in variable.keys():
                    d[k] = self.template(variable[k], preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
                return d
            else:
                return variable

        except AnsibleFilterError:
            if self._fail_on_filter_errors:
                raise
            else:
                return variable

    def _contains_vars(self, data):
        '''
        returns True if the data contains a variable pattern
        '''
        return self.environment.block_start_string in data or self.environment.variable_start_string in data

    def _convert_bare_variable(self, variable):
        '''
        Wraps a bare string, which may have an attribute portion (ie. foo.bar)
        in jinja2 variable braces so that it is evaluated properly.
        '''

        if isinstance(variable, string_types):
            contains_filters = "|" in variable
            first_part = variable.split("|")[0].split(".")[0].split("[")[0]
            if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
                return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)

        # the variable didn't meet the conditions to be converted,
        # so just return it as-is
        return variable

    def _finalize(self, thing):
        '''
        A custom finalize method for jinja2, which prevents None from being returned
        '''
        return thing if thing is not None else ''

    def _lookup(self, name, *args, **kwargs):
        # Implements the jinja2-global lookup('plugin', ...) call.
        instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)

        if instance is not None:
            # Imported here to avoid a circular import at module load time.
            from ansible.utils.listify import listify_lookup_plugin_terms
            loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
            # safely catch run failures per #5059
            try:
                ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                raise AnsibleUndefinedVariable(e)
            except Exception as e:
                if self._fail_on_lookup_errors:
                    raise
                ran = None

            if ran:
                from ansible.vars.unsafe_proxy import UnsafeProxy
                ran = UnsafeProxy(",".join(ran))

            return ran
        else:
            raise AnsibleError("lookup plugin (%s) not found" % name)

    def _do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None):
        # Renders ``data`` through a per-call overlay of the jinja2 environment.

        # For preserving the number of input newlines in the output (used
        # later in this method)
        data_newlines = _count_newlines_from_end(data)

        if fail_on_undefined is None:
            fail_on_undefined = self._fail_on_undefined_errors

        try:
            # allows template header overrides to change jinja2 options.
            if overrides is None:
                myenv = self.environment.overlay()
            else:
                myenv = self.environment.overlay(overrides)

            # Get jinja env overrides from template
            if data.startswith(JINJA2_OVERRIDE):
                eol = data.find('\n')
                line = data[len(JINJA2_OVERRIDE):eol]
                data = data[eol+1:]
                for pair in line.split(','):
                    (key,val) = pair.split(':')
                    key = key.strip()
                    setattr(myenv, key, ast.literal_eval(val.strip()))

            #FIXME: add tests
            myenv.filters.update(self._get_filters())
            myenv.tests.update(self._get_tests())

            if escape_backslashes:
                # Allow users to specify backslashes in playbooks as "\\"
                # instead of as "\\\\".
                data = _escape_backslashes(data, myenv)

            try:
                t = myenv.from_string(data)
            except TemplateSyntaxError as e:
                raise AnsibleError("template error while templating string: %s" % str(e))
            except Exception as e:
                if 'recursion' in str(e):
                    raise AnsibleError("recursive loop detected in template string: %s" % data)
                else:
                    return data

            t.globals['lookup'] = self._lookup
            t.globals['finalize'] = self._finalize

            jvars = AnsibleJ2Vars(self, t.globals)

            new_context = t.new_context(jvars, shared=True)
            rf = t.root_render_func(new_context)

            try:
                res = j2_concat(rf)
            except TypeError as te:
                if 'StrictUndefined' in str(te):
                    raise AnsibleUndefinedVariable(
                        "Unable to look up a name or access an attribute in template string. " + \
                        "Make sure your variable name does not contain invalid characters like '-'."
                    )
                else:
                    debug("failing because of a type error, template data is: %s" % data)
                    raise AnsibleError("an unexpected type error occurred. Error was %s" % te)

            if preserve_trailing_newlines:
                # The low level calls above do not preserve the newline
                # characters at the end of the input data, so we use the
                # calculate the difference in newlines and append them
                # to the resulting output for parity
                #
                # jinja2 added a keep_trailing_newline option in 2.7 when
                # creating an Environment.  That would let us make this code
                # better (remove a single newline if
                # preserve_trailing_newlines is False).  Once we can depend on
                # that version being present, modify our code to set that when
                # initializing self.environment and remove a single trailing
                # newline here if preserve_newlines is False.
                res_newlines = _count_newlines_from_end(res)
                if data_newlines > res_newlines:
                    res += '\n' * (data_newlines - res_newlines)
            return res
        except (UndefinedError, AnsibleUndefinedVariable) as e:
            if fail_on_undefined:
                raise AnsibleUndefinedVariable(e)
            else:
                #TODO: return warning about undefined var
                return data
| gpl-3.0 |
hugdiniz/anuarioDjango | yearbook/migrations/0005_auto_20141214_0017.py | 1 | 1444 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema changes for the yearbook app: removes
    # Lotacao.comentarios, adds Pessoa.historico (M2M to Lotacao) and
    # Unidade_organizacional.localidade_sala (FK to Sala), and relaxes
    # several fields to allow blank/null values.

    dependencies = [
        ('yearbook', '0004_auto_20141212_1558'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='lotacao',
            name='comentarios',
        ),
        migrations.AddField(
            model_name='pessoa',
            name='historico',
            field=models.ManyToManyField(related_name=b'lotacoes_anteriores', to='yearbook.Lotacao', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='unidade_organizacional',
            name='localidade_sala',
            field=models.ForeignKey(blank=True, to='yearbook.Sala', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='lotacao',
            name='funcao',
            field=models.ForeignKey(blank=True, to='yearbook.Funcao', null=True),
        ),
        migrations.AlterField(
            model_name='pessoa',
            name='ferias_fim',
            field=models.DateTimeField(null=True, verbose_name=b'fim das ferias', blank=True),
        ),
        migrations.AlterField(
            model_name='pessoa',
            name='ferias_inicio',
            field=models.DateTimeField(null=True, verbose_name=b'inicio das ferias', blank=True),
        ),
    ]
| gpl-2.0 |
stevenmirabito/csh-asterisk | pystrix/ami/dahdi_events.py | 3 | 3742 | """
pystrix.ami.dahdi_events
========================
Provides defnitions and filtering rules for events that may be raised by Asterisk's DAHDI module.
Legal
-----
This file is part of pystrix.
pystrix is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU General Public License and
GNU Lesser General Public License along with this program. If not, see
<http://www.gnu.org/licenses/>.
(C) Ivrnet, inc., 2011
Authors:
- Neil Tallim <n.tallim@ivrnet.com>
The events implemented by this module follow the definitions provided by
http://www.asteriskdocs.org/ and https://wiki.asterisk.org/
"""
from ami import (_Aggregate, _Event)
import generic_transforms
class DAHDIShowChannels(_Event):
    """
    Describes the current state of a DAHDI channel.

    Yes, the event's name is pluralised.

    - 'AccountCode': unknown (not present if the DAHDI channel is down)
    - 'Alarm': unknown
    - 'Channel': The channel being described (not present if the DAHDI channel is down)
    - 'Context': The Asterisk context associated with the channel
    - 'DAHDIChannel': The ID of the DAHDI channel
    - 'Description': unknown
    - 'DND': 'Disabled' or 'Enabled'
    - 'Signalling': A lexical description of the current signalling state
    - 'SignallingCode': A numeric description of the current signalling state
    - 'Uniqueid': unknown (not present if the DAHDI channel is down)
    """
    def process(self):
        """
        Translates the 'DND' header's value into a bool.

        Translates the 'DAHDIChannel' and 'SignallingCode' headers' values into ints, or -1 on
        failure.
        """
        # Base-class processing first; transforms mutate ``headers`` in place.
        (headers, data) = _Event.process(self)
        generic_transforms.to_bool(headers, ('DND',), truth_value='Enabled')
        generic_transforms.to_int(headers, ('DAHDIChannel', 'SignallingCode',), -1)
        return (headers, data)
class DAHDIShowChannelsComplete(_Event):
    """
    Indicates that all DAHDI channels have been described.

    - 'Items': The number of items returned prior to this event
    """
    def process(self):
        """
        Translates the 'Items' header's value into an int, or -1 on failure.
        """
        (headers, data) = _Event.process(self)
        generic_transforms.to_int(headers, ('Items',), -1)
        return (headers, data)
#List-aggregation events
####################################################################################################
#These define non-Asterisk-native event-types that collect multiple events (cases where multiple
#events are generated in response to a single action) and emit the bundle as a single message.
class DAHDIShowChannels_Aggregate(_Aggregate):
    """
    Emitted after all DAHDI channels have been enumerated in response to a DAHDIShowChannels
    request.

    Its members consist of DAHDIShowChannels events.

    It is finalised by DAHDIShowChannelsComplete.
    """
    _name = "DAHDIShowChannels_Aggregate"

    # Event classes collected into / terminating this aggregate.
    _aggregation_members = (DAHDIShowChannels,)
    _aggregation_finalisers = (DAHDIShowChannelsComplete,)

    def _finalise(self, event):
        # Verify the finaliser's 'Items' count matches the collected members
        # before handing off to the base implementation.
        self._check_list_items_count(event, 'Items')
        return _Aggregate._finalise(self, event)
| apache-2.0 |
hargup/sympy | sympy/physics/vector/tests/test_point.py | 40 | 3940 | from sympy.physics.vector import dynamicsymbols, Point, ReferenceFrame
from sympy.utilities.pytest import raises
def test_point_v1pt_theorys():
    # Exercises Point.v1pt_theory for a point fixed in, then moving in, a
    # frame B rotating about N's z-axis.
    q, q2 = dynamicsymbols('q q2')
    qd, q2d = dynamicsymbols('q q2', 1)
    qdd, q2dd = dynamicsymbols('q q2', 2)
    N = ReferenceFrame('N')
    B = ReferenceFrame('B')
    B.set_ang_vel(N, qd * B.z)
    O = Point('O')
    P = O.locatenew('P', B.x)
    P.set_vel(B, 0)
    O.set_vel(N, 0)
    assert P.v1pt_theory(O, N, B) == qd * B.y
    O.set_vel(N, N.x)
    assert P.v1pt_theory(O, N, B) == N.x + qd * B.y
    P.set_vel(B, B.z)
    assert P.v1pt_theory(O, N, B) == B.z + N.x + qd * B.y
def test_point_a1pt_theorys():
    # Exercises Point.a1pt_theory (acceleration one-point theorem) with the
    # same rotating-frame setup as the velocity test above.
    q, q2 = dynamicsymbols('q q2')
    qd, q2d = dynamicsymbols('q q2', 1)
    qdd, q2dd = dynamicsymbols('q q2', 2)
    N = ReferenceFrame('N')
    B = ReferenceFrame('B')
    B.set_ang_vel(N, qd * B.z)
    O = Point('O')
    P = O.locatenew('P', B.x)
    P.set_vel(B, 0)
    O.set_vel(N, 0)
    assert P.a1pt_theory(O, N, B) == -(qd**2) * B.x + qdd * B.y
    P.set_vel(B, q2d * B.z)
    assert P.a1pt_theory(O, N, B) == -(qd**2) * B.x + qdd * B.y + q2dd * B.z
    O.set_vel(N, q2d * B.x)
    assert P.a1pt_theory(O, N, B) == ((q2dd - qd**2) * B.x + (q2d * qd + qdd) * B.y +
                                      q2dd * B.z)
def test_point_v2pt_theorys():
    # Exercises Point.v2pt_theory for two points fixed in a rotating frame.
    q = dynamicsymbols('q')
    qd = dynamicsymbols('q', 1)
    N = ReferenceFrame('N')
    B = N.orientnew('B', 'Axis', [q, N.z])
    O = Point('O')
    P = O.locatenew('P', 0)
    O.set_vel(N, 0)
    assert P.v2pt_theory(O, N, B) == 0
    P = O.locatenew('P', B.x)
    assert P.v2pt_theory(O, N, B) == (qd * B.z ^ B.x)
    O.set_vel(N, N.x)
    assert P.v2pt_theory(O, N, B) == N.x + qd * B.y
def test_point_a2pt_theorys():
    # Exercises Point.a2pt_theory for two points fixed in a rotating frame.
    q = dynamicsymbols('q')
    qd = dynamicsymbols('q', 1)
    qdd = dynamicsymbols('q', 2)
    N = ReferenceFrame('N')
    B = N.orientnew('B', 'Axis', [q, N.z])
    O = Point('O')
    P = O.locatenew('P', 0)
    O.set_vel(N, 0)
    assert P.a2pt_theory(O, N, B) == 0
    P.set_pos(O, B.x)
    assert P.a2pt_theory(O, N, B) == (-qd**2) * B.x + (qdd) * B.y
def test_point_funcs():
    # Mixed exercise of pos_from/set_vel/vel plus the one- and two-point
    # theorems with nonzero frame angular velocity.
    q, q2 = dynamicsymbols('q q2')
    qd, q2d = dynamicsymbols('q q2', 1)
    qdd, q2dd = dynamicsymbols('q q2', 2)
    N = ReferenceFrame('N')
    B = ReferenceFrame('B')
    B.set_ang_vel(N, 5 * B.y)
    O = Point('O')
    P = O.locatenew('P', q * B.x)
    assert P.pos_from(O) == q * B.x
    P.set_vel(B, qd * B.x + q2d * B.y)
    assert P.vel(B) == qd * B.x + q2d * B.y
    O.set_vel(N, 0)
    assert O.vel(N) == 0
    assert P.a1pt_theory(O, N, B) == ((-25 * q + qdd) * B.x + (q2dd) * B.y +
                                      (-10 * qd) * B.z)

    B = N.orientnew('B', 'Axis', [q, N.z])
    O = Point('O')
    P = O.locatenew('P', 10 * B.x)
    O.set_vel(N, 5 * N.x)
    assert O.vel(N) == 5 * N.x
    assert P.a2pt_theory(O, N, B) == (-10 * qd**2) * B.x + (10 * qdd) * B.y

    B.set_ang_vel(N, 5 * B.y)
    O = Point('O')
    P = O.locatenew('P', q * B.x)
    P.set_vel(B, qd * B.x + q2d * B.y)
    O.set_vel(N, 0)
    assert P.v1pt_theory(O, N, B) == qd * B.x + q2d * B.y - 5 * q * B.z
def test_point_pos():
    """Position vectors compose along the point chain and negate when reversed."""
    q = dynamicsymbols('q')
    N = ReferenceFrame('N')
    B = N.orientnew('B', 'Axis', [q, N.z])
    O = Point('O')
    P = O.locatenew('P', 10 * N.x + 5 * B.x)
    assert P.pos_from(O) == 10 * N.x + 5 * B.x
    Q = P.locatenew('Q', 10 * N.y + 5 * B.y)
    assert Q.pos_from(P) == 10 * N.y + 5 * B.y
    # O -> Q is the sum of the two hops; Q -> O is its negation.
    assert Q.pos_from(O) == 10 * N.x + 10 * N.y + 5 * B.x + 5 * B.y
    assert O.pos_from(Q) == -10 * N.x - 10 * N.y - 5 * B.x - 5 * B.y
def test_point_partial_velocity():
    """partial_velocity returns coefficients of generalized speeds in p's velocity."""
    N = ReferenceFrame('N')
    A = ReferenceFrame('A')
    p = Point('p')
    u1, u2 = dynamicsymbols('u1, u2')
    p.set_vel(N, u1 * A.x + u2 * N.y)
    assert p.partial_velocity(N, u1) == A.x
    # Multiple speeds yield a tuple of partial velocities, in argument order.
    assert p.partial_velocity(N, u1, u2) == (A.x, N.y)
    # Velocity in A was never defined, so requesting it must raise.
    raises(ValueError, lambda: p.partial_velocity(A, u1))
| bsd-3-clause |
kitelightning/UnrealEnginePython | examples/fbx_curves_extractor.py | 3 | 2909 | from unreal_engine import FbxManager, FbxIOSettings, FbxImporter, FbxScene
import unreal_engine as ue
class FbxCurvesExtractor:
    """Load an FBX file and expose its animation stacks, layers and curves."""

    def __init__(self, filename):
        """Parse *filename* into an in-memory FBX scene."""
        self.manager = FbxManager()
        settings = FbxIOSettings(self.manager, 'IOSROOT')
        self.manager.set_io_settings(settings)
        fbx_importer = FbxImporter(self.manager, 'importer')
        fbx_importer.initialize(filename, settings)
        self.scene = FbxScene(self.manager, 'scene')
        fbx_importer._import(self.scene)

    def get_objects_by_class(self, name):
        """Return every scene source object whose class name equals *name*."""
        candidates = (self.scene.get_src_object(index)
                      for index in range(0, self.scene.get_src_object_count()))
        return [obj for obj in candidates if obj.get_class_name() == name]

    def get_members_by_class(self, parent, name):
        """Return the members of *parent* whose class name equals *name*."""
        candidates = (parent.get_member(index)
                      for index in range(0, parent.get_member_count()))
        return [member for member in candidates if member.get_class_name() == name]

    def get_anim_stacks(self):
        """Return all FbxAnimStack objects found in the scene."""
        return self.get_objects_by_class('FbxAnimStack')

    def get_anim_layers(self, stack):
        """Return all FbxAnimLayer members of *stack*."""
        return self.get_members_by_class(stack, 'FbxAnimLayer')

    def get_properties(self, obj):
        """Yield every FBX property attached to *obj*, in linked-list order."""
        current = obj.get_first_property()
        while current:
            yield current
            current = obj.get_next_property(current)

    def get_anim_curves(self, layer):
        """Collect channel/key data for every property animated on *layer*."""
        curves = []
        for index in range(0, self.scene.get_src_object_count()):
            obj = self.scene.get_src_object(index)
            # Retrieve each property of the object and check for a curve node.
            for prop in self.get_properties(obj):
                curve_node = prop.get_curve_node(layer)
                if not curve_node:
                    continue
                channels = []
                for chan_num in range(0, curve_node.get_channels_count()):
                    # Only the first curve of each channel is inspected.
                    curve = curve_node.get_curve(chan_num, 0)
                    if not curve:
                        continue
                    keys = [(curve.key_get_seconds(key_id), curve.key_get_value(key_id))
                            for key_id in range(0, curve.key_get_count())]
                    channels.append({'name': curve_node.get_channel_name(chan_num),
                                     'keys': keys})
                curves.append({'object': obj.get_name(),
                               'class': obj.get_class_name(),
                               'property': prop.get_name(),
                               'channels': channels})
        return curves
# Hard-coded input path -- change to point at the FBX file to inspect.
filename = 'C:/Users/roberto/Downloads/testCam/testCam.fbx'
#filename = 'C:/Users/Roberto/Desktop/Kaiju_Assets/Slicer/Animations/slicer_attack.fbx'
extractor = FbxCurvesExtractor(filename)
# Walk stack -> layer -> curve and log a description of every animation curve.
for stack in extractor.get_anim_stacks():
    for layer in extractor.get_anim_layers(stack):
        for curve in extractor.get_anim_curves(layer):
ue.log(curve) | mit |
nuuuboo/odoo | addons/lunch/wizard/lunch_order.py | 440 | 1299 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_order_order(osv.TransientModel):
    """Transient wizard used to place the selected lunch order lines."""
    _name = 'lunch.order.order'
    _description = 'Wizard to order a meal'

    def order(self, cr, uid, ids, context=None):
        """Delegate the actual ordering to the lunch.order.line model."""
        line_model = self.pool.get('lunch.order.line')
        return line_model.order(cr, uid, ids, context=context)
| agpl-3.0 |
rghe/ansible | test/units/modules/network/f5/test_bigip_profile_dns.py | 10 | 4948 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_dns import ApiParameters
from library.modules.bigip_profile_dns import ModuleParameters
from library.modules.bigip_profile_dns import ModuleManager
from library.modules.bigip_profile_dns import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_profile_dns import ApiParameters
from ansible.modules.network.f5.bigip_profile_dns import ModuleParameters
from ansible.modules.network.f5.bigip_profile_dns import ModuleManager
from ansible.modules.network.f5.bigip_profile_dns import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
# Directory that holds the JSON/text fixtures, plus a process-wide cache so
# each fixture file is read and parsed at most once per test run.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load fixture file *name* from the fixtures directory, with caching.

    The file content is parsed as JSON when possible; otherwise the raw
    text is returned. Results are memoized in ``fixture_data`` keyed by
    the absolute path.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON (json.JSONDecodeError is a ValueError subclass) --
        # keep the raw text. Previously a bare `except Exception` was
        # used, which could also hide unrelated programming errors.
        pass

    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Tests for the module-side and API-side parameter adapter classes."""

    def test_module_parameters(self):
        """User-supplied args should be normalized by ModuleParameters."""
        args = dict(
            name='foo',
            parent='bar',
            enable_dns_express=True,
            enable_zone_transfer=True,
            enable_dnssec=True,
            enable_gtm=True,
            process_recursion_desired=True,
            use_local_bind=True,
            enable_dns_firewall=True,
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        # A bare parent name is expanded to a full partition path.
        assert p.parent == '/Common/bar'
        assert p.enable_dns_express is True
        assert p.enable_zone_transfer is True
        assert p.enable_dnssec is True
        assert p.enable_gtm is True
        assert p.process_recursion_desired is True
        assert p.use_local_bind is True
        assert p.enable_dns_firewall is True

    def test_api_parameters(self):
        """Values loaded from a BIG-IP API fixture should map to booleans."""
        args = load_fixture('load_ltm_profile_dns_1.json')
        p = ApiParameters(params=args)
        assert p.name == 'foo'
        assert p.parent == '/Common/dns'
        assert p.enable_dns_express is False
        assert p.enable_zone_transfer is True
        assert p.enable_dnssec is False
        assert p.enable_gtm is False
        assert p.process_recursion_desired is True
        assert p.use_local_bind is False
        assert p.enable_dns_firewall is True
class TestManager(unittest.TestCase):
    """Tests for ModuleManager.exec_module with mocked device I/O."""

    def setUp(self):
        self.spec = ArgumentSpec()

    # NOTE(review): *args is presumably left over from a @patch decorator
    # pattern used by sibling test modules -- no patches are applied here.
    def test_create(self, *args):
        """Creating a new profile should report change and 'yes' flags."""
        # Configure the arguments that would be sent to the Ansible module
        set_module_args(dict(
            name='foo',
            parent='bar',
            enable_dns_express=True,
            enable_zone_transfer=True,
            enable_dnssec=True,
            enable_gtm=True,
            process_recursion_desired=True,
            use_local_bind=True,
            enable_dns_firewall=True,
            password='password',
            server='localhost',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
        # Booleans are reported back in the BIG-IP 'yes'/'no' vocabulary.
        assert results['enable_dns_express'] == 'yes'
        assert results['enable_zone_transfer'] == 'yes'
        assert results['enable_dnssec'] == 'yes'
        assert results['enable_gtm'] == 'yes'
        assert results['process_recursion_desired'] == 'yes'
        assert results['use_local_bind'] == 'yes'
        assert results['enable_dns_firewall'] == 'yes'
| gpl-3.0 |
belmiromoreira/nova | nova/scheduler/filters/retry_filter.py | 58 | 1587 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class RetryFilter(filters.BaseHostFilter):
    """Filter out nodes that have already been attempted for scheduling
    purposes
    """

    def host_passes(self, host_state, filter_properties):
        """Reject hosts that already appear in the request's retry history."""
        retry = filter_properties.get('retry', None)
        if not retry:
            # Re-scheduling is disabled
            LOG.debug("Re-scheduling is disabled")
            return True
        attempted = retry.get('hosts', [])
        candidate = [host_state.host, host_state.nodename]
        if candidate in attempted:
            LOG.debug("Host %(host)s fails. Previously tried hosts: "
                      "%(hosts)s", {'host': candidate, 'hosts': attempted})
            return False
        # The host passes when it is not among the previously attempted ones.
        return True
| apache-2.0 |
gnowxilef/youtube-dl | youtube_dl/extractor/mdr.py | 58 | 6855 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
determine_ext,
int_or_none,
parse_duration,
parse_iso8601,
xpath_text,
)
class MDRIE(InfoExtractor):
    """Extractor for MDR.DE and KiKA pages; formats come from an avCustom XML."""
    IE_DESC = 'MDR.DE and KiKA'
    _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z-]+-?(?P<id>\d+)(?:_.+?)?\.html'

    _TESTS = [{
        # MDR regularly deletes its videos
        'url': 'http://www.mdr.de/fakt/video189002.html',
        'only_matching': True,
    }, {
        # audio
        'url': 'http://www.mdr.de/kultur/audio1312272_zc-15948bad_zs-86171fdd.html',
        'md5': '64c4ee50f0a791deb9479cd7bbe9d2fa',
        'info_dict': {
            'id': '1312272',
            'ext': 'mp3',
            'title': 'Feuilleton vom 30. Oktober 2015',
            'duration': 250,
            'uploader': 'MITTELDEUTSCHER RUNDFUNK',
        },
        'skip': '404 not found',
    }, {
        'url': 'http://www.kika.de/baumhaus/videos/video19636.html',
        'md5': '4930515e36b06c111213e80d1e4aad0e',
        'info_dict': {
            'id': '19636',
            'ext': 'mp4',
            'title': 'Baumhaus vom 30. Oktober 2015',
            'duration': 134,
            'uploader': 'KIKA',
        },
        'skip': '404 not found',
    }, {
        'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/videos/video8182.html',
        'md5': '5fe9c4dd7d71e3b238f04b8fdd588357',
        'info_dict': {
            'id': '8182',
            'ext': 'mp4',
            'title': 'Beutolomäus und der geheime Weihnachtswunsch',
            'description': 'md5:b69d32d7b2c55cbe86945ab309d39bbd',
            'timestamp': 1482541200,
            'upload_date': '20161224',
            'duration': 4628,
            'uploader': 'KIKA',
        },
    }, {
        # audio with alternative playerURL pattern
        'url': 'http://www.mdr.de/kultur/videos-und-audios/audio-radio/operation-mindfuck-robert-wilson100.html',
        'info_dict': {
            'id': '100',
            'ext': 'mp4',
            'title': 'Feature: Operation Mindfuck - Robert Anton Wilson',
            'duration': 3239,
            'uploader': 'MITTELDEUTSCHER RUNDFUNK',
        },
    }, {
        'url': 'http://www.kika.de/baumhaus/sendungen/video19636_zc-fea7f8a0_zs-4bf89c60.html',
        'only_matching': True,
    }, {
        'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/einzelsendung2534.html',
        'only_matching': True,
    }, {
        'url': 'http://www.mdr.de/mediathek/mdr-videos/a/video-1334.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Download the page, resolve its avCustom XML and build format dicts."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The player config references an "...-avCustom.xml" descriptor via
        # either a dataURL or playerXml key; unescape any '\/' sequences.
        data_url = self._search_regex(
            r'(?:dataURL|playerXml(?:["\'])?)\s*:\s*(["\'])(?P<url>.+?-avCustom\.xml)\1',
            webpage, 'data url', group='url').replace(r'\/', '/')
        doc = self._download_xml(
            compat_urlparse.urljoin(url, data_url), video_id)
        title = xpath_text(doc, ['./title', './broadcast/broadcastName'], 'title', fatal=True)
        formats = []
        processed_urls = []
        for asset in doc.findall('./assets/asset'):
            for source in (
                    'progressiveDownload',
                    'dynamicHttpStreamingRedirector',
                    'adaptiveHttpStreamingRedirector'):
                url_el = asset.find('./%sUrl' % source)
                if url_el is None:
                    continue
                video_url = url_el.text
                # The same URL may be listed under several source kinds.
                if video_url in processed_urls:
                    continue
                processed_urls.append(video_url)
                # Bitrates are given in bps; scale to kbps once for the asset.
                # (Previously these were recomputed redundantly in the else
                # branch below.)
                vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000)
                abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000)
                ext = determine_ext(url_el.text)
                if ext == 'm3u8':
                    url_formats = self._extract_m3u8_formats(
                        video_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        preference=0, m3u8_id='HLS', fatal=False)
                elif ext == 'f4m':
                    url_formats = self._extract_f4m_formats(
                        video_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id,
                        preference=0, f4m_id='HDS', fatal=False)
                else:
                    media_type = xpath_text(asset, './mediaType', 'media type', default='MP4')
                    filesize = int_or_none(xpath_text(asset, './fileSize', 'file size'))
                    f = {
                        'url': video_url,
                        'format_id': '%s-%d' % (media_type, vbr or abr),
                        'filesize': filesize,
                        'abr': abr,
                        'preference': 1,
                    }
                    if vbr:
                        width = int_or_none(xpath_text(asset, './frameWidth', 'width'))
                        height = int_or_none(xpath_text(asset, './frameHeight', 'height'))
                        f.update({
                            'vbr': vbr,
                            'width': width,
                            'height': height,
                        })
                    url_formats = [f]
                if not url_formats:
                    continue
                if not vbr:
                    # Audio-only asset: the total bitrate is the audio bitrate.
                    for f in url_formats:
                        abr = f.get('tbr') or abr
                        if 'tbr' in f:
                            del f['tbr']
                        f.update({
                            'abr': abr,
                            'vcodec': 'none',
                        })
                formats.extend(url_formats)
        self._sort_formats(formats)
        description = xpath_text(doc, './broadcast/broadcastDescription', 'description')
        # Several date fields may be present; take the first that parses.
        timestamp = parse_iso8601(
            xpath_text(
                doc, [
                    './broadcast/broadcastDate',
                    './broadcast/broadcastStartDate',
                    './broadcast/broadcastEndDate'],
                'timestamp', default=None))
        duration = parse_duration(xpath_text(doc, './duration', 'duration'))
        uploader = xpath_text(doc, './rights', 'uploader')
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'duration': duration,
            'uploader': uploader,
            'formats': formats,
        }
| unlicense |
MeteorKepler/RICGA | ricga/eval_tools/pycocoevalcap/tokenizer/ptbtokenizer.py | 1 | 2833 | #!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
import os
import subprocess
import tempfile
# path to the stanford corenlp jar
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
                ".", "?", "!", ",", ":", "-", "--", "...", ";"]
class PTBTokenizer:
    """Python wrapper of Stanford PTBTokenizer"""
    def tokenize(self, captions_for_image):
        """Tokenize every caption with the Stanford PTBTokenizer jar.

        *captions_for_image* maps image id -> list of {'caption': str} dicts.
        Returns a dict mapping each image id to a list of lowercased,
        punctuation-stripped tokenized caption strings (same order as input).
        """
        cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
                'edu.stanford.nlp.process.PTBTokenizer', \
                '-preserveLines', '-lowerCase']
        # ======================================================
        # prepare data for PTB Tokenizer
        # ======================================================
        # image_id repeats each key once per caption so that output lines can
        # later be zipped back to their image.
        final_tokenized_captions_for_image = {}
        image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
        sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
        # ======================================================
        # save sentences to temporary file
        # ======================================================
        # The temp file lives next to the jar because the subprocess runs with
        # cwd=path_to_jar_dirname and is handed only the file's basename.
        path_to_jar_dirname = os.path.dirname(os.path.abspath(__file__))
        tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
        tmp_file.write(sentences)
        tmp_file.close()
        # ======================================================
        # tokenize sentence
        # ======================================================
        cmd.append(os.path.basename(tmp_file.name))
        # NOTE(review): sentences are both written to the temp file (passed as
        # an argument) and fed to stdin via communicate(); presumably only the
        # file argument is consumed by the tokenizer -- verify before changing.
        p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
                stdout=subprocess.PIPE)
        token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
        lines = token_lines.split('\n')
        # remove temp file
        os.remove(tmp_file.name)
        # ======================================================
        # create dictionary for tokenized captions
        # ======================================================
        # One output line per input caption; drop PTB punctuation tokens.
        for k, line in zip(image_id, lines):
            if not k in final_tokenized_captions_for_image:
                final_tokenized_captions_for_image[k] = []
            tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
                    if w not in PUNCTUATIONS])
            final_tokenized_captions_for_image[k].append(tokenized_caption)
        return final_tokenized_captions_for_image
| apache-2.0 |
lmprice/ansible | test/units/playbook/role/test_role.py | 12 | 11071 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
from ansible.playbook.role import hash_params
class TestHashParams(unittest.TestCase):
    """hash_params must return a hashable frozenset for any parameter shape."""

    def test(self):
        """Plain dict hashes to a frozenset."""
        params = {'foo': 'bar'}
        res = hash_params(params)
        self._assert_set(res)
        self._assert_hashable(res)

    def _assert_hashable(self, res):
        # A value is hashable iff it can be used as a dict key.
        a_dict = {}
        try:
            a_dict[res] = res
        except TypeError as e:
            self.fail('%s is not hashable: %s' % (res, e))

    def _assert_set(self, res):
        self.assertIsInstance(res, frozenset)

    def test_dict_tuple(self):
        """Dict with a tuple value."""
        params = {'foo': (1, 'bar',)}
        res = hash_params(params)
        self._assert_set(res)

    def test_tuple(self):
        """Bare tuple, including None."""
        params = (1, None, 'foo')
        res = hash_params(params)
        self._assert_hashable(res)

    def test_tuple_dict(self):
        """Tuple containing a dict."""
        params = ({'foo': 'bar'}, 37)
        res = hash_params(params)
        self._assert_hashable(res)

    def test_list(self):
        """Mixed-type list."""
        params = ['foo', 'bar', 1, 37, None]
        res = hash_params(params)
        self._assert_set(res)
        self._assert_hashable(res)

    def test_dict_with_list_value(self):
        """Dict whose value is itself a list."""
        params = {'foo': [1, 4, 'bar']}
        res = hash_params(params)
        self._assert_set(res)
        self._assert_hashable(res)

    def test_empty_set(self):
        """Empty set input."""
        params = set([])
        res = hash_params(params)
        self._assert_hashable(res)
        self._assert_set(res)

    def test_generator(self):
        """Generators should be consumable by hash_params."""
        def my_generator():
            for i in ['a', 1, None, {}]:
                yield i
        params = my_generator()
        res = hash_params(params)
        self._assert_hashable(res)

    def test_container_but_not_iterable(self):
        # This is a Container that is not iterable, which is unlikely but...
        # (collections.Container is the old alias of collections.abc.Container.)
        class MyContainer(collections.Container):
            def __init__(self, some_thing):
                self.data = []
                self.data.append(some_thing)
            def __contains__(self, item):
                return item in self.data
            def __hash__(self):
                return hash(self.data)
            def __len__(self):
                return len(self.data)
            def __call__(self):
                return False
        foo = MyContainer('foo bar')
        params = foo
        # Non-iterable containers cannot be hashed into a param set.
        self.assertRaises(TypeError, hash_params, params)
class TestRole(unittest.TestCase):
    """Role.load behaviour: tasks, handlers, vars layouts, metadata, deps."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_tasks(self):
        """tasks/main.yml becomes a single Block in _task_blocks."""
        fake_loader = DictDataLoader({
            "/etc/ansible/roles/foo_tasks/tasks/main.yml": """
            - shell: echo 'hello world'
            """,
        })
        mock_play = MagicMock()
        mock_play.ROLE_CACHE = {}
        i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader)
        r = Role.load(i, play=mock_play)
        self.assertEqual(str(r), 'foo_tasks')
        self.assertEqual(len(r._task_blocks), 1)
        assert isinstance(r._task_blocks[0], Block)

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_handlers(self):
        """handlers/main.yml becomes a single Block in _handler_blocks."""
        fake_loader = DictDataLoader({
            "/etc/ansible/roles/foo_handlers/handlers/main.yml": """
            - name: test handler
              shell: echo 'hello world'
            """,
        })
        mock_play = MagicMock()
        mock_play.ROLE_CACHE = {}
        i = RoleInclude.load('foo_handlers', play=mock_play, loader=fake_loader)
        r = Role.load(i, play=mock_play)
        self.assertEqual(len(r._handler_blocks), 1)
        assert isinstance(r._handler_blocks[0], Block)

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_vars(self):
        """defaults/ feed _default_vars; vars/ feed _role_vars."""
        fake_loader = DictDataLoader({
            "/etc/ansible/roles/foo_vars/defaults/main.yml": """
            foo: bar
            """,
            "/etc/ansible/roles/foo_vars/vars/main.yml": """
            foo: bam
            """,
        })
        mock_play = MagicMock()
        mock_play.ROLE_CACHE = {}
        i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
        r = Role.load(i, play=mock_play)
        self.assertEqual(r._default_vars, dict(foo='bar'))
        self.assertEqual(r._role_vars, dict(foo='bam'))

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_vars_dirs(self):
        """main/ may be a directory of var files instead of main.yml."""
        fake_loader = DictDataLoader({
            "/etc/ansible/roles/foo_vars/defaults/main/foo.yml": """
            foo: bar
            """,
            "/etc/ansible/roles/foo_vars/vars/main/bar.yml": """
            foo: bam
            """,
        })
        mock_play = MagicMock()
        mock_play.ROLE_CACHE = {}
        i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
        r = Role.load(i, play=mock_play)
        self.assertEqual(r._default_vars, dict(foo='bar'))
        self.assertEqual(r._role_vars, dict(foo='bam'))

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_vars_nested_dirs(self):
        """Var files nested arbitrarily deep under main/ are still loaded."""
        fake_loader = DictDataLoader({
            "/etc/ansible/roles/foo_vars/defaults/main/foo/bar.yml": """
            foo: bar
            """,
            "/etc/ansible/roles/foo_vars/vars/main/bar/foo.yml": """
            foo: bam
            """,
        })
        mock_play = MagicMock()
        mock_play.ROLE_CACHE = {}
        i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
        r = Role.load(i, play=mock_play)
        self.assertEqual(r._default_vars, dict(foo='bar'))
        self.assertEqual(r._role_vars, dict(foo='bam'))

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_vars_dir_vs_file(self):
        """When both main/ dir and main.yml exist, main.yml wins."""
        fake_loader = DictDataLoader({
            "/etc/ansible/roles/foo_vars/vars/main/foo.yml": """
            foo: bar
            """,
            "/etc/ansible/roles/foo_vars/vars/main.yml": """
            foo: bam
            """,
        })
        mock_play = MagicMock()
        mock_play.ROLE_CACHE = {}
        i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
        r = Role.load(i, play=mock_play)
        self.assertEqual(r._role_vars, dict(foo='bam'))

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_metadata(self):
        """meta/main.yml: dependency chains, galaxy_info, and error cases."""
        fake_loader = DictDataLoader({
            '/etc/ansible/roles/foo_metadata/meta/main.yml': """
                allow_duplicates: true
                dependencies:
                  - bar_metadata
                galaxy_info:
                  a: 1
                  b: 2
                  c: 3
            """,
            '/etc/ansible/roles/bar_metadata/meta/main.yml': """
                dependencies:
                  - baz_metadata
            """,
            '/etc/ansible/roles/baz_metadata/meta/main.yml': """
                dependencies:
                  - bam_metadata
            """,
            '/etc/ansible/roles/bam_metadata/meta/main.yml': """
                dependencies: []
            """,
            '/etc/ansible/roles/bad1_metadata/meta/main.yml': """
                1
            """,
            '/etc/ansible/roles/bad2_metadata/meta/main.yml': """
                foo: bar
            """,
            '/etc/ansible/roles/recursive1_metadata/meta/main.yml': """
                dependencies: ['recursive2_metadata']
            """,
            '/etc/ansible/roles/recursive2_metadata/meta/main.yml': """
                dependencies: ['recursive1_metadata']
            """,
        })
        mock_play = MagicMock()
        mock_play.ROLE_CACHE = {}
        i = RoleInclude.load('foo_metadata', play=mock_play, loader=fake_loader)
        r = Role.load(i, play=mock_play)
        # Direct dependency is bar_metadata, with this role as its parent.
        role_deps = r.get_direct_dependencies()
        self.assertEqual(len(role_deps), 1)
        self.assertEqual(type(role_deps[0]), Role)
        self.assertEqual(len(role_deps[0].get_parents()), 1)
        self.assertEqual(role_deps[0].get_parents()[0], r)
        self.assertEqual(r._metadata.allow_duplicates, True)
        self.assertEqual(r._metadata.galaxy_info, dict(a=1, b=2, c=3))
        # Transitive deps come back depth-first, deepest first.
        all_deps = r.get_all_dependencies()
        self.assertEqual(len(all_deps), 3)
        self.assertEqual(all_deps[0].get_name(), 'bam_metadata')
        self.assertEqual(all_deps[1].get_name(), 'baz_metadata')
        self.assertEqual(all_deps[2].get_name(), 'bar_metadata')
        # Malformed metadata raises a parser error...
        i = RoleInclude.load('bad1_metadata', play=mock_play, loader=fake_loader)
        self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play)
        i = RoleInclude.load('bad2_metadata', play=mock_play, loader=fake_loader)
        self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play)
        # ...and a dependency cycle raises a generic AnsibleError.
        i = RoleInclude.load('recursive1_metadata', play=mock_play, loader=fake_loader)
        self.assertRaises(AnsibleError, Role.load, i, play=mock_play)

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_complex(self):
        """Role given as a dict (role=...) loads like the plain-string form."""
        # FIXME: add tests for the more complex uses of
        #        params and tags/when statements
        fake_loader = DictDataLoader({
            "/etc/ansible/roles/foo_complex/tasks/main.yml": """
            - shell: echo 'hello world'
            """,
        })
        mock_play = MagicMock()
        mock_play.ROLE_CACHE = {}
        i = RoleInclude.load(dict(role='foo_complex'), play=mock_play, loader=fake_loader)
        r = Role.load(i, play=mock_play)
        self.assertEqual(r.get_name(), "foo_complex")
| gpl-3.0 |
scottdangelo/cinderclient-api-microversions | cinderclient/v2/qos_specs.py | 2 | 4789 | # Copyright (c) 2013 eBay Inc.
# Copyright (c) OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
QoS Specs interface.
"""
from cinderclient import base
class QoSSpecs(base.Resource):
    """QoS specs entity represents quality-of-service parameters/requirements.

    A QoS specs is a set of parameters or requirements for quality-of-service
    purpose, which can be associated with volume types (for now). In future,
    QoS specs may be extended to be associated other entities, such as single
    volume.
    """
    def __repr__(self):
        # Human-readable representation keyed on the specs' name.
        return "<QoSSpecs: %s>" % self.name

    def delete(self):
        # Convenience wrapper around the manager-level delete.
        return self.manager.delete(self)
class QoSSpecsManager(base.ManagerWithFind):
    """
    Manage :class:`QoSSpecs` resources.
    """
    resource_class = QoSSpecs

    def list(self, search_opts=None):
        """Get a list of all qos specs.

        :param search_opts: currently ignored -- no server-side filtering
            is applied.
        :rtype: list of :class:`QoSSpecs`.
        """
        return self._list("/qos-specs", "qos_specs")

    def get(self, qos_specs):
        """Get a specific qos specs.

        :param qos_specs: The ID of the :class:`QoSSpecs` to get.
        :rtype: :class:`QoSSpecs`
        """
        return self._get("/qos-specs/%s" % base.getid(qos_specs), "qos_specs")

    def delete(self, qos_specs, force=False):
        """Delete a specific qos specs.

        :param qos_specs: The ID of the :class:`QoSSpecs` to be removed.
        :param force: Flag that indicates whether to delete target qos specs
                      if it was in-use.
        """
        self._delete("/qos-specs/%s?force=%s" %
                     (base.getid(qos_specs), force))

    def create(self, name, specs):
        """Create a qos specs.

        :param name: Descriptive name of the qos specs, must be unique
        :param specs: A dict of key/value pairs to be set
        :rtype: :class:`QoSSpecs`
        """
        body = {
            "qos_specs": {
                "name": name,
            }
        }
        body["qos_specs"].update(specs)
        return self._create("/qos-specs", body, "qos_specs")

    def set_keys(self, qos_specs, specs):
        """Add/Update keys in qos specs.

        :param qos_specs: The ID of qos specs
        :param specs: A dict of key/value pairs to be set
        :rtype: :class:`QoSSpecs`
        """
        body = {
            "qos_specs": {}
        }
        body["qos_specs"].update(specs)
        return self._update("/qos-specs/%s" % qos_specs, body)

    def unset_keys(self, qos_specs, specs):
        """Remove keys from a qos specs.

        :param qos_specs: The ID of qos specs
        :param specs: A list of key to be unset
        :rtype: :class:`QoSSpecs`
        """
        body = {'keys': specs}
        return self._update("/qos-specs/%s/delete_keys" % qos_specs,
                            body)

    def get_associations(self, qos_specs):
        """Get associated entities of a qos specs.

        :param qos_specs: The id of the :class: `QoSSpecs`
        :return: a list of entities that associated with specific qos specs.
        """
        return self._list("/qos-specs/%s/associations" % base.getid(qos_specs),
                          "qos_associations")

    # NOTE(review): the association calls below issue GET requests that
    # mutate state -- this mirrors the server-side API of this era.
    def associate(self, qos_specs, vol_type_id):
        """Associate a volume type with specific qos specs.

        :param qos_specs: The qos specs to be associated with
        :param vol_type_id: The volume type id to be associated with
        """
        self.api.client.get("/qos-specs/%s/associate?vol_type_id=%s" %
                            (base.getid(qos_specs), vol_type_id))

    def disassociate(self, qos_specs, vol_type_id):
        """Disassociate qos specs from volume type.

        :param qos_specs: The qos specs to be associated with
        :param vol_type_id: The volume type id to be associated with
        """
        self.api.client.get("/qos-specs/%s/disassociate?vol_type_id=%s" %
                            (base.getid(qos_specs), vol_type_id))

    def disassociate_all(self, qos_specs):
        """Disassociate all entities from specific qos specs.

        :param qos_specs: The qos specs to be associated with
        """
        self.api.client.get("/qos-specs/%s/disassociate_all" %
                            base.getid(qos_specs))
| apache-2.0 |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/GL/ARB/robustness_isolation.py | 1 | 1333 | '''OpenGL extension ARB.robustness_isolation
This module customises the behaviour of the
OpenGL.raw.GL.ARB.robustness_isolation to provide a more
Python-friendly API
Overview (from the spec)
GL_ARB_robustness and supporting window system extensions allow
creating an OpenGL context supporting graphics reset notification
behavior. GL_ARB_robustness_isolation provides stronger
guarantees about the possible side-effects of a graphics reset.
It is expected that there may be a performance cost associated
with isolating an application or share group from other contexts
on the GPU. For this reason, GL_ARB_robustness_isolation is
phrased as an opt-in mechanism, with a new context creation bit
defined in the window system bindings. It is expected that
implementations might only advertise the strings in this extension
if both the implementation supports the desired isolation
properties, and the context was created with the appropriate reset
isolation bit.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/robustness_isolation.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.robustness_isolation import *
### END AUTOGENERATED SECTION | bsd-2-clause |
guoxiaoyong/simple-useful | cxx_learn/cronx/spider/spider_daily_ftse100.py | 2 | 2199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2;
import re;
import string;
import sys;
from BeautifulSoup import BeautifulSoup
# Map three-letter English month abbreviations to two-digit month numbers,
# used to normalize Yahoo Finance dates into YYYY-MM-DD strings.
# (The original dict ended with a stray '' literal after '12'; Python's
# implicit string concatenation made it harmless ('12' '' == '12'), but it
# was clearly accidental and has been removed.)
month_num = {
    'Jan': '01',
    'Feb': '02',
    'Mar': '03',
    'Apr': '04',
    'May': '05',
    'Jun': '06',
    'Jul': '07',
    'Aug': '08',
    'Sep': '09',
    'Oct': '10',
    'Nov': '11',
    'Dec': '12',
}
def process_date(raw_date):
    """Convert a date such as 'Jan 5, 2012' into 'YYYY-MM-DD' form."""
    fields = raw_date.split(' ')
    month = month_num[fields[0]]
    day = fields[1].split(',')[0]
    # Zero-pad single-digit days so the output is always two digits wide.
    if len(day) == 1:
        day = '0' + day
    year = fields[2]
    return year + '-' + month + '-' + day
def process_num(raw_num):
    """Strip thousands separators from a number string.

    '6,543,210.99' -> '6543210.99'.  The original implementation split on
    ',' and re-joined the pieces by hand; str.replace does the same thing
    in one call.
    """
    return raw_num.replace(',', '')
# Fetch the latest FTSE-100 quote from Yahoo Finance's historical-prices
# page and record it into two CSV files given on the command line:
# argv[1] is rewritten with just the newest day, argv[2] accumulates history.
str_url = "http://finance.yahoo.com/q/hp?s=%5EFTSE+Historical+Prices";
req = urllib2.Request(str_url);
resp = urllib2.urlopen(req);
respHtml = resp.read();
HtmlEncoding = "UTF-8";
soup = BeautifulSoup(respHtml, fromEncoding=HtmlEncoding);
# The first yfnc_datamodoutline1 table holds the price history; drill down
# through the nested table markup to reach the row container.
tag_top = soup.find('table', {"class":"yfnc_datamodoutline1"});
tag_body = tag_top.contents[0].contents[0].contents[0];
# Row index 1 is the most recent trading day; its cells are, in order:
# Date, Open, High, Low, Close, Volume.
str_date = process_date(tag_body.contents[1].contents[0].contents[0]);
open_price = process_num(tag_body.contents[1].contents[1].contents[0]);
high_price = process_num(tag_body.contents[1].contents[2].contents[0]);
low_price = process_num(tag_body.contents[1].contents[3].contents[0]);
close_price = process_num(tag_body.contents[1].contents[4].contents[0]);
volume = process_num(tag_body.contents[1].contents[5].contents[0]);
# Volume "0" means no trading happened (e.g. a holiday) -- write nothing.
if volume != "0":
    daily_file = sys.argv[1];    # overwritten each run with the latest day
    history_file = sys.argv[2];  # appended to, accumulating all days
    daily_fp = open(daily_file, 'w');
    history_fp = open(history_file, 'a');
    title_str = "Date,Open Price,High Price,Low Price,Close Price,Volume(GBP)\n";
    daily_fp.write(title_str);
    day_market_data = str_date+","+open_price+","+high_price+","+low_price+","+close_price+","+volume+'\n';
    daily_fp.write(day_market_data);
    history_fp.write(day_market_data);
    daily_fp.close();
    history_fp.close();
| cc0-1.0 |
skeuomorf/cryptography | src/cryptography/hazmat/backends/commoncrypto/hashes.py | 61 | 2040 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.primitives import hashes
@utils.register_interface(hashes.HashContext)
class _HashContext(object):
    """hashes.HashContext backed by CommonCrypto digest functions via cffi.

    ``backend._hash_mapping`` supplies, per algorithm name, the C context
    struct type and the init/update/final function pointers.
    """

    def __init__(self, backend, algorithm, ctx=None):
        self._algorithm = algorithm
        self._backend = backend

        if ctx is None:
            # Look up the CommonCrypto method table for this algorithm;
            # absence from the mapping means the hash is unsupported here.
            try:
                methods = self._backend._hash_mapping[self.algorithm.name]
            except KeyError:
                raise UnsupportedAlgorithm(
                    "{0} is not a supported hash on this backend.".format(
                        algorithm.name),
                    _Reasons.UNSUPPORTED_HASH
                )
            ctx = self._backend._ffi.new(methods.ctx)
            res = methods.hash_init(ctx)
            # CommonCrypto digest calls return 1 on success.
            assert res == 1

        self._ctx = ctx

    algorithm = utils.read_only_property("_algorithm")

    def copy(self):
        methods = self._backend._hash_mapping[self.algorithm.name]
        new_ctx = self._backend._ffi.new(methods.ctx)
        # CommonCrypto has no APIs for copying hashes, so we have to copy the
        # underlying struct.
        new_ctx[0] = self._ctx[0]
        return _HashContext(self._backend, self.algorithm, ctx=new_ctx)

    def update(self, data):
        methods = self._backend._hash_mapping[self.algorithm.name]
        res = methods.hash_update(self._ctx, data, len(data))
        assert res == 1

    def finalize(self):
        methods = self._backend._hash_mapping[self.algorithm.name]
        buf = self._backend._ffi.new("unsigned char[]",
                                     self.algorithm.digest_size)
        res = methods.hash_final(buf, self._ctx)
        assert res == 1
        # Copy the C output buffer into a Python byte string.
        return self._backend._ffi.buffer(buf)[:]
| bsd-3-clause |
YAmikep/django-xmlmapping | setup.py | 1 | 3687 | """
Based entirely on Django's own ``setup.py`` for now.
"""
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
class osx_install_data(install_data):
    """install_data variant that works around Apple's distutils patch.

    On MacOS, the platform-specific lib dir is
    /System/Library/Framework/Python/.../ which is wrong. Python 2.5 supplied
    with MacOS 10.5 has an Apple-specific fix for this in
    distutils.command.install_data#306. It fixes install_lib but not
    install_data, which is why we roll our own install_data class.
    """

    def finalize_options(self):
        # By the time finalize_options is called, install.install_lib is set
        # to the fixed directory, so we set the installdir to install_lib.
        # The install_data class uses ('install_data', 'install_dir') instead.
        self.set_undefined_options('install', ('install_lib', 'install_dir'))
        install_data.finalize_options(self)
# Use the OS-X-aware data installer on Darwin, plain install_data elsewhere.
cmdclasses = {
    'install_data': osx_install_data if sys.platform == "darwin" else install_data
}
def fullsplit(path, result=None):
    """
    Split a pathname into components (the opposite of os.path.join) in a
    platform-neutral way.
    """
    parts = [] if result is None else result
    while True:
        head, tail = os.path.split(path)
        if head == '':
            # Relative path fully consumed; tail is the first component.
            return [tail] + parts
        if head == path:
            # Reached a root such as '/': stop without including it.
            return parts
        parts = [tail] + parts
        path = head
# Tell distutils not to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']

# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
    # Walk relative to setup.py's directory so package paths come out right.
    os.chdir(root_dir)
xmlmapping_dir = 'xmlmapping'

# Collect importable packages (dirs with __init__.py) and loose data files.
for dirpath, dirnames, filenames in os.walk(xmlmapping_dir):
    # Prune hidden directories in place so os.walk doesn't descend into
    # them. The original `del dirnames[i]` inside `enumerate(dirnames)`
    # mutated the list while iterating it, which skips the entry that
    # follows each deletion; rebuilding via a slice assignment prunes all
    # hidden directories reliably.
    dirnames[:] = [d for d in dirnames if not d.startswith('.')]
    if '__init__.py' in filenames:
        packages.append('.'.join(fullsplit(dirpath)))
    elif filenames:
        data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
    for file_info in data_files:
        file_info[0] = '\\PURELIB\\%s' % file_info[0]

# Dynamically calculate the version from the xmlmapping package itself.
version = __import__('xmlmapping').get_version()
# Standard distutils metadata; packages, data_files and cmdclasses are
# computed above.
setup(
    name = "django-xmlmapping",
    version = version,
    url = 'https://github.com/YAmikep/django-xmlmapping',
    author = 'Michael Palumbo',
    author_email = 'michael.palumbo87@gmail.com',
    description = 'Library to map XML data to a Django data model and persist the data in the data base.',
    packages = packages,
    cmdclass = cmdclasses,
    data_files = data_files,
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Utilities',
        'Topic :: Internet'
    ],
)
| bsd-3-clause |
aewallin/openvoronoi | python_examples/line-segment/lineseg_3.py | 1 | 4439 | import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
def drawLine(myscreen, p1, p2):
    """Add a yellow line segment from p1 to p2 (at z=0) to the screen."""
    segment = ovdvtk.Line(p1=(p1.x, p1.y, 0), p2=(p2.x, p2.y, 0), color=ovdvtk.yellow)
    myscreen.addActor(segment)
def writeFrame(w2if, lwr, n):
    # Capture the current render window into a numbered PNG frame path.
    w2if.Modified()
    current_dir = os.getcwd()
    filename = current_dir + "/frames/vd500_zoomout" + ('%05d' % n) + ".png"
    lwr.SetFileName(filename)
    # Writing is disabled; re-enable to actually dump frames to disk.
    # lwr.Write()
def regularGridGenerators(far, Nmax):
    # REGULAR GRID: about Nmax points on a rows-by-rows grid spanning
    # [-0.7*far, +0.7*far] on both axes, returned in shuffled order.
    rows = int(math.sqrt(Nmax))
    print "rows= ", rows
    gpos = [-0.7 * far, 1.4 * far / float(rows - 1)]  # start, stride
    plist = []
    for n in range(rows):
        for m in range(rows):
            x = gpos[0] + gpos[1] * n
            y = gpos[0] + gpos[1] * m
            # rotation (disabled)
            # alfa = 0
            # xt=x
            # yt=y
            # x = xt*math.cos(alfa)-yt*math.sin(alfa)
            # y = xt*math.sin(alfa)+yt*math.cos(alfa)
            plist.append(ovd.Point(x, y))
    random.shuffle(plist)
    return plist
def randomGenerators(far, Nmax):
    """Return Nmax random points inside a square centred on the origin.

    The square's half-side is far/sqrt(2), so all points fit within the
    far-circle of the diagram.
    """
    half = (1.0 / math.sqrt(2)) * far
    points = []
    for _ in range(Nmax):
        px = -half + 2 * half * random.random()
        py = -half + 2 * half * random.random()
        points.append(ovd.Point(px, py))
    return points
def circleGenerators(far, Nmax):
    """Return Nmax points evenly spaced on a circle of radius 0.81234*far.

    The angular step divides the full circle into Nmax-1 intervals, so the
    first and last points coincide.
    """
    step = float(2 * math.pi) / float(Nmax - 1)
    radius = 0.81234 * float(far)
    points = []
    for k in range(Nmax):
        theta = float(k) * float(step)
        px = float(radius) * math.cos(theta)
        py = float(radius) * math.sin(theta)
        points.append(ovd.Point(px, py))
    return points
if __name__ == "__main__":
    # Build a VTK window, generate random point sites, insert them into an
    # OpenVoronoi diagram, and display the result interactively.
    # print ocl.revision()
    myscreen = ovdvtk.VTKScreen(width=1024, height=720)  # (width=1920, height=1080)
    ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
    # PNG frame-dump pipeline (only used by writeFrame, which is disabled).
    w2if = vtk.vtkWindowToImageFilter()
    w2if.SetInput(myscreen.renWin)
    lwr = vtk.vtkPNGWriter()
    lwr.SetInputConnection(w2if.GetOutputPort())
    # w2if.Modified()
    # lwr.SetFileName("tux1.png")
    scale = 1
    myscreen.render()
    random.seed(42)  # fixed seed -> reproducible point set
    far = 1
    camPos = far
    zmult = 4
    # camPos/float(1000)
    myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
    myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
    myscreen.camera.SetFocalPoint(0.0, 0, 0)
    # Voronoi diagram with far-radius 1 and 120 bins for grid search.
    vd = ovd.VoronoiDiagram(far, 120)
    print ovd.version()
    # for vtk visualization
    vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
    vod.drawFarCircle()
    # vod.clearance_disk=1
    vod.vertexRadius = 0.005
    vod.textScale = 0.02
    Nmax = 20
    plist = randomGenerators(far, Nmax)
    # plist = regularGridGenerators(far, Nmax)
    # plist = circleGenerators(far, Nmax)
    # plist = randomGenerators(far, Nmax)
    # plist = []
    # plist.append( ovd.Point(0.0,0.1) )
    # plist.append( ovd.Point(0,0.9) )
    # plist.append( ovd.Point(-0.15, -0.15) )
    # + regularGridGenerators(far, Nmax) + circleGenerators(far, Nmax)
    # plist = [ovd.Point(0,0)]
    t_before = time.time()
    n = 0
    id_list = []
    # Insert every point site, remembering the vertex ids for segment use.
    for p in plist:
        print n, " adding ", p
        id_list.append(vd.addVertexSite(p))
        n = n + 1
    Nsegs = 0
    # Line-segment insertion is disabled in this example.
    # print "returned: ",vd.addLineSite(69,105,10)
    """
    vd.addLineSite(83,35)
    vd.addLineSite(63,153)
    vd.addLineSite(48,20)
    vd.addLineSite(74,143)
    vd.addLineSite(125,173)
    vd.addLineSite(165,91)
    """
    # segs=[]
    # for n in range(Nsegs*2):
    # ids.append( id_list[n] )
    # segs.append( [17,13] )
    # segs.append( [21,34] )
    # segs.append( [26,44] )
    # id1 = id_list[0]
    # id2 = id_list[1]
    # id3 = id_list[2]
    # id4 = id_list[3]
    # for seg in segs:
    # id1= seg[0]
    # id2= seg[1]
    # print "add segment ",id1, " to ", id2
    # vd.addLineSite( id1, id2 , 20)
    # vd.addLineSite( id3, id4 )
    t_after = time.time()
    calctime = t_after - t_before
    if Nmax == 0:
        Nmax = 1
    print " VD done in ", calctime, " s, ", calctime / Nmax, " s per generator"
    vod.setAll()
    myscreen.render()
    print "PYTHON All DONE."
    myscreen.render()
    myscreen.iren.Start()
| lgpl-2.1 |
jcftang/ansible | lib/ansible/modules/cloud/amazon/dynamodb_table.py | 25 | 14667 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata consumed by ansible-doc and the module index.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = """
---
module: dynamodb_table
short_description: Create, update or delete AWS Dynamo DB tables.
version_added: "2.0"
description:
- Create or delete AWS Dynamo DB tables.
- Can update the provisioned throughput on existing tables.
- Returns the status of the specified table.
author: Alan Loi (@loia)
requirements:
- "boto >= 2.37.0"
options:
state:
description:
- Create or delete the table
required: false
choices: ['present', 'absent']
default: 'present'
name:
description:
- Name of the table.
required: true
hash_key_name:
description:
- Name of the hash key.
- Required when C(state=present).
required: false
default: null
hash_key_type:
description:
- Type of the hash key.
required: false
choices: ['STRING', 'NUMBER', 'BINARY']
default: 'STRING'
range_key_name:
description:
- Name of the range key.
required: false
default: null
range_key_type:
description:
- Type of the range key.
required: false
choices: ['STRING', 'NUMBER', 'BINARY']
default: 'STRING'
read_capacity:
description:
- Read throughput capacity (units) to provision.
required: false
default: 1
write_capacity:
description:
- Write throughput capacity (units) to provision.
required: false
default: 1
indexes:
description:
- list of dictionaries describing indexes to add to the table. global indexes can be updated. local indexes don't support updates or have throughput.
- "required options: ['name', 'type', 'hash_key_name']"
- "valid types: ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']"
- "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']"
required: false
default: []
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Create dynamo table with hash and range primary key
- dynamodb_table:
name: my-table
region: us-east-1
hash_key_name: id
hash_key_type: STRING
range_key_name: create_time
range_key_type: NUMBER
read_capacity: 2
write_capacity: 2
# Update capacity on existing dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
read_capacity: 10
write_capacity: 10
# set index on existing dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
indexes:
- name: NamedIndex
type: global_include
hash_key_name: id
range_key_name: create_time
includes:
- other_field
- other_field2
read_capacity: 10
write_capacity: 10
# Delete dynamo table
- dynamodb_table:
name: my-table
region: us-east-1
state: absent
'''
RETURN = '''
table_status:
description: The current status of the table.
returned: success
type: string
sample: ACTIVE
'''
import traceback
try:
import boto
import boto.dynamodb2
from boto.dynamodb2.table import Table
from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex
from boto.dynamodb2.types import STRING, NUMBER, BINARY
from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
from boto.dynamodb2.exceptions import ValidationException
HAS_BOTO = True
DYNAMO_TYPE_MAP = {
'STRING': STRING,
'NUMBER': NUMBER,
'BINARY': BINARY
}
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
# Key type used when an index does not specify hash_key_type/range_key_type.
DYNAMO_TYPE_DEFAULT = 'STRING'
# Options every index dict must supply.
INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
# All options an index dict may supply.
INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
# Valid values for an index's 'type' field.
INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
def create_or_update_dynamo_table(connection, module):
    """Ensure the described table exists with the requested configuration.

    Creates the table when absent, otherwise reconciles throughput and
    global secondary indexes.  Always exits the Ansible module via
    exit_json (success) or fail_json (boto error).
    """
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')
    all_indexes = module.params.get('indexes')

    # Reject malformed index descriptors before touching AWS.
    for index in all_indexes:
        validate_index(index, module)

    schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)

    throughput = {
        'read': read_capacity,
        'write': write_capacity
    }

    indexes, global_indexes = get_indexes(all_indexes)

    # Echoed back to the user in both the success and failure paths.
    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
        indexes=all_indexes,
    )

    try:
        table = Table(table_name, connection=connection)

        if dynamo_table_exists(table):
            result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
        else:
            # In check mode report the table as created without creating it.
            if not module.check_mode:
                Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
            result['changed'] = True

        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def delete_dynamo_table(connection, module):
    """Delete the named DynamoDB table if it exists.

    Exits the Ansible module via exit_json (with 'changed' reporting whether
    the table was present) or fail_json on a boto error.
    """
    table_name = module.params.get('name')

    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
    )

    try:
        table = Table(table_name, connection=connection)
        # Changed exactly when the table was there to delete; in check mode
        # report the change without performing it.
        result['changed'] = dynamo_table_exists(table)
        if result['changed'] and not module.check_mode:
            table.delete()

    except BotoServerError:
        result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def dynamo_table_exists(table):
    """Return True when table.describe() succeeds, False on 'not found'.

    Any other JSONResponseError is re-raised unchanged.
    """
    try:
        table.describe()
    except JSONResponseError as err:
        if err.message and err.message.startswith('Requested resource not found'):
            return False
        raise err
    else:
        return True
def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None):
    """Reconcile throughput and global secondary indexes on an existing table.

    Returns True when anything was (or, in check mode, would be) changed.
    """
    table.describe()  # populate table details

    throughput_changed = False
    global_indexes_changed = False
    if has_throughput_changed(table, throughput):
        if not check_mode:
            throughput_changed = table.update(throughput=throughput)
        else:
            throughput_changed = True

    removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes)
    if removed_indexes:
        if not check_mode:
            for name, index in removed_indexes.items():
                global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed
        else:
            global_indexes_changed = True

    if added_indexes:
        if not check_mode:
            for name, index in added_indexes.items():
                global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed
        else:
            global_indexes_changed = True

    if index_throughput_changes:
        if not check_mode:
            # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed
            try:
                global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed
            except ValidationException:
                # Swallowed deliberately: boto raises spuriously here until
                # the fix referenced above lands.
                pass
        else:
            global_indexes_changed = True

    return throughput_changed or global_indexes_changed
def has_throughput_changed(table, new_throughput):
    """Return True when the desired throughput differs from the table's.

    A falsy new_throughput (None/empty) means "no change requested".
    """
    if not new_throughput:
        return False

    current = table.throughput
    wanted = (new_throughput['read'], new_throughput['write'])
    return wanted != (current['read'], current['write'])
def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
    """Build the boto schema list: a HashKey plus an optional RangeKey.

    Unknown key types fall back to DYNAMO_TYPE_DEFAULT.
    """
    fallback = DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]
    schema = [HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, fallback))]
    if range_key_name:
        schema.append(RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, fallback)))
    return schema
def get_changed_global_indexes(table, global_indexes):
    """Diff the table's global indexes against the desired set.

    Returns (removed_indexes, added_indexes, index_throughput_changes):
    names present only on the table, index objects present only in the
    desired set, and throughput updates for surviving indexes.
    """
    table.describe()

    table_index_info = dict((index.name, index.schema()) for index in table.global_indexes)
    table_index_objects = dict((index.name, index) for index in table.global_indexes)
    set_index_info = dict((index.name, index.schema()) for index in global_indexes)
    set_index_objects = dict((index.name, index) for index in global_indexes)

    removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info)
    added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info)
    # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
    # index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes and (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or index.throughput['write'] != str(table_index_objects[name].throughput['write'])))
    # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
    # Until then: every surviving index gets its throughput re-applied.
    index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes)

    return removed_indexes, added_indexes, index_throughput_changes
def validate_index(index, module):
    """Fail the Ansible module when an index descriptor is malformed."""
    # Unknown keys first, then missing required keys, then the type value.
    for option in index:
        if option not in INDEX_OPTIONS:
            module.fail_json(msg='%s is not a valid option for an index' % option)
    for required in INDEX_REQUIRED_OPTIONS:
        if required not in index:
            module.fail_json(msg='%s is a required option for an index' % required)
    if index['type'] not in INDEX_TYPE_OPTIONS:
        module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
def get_indexes(all_indexes):
    """Turn index descriptor dicts into boto index objects.

    Returns (indexes, global_indexes): local secondary indexes and global
    secondary indexes, respectively.  Local indexes carry no throughput.
    """
    indexes = []
    global_indexes = []
    for index in all_indexes:
        name = index['name']
        schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'),
                                  index.get('range_key_name'), index.get('range_key_type'))
        throughput = {
            'read': index.get('read_capacity', 1),
            'write': index.get('write_capacity', 1)
        }

        kind = index['type']
        if kind == 'all':
            indexes.append(AllIndex(name, parts=schema))
        elif kind == 'global_all':
            global_indexes.append(GlobalAllIndex(name, parts=schema, throughput=throughput))
        elif kind == 'global_include':
            global_indexes.append(GlobalIncludeIndex(name, parts=schema, throughput=throughput, includes=index['includes']))
        elif kind == 'global_keys_only':
            global_indexes.append(GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput))
        elif kind == 'include':
            indexes.append(IncludeIndex(name, parts=schema, includes=index['includes']))
        elif kind == 'keys_only':
            indexes.append(KeysOnlyIndex(name, parts=schema))

    return indexes, global_indexes
def main():
    """Module entry point: parse args, connect to AWS, dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        hash_key_name=dict(required=True, type='str'),
        hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        range_key_name=dict(type='str'),
        range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        read_capacity=dict(default=1, type='int'),
        write_capacity=dict(default=1, type='int'),
        indexes=dict(default=[], type='list'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)

    # boto is imported lazily at module load; bail out cleanly if missing.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    if state == 'present':
        create_or_update_dynamo_table(connection, module)
    elif state == 'absent':
        delete_dynamo_table(connection, module)


if __name__ == '__main__':
    main()
| gpl-3.0 |
greent2008/ucore_lab | related_info/ostep/ostep15-disk/disk-precise.py | 100 | 30277 | #! /usr/bin/env python
from Tkinter import *
from types import *
import math, random, time, sys, os
from optparse import OptionParser
from decimal import *
# Maximum number of tracks the simulator supports.
MAXTRACKS = 1000

# states that a request/disk go through
STATE_NULL = 0    # not yet being serviced
STATE_SEEK = 1    # arm moving toward the target track
STATE_ROTATE = 2  # waiting for the platter to rotate under the head
STATE_XFER = 3    # transferring the block
STATE_DONE = 4    # request complete
#
# TODO
# XXX transfer time
# XXX satf
# XXX skew
# XXX scheduling window
# XXX sstf
# XXX specify requests vs. random requests in range
# XXX add new requests as old ones complete (starvation)
# XXX run in non-graphical mode
# XXX better graphical display (show key, long lists of requests, more timings on screen)
# XXX be able to do "pure" sequential
# XXX add more blocks around outer tracks (zoning)
# XXX simple flag to make scheduling window a fairness window (-F)
# new algs to scan and c-scan the disk?
#
class Disk:
    def __init__(self, addr, addrDesc, lateAddr, lateAddrDesc,
                 policy, seekSpeed, rotateSpeed, skew, window, compute,
                 graphics, zoning):
        # Store simulation parameters (see the option parser for meanings).
        self.addr = addr
        self.addrDesc = addrDesc
        self.lateAddr = lateAddr
        self.lateAddrDesc = lateAddrDesc
        self.policy = policy
        self.seekSpeed = Decimal(seekSpeed)
        self.rotateSpeed = Decimal(rotateSpeed)
        self.skew = skew
        self.window = window
        self.compute = compute
        self.graphics = graphics
        self.zoning = zoning

        # figure out zones first, to figure out the max possible request
        self.InitBlockLayout()

        # figure out requests
        random.seed(options.seed)
        self.requests = self.MakeRequests(self.addr, self.addrDesc)
        self.lateRequests = self.MakeRequests(self.lateAddr, self.lateAddrDesc)

        # graphical startup: window grows with the number of queued requests
        self.width = 500
        if self.graphics:
            self.root = Tk()
            tmpLen = len(self.requests)
            if len(self.lateRequests) > 0:
                tmpLen += len(self.lateRequests)
            self.canvas = Canvas(self.root, width=410, height=460 + ((tmpLen / 20) * 20))
            self.canvas.pack()

        # fairness stuff: BSATF uses the scheduling window as a fairness window
        if self.policy == 'BSATF' and self.window != -1:
            self.fairWindow = self.window
        else:
            self.fairWindow = -1

        print 'REQUESTS', self.requests
        print ''

        # for late requests
        self.lateCount = 0
        if len(self.lateRequests) > 0:
            print 'LATE REQUESTS', self.lateRequests
            print ''

        if self.compute == False:
            print ''
            print 'For the requests above, compute the seek, rotate, and transfer times.'
            print 'Use -c or the graphical mode (-G) to see the answers.'
            print ''

        # BINDINGS
        if self.graphics:
            self.root.bind('s', self.Start)
            self.root.bind('p', self.Pause)
            self.root.bind('q', self.Exit)

        # TRACK INFO: three concentric tracks, outermost (0) at radius 140
        self.tracks = {}
        self.trackWidth = 40
        self.tracks[0] = 140
        self.tracks[1] = self.tracks[0] - self.trackWidth
        self.tracks[2] = self.tracks[1] - self.trackWidth

        # Seek speed must land the arm exactly on track boundaries.
        if (self.seekSpeed > 1 and self.trackWidth % self.seekSpeed != 0):
            print 'Seek speed (%d) must divide evenly into track width (%d)' % (self.seekSpeed, self.trackWidth)
            sys.exit(1)
        if self.seekSpeed < 1:
            x = (self.trackWidth / self.seekSpeed)
            y = int(float(self.trackWidth) / float(self.seekSpeed))
            if float(x) != float(y):
                print 'Seek speed (%d) must divide evenly into track width (%d)' % (self.seekSpeed, self.trackWidth)
                sys.exit(1)

        # DISK SURFACE
        self.cx = self.width/2.0
        self.cy = self.width/2.0
        if self.graphics:
            self.canvas.create_rectangle(self.cx-175, 30, self.cx - 20, 80, fill='gray', outline='black')

        self.platterSize = 320
        ps2 = self.platterSize / 2.0
        if self.graphics:
            self.canvas.create_oval(self.cx-ps2, self.cy-ps2, self.cx+ps2, self.cy + ps2, fill='darkgray', outline='black')
        for i in range(len(self.tracks)):
            # Track outline drawn at the track's centerline radius.
            t = self.tracks[i] - (self.trackWidth / 2.0)
            if self.graphics:
                self.canvas.create_oval(self.cx - t, self.cy - t, self.cx + t, self.cy + t, fill='', outline='black', width=1.0)

        # SPINDLE
        self.spindleX = self.cx
        self.spindleY = self.cy
        if self.graphics:
            self.spindleID = self.canvas.create_oval(self.spindleX-3, self.spindleY-3, self.spindleX+3, self.spindleY+3, fill='orange', outline='black')

        # DISK ARM: starts on the outer track, pointing left of the spindle
        self.armTrack = 0
        self.armSpeedBase = float(seekSpeed)
        self.armSpeed = float(seekSpeed)
        distFromSpindle = self.tracks[self.armTrack]
        self.armWidth = 20
        self.headWidth = 10
        self.armX = self.spindleX - (distFromSpindle * math.cos(math.radians(0)))
        self.armX1 = self.armX - self.armWidth
        self.armX2 = self.armX + self.armWidth
        self.armY1 = 50.0
        self.armY2 = self.width / 2.0
        self.headX1 = self.armX - self.headWidth
        self.headX2 = self.armX + self.headWidth
        self.headY1 = (self.width/2.0) - self.headWidth
        self.headY2 = (self.width/2.0) + self.headWidth
        if self.graphics:
            self.armID = self.canvas.create_rectangle(self.armX1, self.armY1, self.armX2, self.armY2, fill='gray', outline='black')
            self.headID = self.canvas.create_rectangle(self.headX1, self.headY1, self.headX2, self.headY2, fill='gray', outline='black')

        self.targetSize = 10.0
        if self.graphics:
            sz = self.targetSize
            self.targetID = self.canvas.create_oval(self.armX1-sz, self.armY1-sz, self.armX1+sz, self.armY1+sz, fill='orange', outline='')

        # IO QUEUE
        self.queueX = 20
        self.queueY = 450
        self.requestCount = 0
        self.requestQueue = []
        self.requestState = []
        self.queueBoxSize = 20
        self.queueBoxID = {}
        self.queueTxtID = {}
        # draw each box
        for index in range(len(self.requests)):
            self.AddQueueEntry(int(self.requests[index]), index)
        if self.graphics:
            self.canvas.create_text(self.queueX - 5, self.queueY - 20, anchor='w', text='Queue:')

        # scheduling window
        self.currWindow = self.window
        # draw current limits of queue
        if self.graphics:
            self.windowID = -1
            self.DrawWindow()

        # initial scheduling info
        self.currentIndex = -1
        self.currentBlock = -1

        # initial state of disk (vs seeking, rotating, transferring)
        self.state = STATE_NULL

        # DRAW BLOCKS on the TRACKS; record each block's canvas text id
        for bid in range(len(self.blockInfoList)):
            (track, angle, name) = self.blockInfoList[bid]
            if self.graphics:
                distFromSpindle = self.tracks[track]
                xc = self.spindleX + (distFromSpindle * math.cos(math.radians(angle)))
                yc = self.spindleY + (distFromSpindle * math.sin(math.radians(angle)))
                cid = self.canvas.create_text(xc, yc, text=name, anchor='center')
            else:
                cid = -1
            self.blockInfoList[bid] = (track, angle, name, cid)

        # angle of rotation
        self.angle = Decimal(0.0)

        # TIME INFO: running totals shown at the top of the window
        if self.graphics:
            self.timeID = self.canvas.create_text(10, 10, text='Time: 0.00', anchor='w')
            self.canvas.create_rectangle(95,0,200,18, fill='orange', outline='orange')
            self.seekID = self.canvas.create_text(100, 10, text='Seek: 0.00', anchor='w')
            self.canvas.create_rectangle(195,0,300,18, fill='lightblue', outline='lightblue')
            self.rotID = self.canvas.create_text(200, 10, text='Rotate: 0.00', anchor='w')
            self.canvas.create_rectangle(295,0,400,18, fill='green', outline='green')
            self.xferID = self.canvas.create_text(300, 10, text='Transfer: 0.00', anchor='w')
            self.canvas.create_text(320, 40, text='"s" to start', anchor='w')
            self.canvas.create_text(320, 60, text='"p" to pause', anchor='w')
            self.canvas.create_text(320, 80, text='"q" to quit', anchor='w')

        self.timer = 0

        # STATS
        self.seekTotal = 0.0
        self.rotTotal = 0.0
        self.xferTotal = 0.0

        # set up animation loop
        if self.graphics:
            self.doAnimate = True
        else:
            self.doAnimate = False
        self.isDone = False
    # call this to start simulation
    def Go(self):
        # In graphics mode Tk's event loop drives the animation; otherwise
        # step the loop by hand until every request is done.
        # NOTE(review): reads the global 'options.graphics' rather than
        # self.graphics -- confirm the two are always set consistently.
        if options.graphics:
            self.root.mainloop()
        else:
            self.GetNextIO()
            while self.isDone == False:
                self.Animate()
    # crappy error message
    def PrintAddrDescMessage(self, value):
        # Explain the expected "num,max,min" address-descriptor format and quit.
        print 'Bad address description (%s)' % value
        print 'The address description must be a comma-separated list of length three, without spaces.'
        print 'For example, "10,100,0" would indicate that 10 addresses should be generated, with'
        print '100 as the maximum value, and 0 as the minumum. A max of -1 means just use the highest'
        print 'possible value as the max address to generate.'
        sys.exit(1)
#
# ZONES AND BLOCK LAYOUT
#
def InitBlockLayout(self):
    """Lay blocks out over the three tracks (0=outer, 1=middle, 2=inner).

    Builds, from the zoning and skew parameters:
      blockInfoList    -- (track, angle, block) tuples for drawing
      blockToTrackMap  -- block number -> track index
      blockToAngleMap  -- block number -> angular position (degrees)
      tracksBeginEnd   -- track index -> (first block, last block)
      blockAngleOffset -- per-track half of the angle one block spans
    """
    self.blockInfoList = []
    self.blockToTrackMap = {}
    self.blockToAngleMap = {}
    self.tracksBeginEnd = {}
    self.blockAngleOffset = []
    # zoning is 'a,b,c': degrees consumed per block on each track
    zones = self.zoning.split(',')
    assert(len(zones) == 3)
    for i in range(len(zones)):
        # half-angle: block centers sit between +/- this offset
        self.blockAngleOffset.append(int(zones[i]) / 2)
    track = 0 # outer track
    angleOffset = 2 * self.blockAngleOffset[track]
    for angle in range(0, 360, angleOffset):
        block = angle / angleOffset
        self.blockToTrackMap[block] = track
        self.blockToAngleMap[block] = angle
        self.blockInfoList.append((track, angle, block))
        # overwritten each iteration; ends as (0, last outer block)
        self.tracksBeginEnd[track] = (0, block)
    pblock = block + 1  # first block number of the next track
    track = 1 # middle track
    skew = self.skew
    angleOffset = 2 * self.blockAngleOffset[track]
    for angle in range(0, 360, angleOffset):
        block = (angle / angleOffset) + pblock
        self.blockToTrackMap[block] = track
        # skew rotates the whole track by skew block-widths
        self.blockToAngleMap[block] = angle + (angleOffset * skew)
        self.blockInfoList.append((track, angle + (angleOffset * skew), block))
        self.tracksBeginEnd[track] = (pblock, block)
    pblock = block + 1
    track = 2 # inner track
    skew = 2 * self.skew  # skew accumulates per track crossed
    angleOffset = 2 * self.blockAngleOffset[track]
    for angle in range(0, 360, angleOffset):
        block = (angle / angleOffset) + pblock
        self.blockToTrackMap[block] = track
        self.blockToAngleMap[block] = angle + (angleOffset * skew)
        self.blockInfoList.append((track, angle + (angleOffset * skew), block))
        self.tracksBeginEnd[track] = (pblock, block)
    # NOTE(review): pblock is NOT advanced after the inner-track loop, so
    # maxBlock is the first inner-track block number, excluding the inner
    # track from random request generation (see MakeRequests) -- confirm
    # whether that is intended.
    self.maxBlock = pblock
    # print 'MAX BLOCK:', self.maxBlock
    # adjust angle to starting position relative
    for i in self.blockToAngleMap:
        self.blockToAngleMap[i] = (self.blockToAngleMap[i] + 180) % 360
    # print 'btoa map', self.blockToAngleMap
    # print 'btot map', self.blockToTrackMap
    # print 'bao', self.blockAngleOffset
def MakeRequests(self, addr, addrDesc):
    """Build the request list.

    If addr is an explicit comma-separated list, return it split apart;
    otherwise generate random addresses from the 'num,max,min' descriptor
    in addrDesc (max of -1 means the highest block on the disk).
    """
    if addr != '-1':
        # explicit list given on the command line
        return addr.split(',')
    fields = addrDesc.split(',')
    if len(fields) != 3:
        self.PrintAddrDescMessage(addrDesc)
    count, highest, lowest = int(fields[0]), int(fields[1]), int(fields[2])
    if highest == -1:
        # use the largest block the disk lays out
        highest = self.maxBlock
    return [int(random.random() * highest) + lowest for _ in range(count)]
#
# BUTTONS
#
def Start(self, event):
    """Key handler ('s'): begin servicing requests.

    GetNextIO() runs first (it may immediately finish the run and clear
    doAnimate); doAnimate is then set before the animation loop starts.
    """
    self.GetNextIO()
    self.doAnimate = True
    self.Animate()
def Pause(self, event):
    """Key handler ('p'): toggle the animation on/off."""
    self.doAnimate = not self.doAnimate
def Exit(self, event):
    """Key handler ('q'): quit immediately with a success status."""
    # equivalent to sys.exit(0), which just raises SystemExit(0)
    raise SystemExit(0)
#
# CORE SIMULATION and ANIMATION
#
def UpdateTime(self):
    """Refresh the on-screen time/seek/rotate/transfer counters
    (no-op without graphics)."""
    if not self.graphics:
        return
    labels = (
        (self.timeID, 'Time: ', self.timer),
        (self.seekID, 'Seek: ', self.seekTotal),
        (self.rotID, 'Rotate: ', self.rotTotal),
        (self.xferID, 'Transfer: ', self.xferTotal),
    )
    for item, prefix, value in labels:
        self.canvas.itemconfig(item, text=prefix + str(value))
def AddRequest(self, block):
    """Append a request for the given block at the tail of the queue."""
    tail = len(self.requestQueue)
    self.AddQueueEntry(block, tail)
def QueueMap(self, index):
    """Map a queue index to its (column, row) cell in the on-screen grid
    of request boxes (the grid is 400 pixels wide)."""
    perRow = 400 / self.queueBoxSize
    col = index % perRow
    row = index / perRow
    return (col, row)
def DrawWindow(self):
    """Draw the vertical bar marking the scheduling-window boundary in the
    on-screen request queue (no-op when windowing is disabled)."""
    if self.window == -1:
        return
    (col, row) = self.QueueMap(self.currWindow)
    if col == 0:
        # boundary falls at the start of a row: draw it at the end of the
        # previous row instead (20 presumably matches columns-per-row at
        # the default box size -- TODO confirm against queueBoxSize)
        (col, row) = (20, row - 1)
    if self.windowID != -1:
        # erase the previously drawn marker before redrawing
        self.canvas.delete(self.windowID)
    self.windowID = self.canvas.create_line(self.queueX + (col * 20) - 10, self.queueY - 13 + (row * 20),
                                            self.queueX + (col * 20) - 10, self.queueY + 13 + (row * 20), width=2)
def AddQueueEntry(self, block, index):
    """Record a request at the given queue index and, when graphics are
    on, draw its white box with the block number inside."""
    self.requestQueue.append((block, index))
    self.requestState.append(STATE_NULL)
    if not self.graphics:
        return
    (col, row) = self.QueueMap(index)
    half = self.queueBoxSize / 2.0
    cx = self.queueX + (col * self.queueBoxSize)
    cy = self.queueY + (row * self.queueBoxSize)
    self.queueBoxID[index] = self.canvas.create_rectangle(cx - half, cy - half, cx + half, cy + half, fill='white')
    self.queueTxtID[index] = self.canvas.create_text(cx, cy, anchor='center', text=str(block))
def SwitchColors(self, c):
    """Recolor the current request's queue box and the on-disk target
    marker (no-op without graphics)."""
    if not self.graphics:
        return
    self.canvas.itemconfig(self.queueBoxID[self.currentIndex], fill=c)
    self.canvas.itemconfig(self.targetID, fill=c)
def SwitchState(self, newState):
    """Move both the simulation and the current request into newState."""
    idx = self.currentIndex
    self.state = newState
    self.requestState[idx] = newState
def RadiallyCloseTo(self, a1, a2):
    """Return True when the two angles differ by less than one rotation
    step (rotateSpeed degrees per tick)."""
    return abs(a1 - a2) < self.rotateSpeed
def DoneWithTransfer(self):
    """Check whether the platter has rotated past the current block's
    trailing edge; if so mark the request done and count it."""
    offset = self.blockAngleOffset[self.armTrack]
    target = Decimal((self.blockToAngleMap[self.currentBlock] + offset) % 360)
    # if int(self.angle) == (self.blockToAngleMap[self.currentBlock] + offset) % 360:
    if not self.RadiallyCloseTo(self.angle, target):
        return False
    # print 'END TRANSFER', self.angle, self.timer
    self.SwitchState(STATE_DONE)
    self.requestCount += 1
    return True
def DoneWithRotation(self):
    """Return True (and enter the transfer state) once the target block's
    leading edge has rotated under the head; False otherwise."""
    angleOffset = self.blockAngleOffset[self.armTrack]
    # XXX there is a weird bug in here
    # print self.timer, 'ROTATE:: ', self.currentBlock, 'currangle: ', self.angle, ' - mapangle: ', self.blockToAngleMap[self.currentBlock]
    # print ' angleOffset ', angleOffset
    # print ' blockMap ', (self.blockToAngleMap[self.currentBlock] - angleOffset) % 360
    # print ' self.angle ', self.angle, int(self.angle)
    # if int(self.angle) == (self.blockToAngleMap[self.currentBlock] - angleOffset) % 360:
    # leading edge = block center minus half the block's angular width
    if self.RadiallyCloseTo(self.angle, Decimal((self.blockToAngleMap[self.currentBlock] - angleOffset) % 360)):
        self.SwitchState(STATE_XFER)
        # print ' --> DONE WITH ROTATION!', self.timer
        return True
    return False
def PlanSeek(self, track):
    """Begin a seek toward the given track; when the arm is already there,
    skip straight to the rotational-wait state."""
    self.seekBegin = self.timer
    self.SwitchColors('orange')
    self.SwitchState(STATE_SEEK)
    if track == self.armTrack:
        # no arm movement needed: rotation accounting starts now
        self.rotBegin = self.timer
        self.SwitchColors('lightblue')
        self.SwitchState(STATE_ROTATE)
        return
    self.armTarget = track
    self.armTargetX1 = self.spindleX - self.tracks[track] - (self.trackWidth / 2.0)
    # sign of the speed picks the direction of travel
    self.armSpeed = self.armSpeedBase if track >= self.armTrack else -self.armSpeedBase
def DoneWithSeek(self):
    """Advance the arm one step toward its target; return True once the
    target X position is reached (recording the new arm track)."""
    # move the disk arm
    self.armX1 += self.armSpeed
    self.armX2 += self.armSpeed
    self.headX1 += self.armSpeed
    self.headX2 += self.armSpeed
    # update it on screen
    if self.graphics:
        self.canvas.coords(self.armID, self.armX1, self.armY1, self.armX2, self.armY2)
        self.canvas.coords(self.headID, self.headX1, self.headY1, self.headX2, self.headY2)
    # check if done: direction-aware comparison against the target X
    if (self.armSpeed > 0.0 and self.armX1 >= self.armTargetX1) or (self.armSpeed < 0.0 and self.armX1 <= self.armTargetX1):
        self.armTrack = self.armTarget
        return True
    return False
def DoSATF(self, rList):
    """Shortest-access-time-first: among the pending requests in rList,
    return the (block, index) with the smallest estimated total of
    seek + rotate + transfer time; also records the estimate in
    self.totalEst.  Asserts that at least one pending request exists."""
    minBlock = -1
    minIndex = -1
    minEst = -1
    # print '**** DoSATF ****', rList
    for (block, index) in rList:
        if self.requestState[index] == STATE_DONE:
            continue
        track = self.blockToTrackMap[block]
        angle = self.blockToAngleMap[block]
        # print 'track', track, 'angle', angle
        # estimate seek time: track distance / arm speed, in timer ticks
        dist = int(math.fabs(self.armTrack - track))
        seekEst = Decimal(self.trackWidth / self.armSpeedBase) * dist
        # estimate rotate time: where the platter will be when the seek
        # finishes, and how far it must still rotate to the block's edge
        angleOffset = self.blockAngleOffset[track]
        angleAtArrival = (Decimal(self.angle) + (seekEst * self.rotateSpeed))
        while angleAtArrival > 360.0:
            angleAtArrival -= 360.0
        rotDist = Decimal((angle - angleOffset) - angleAtArrival)
        while rotDist > 360.0:
            rotDist -= Decimal(360.0)
        while rotDist < 0.0:
            rotDist += Decimal(360.0)
        rotEst = rotDist / self.rotateSpeed
        # finally, transfer: the full angular width of one block
        xferEst = (Decimal(angleOffset) * Decimal(2.0)) / self.rotateSpeed
        totalEst = seekEst + rotEst + xferEst
        # should probably pick one on same track in case of a TIE
        if minEst == -1 or totalEst < minEst:
            minEst = totalEst
            minBlock = block
            minIndex = index
    # END loop
    # when done
    self.totalEst = minEst
    assert(minBlock != -1)
    assert(minIndex != -1)
    return (minBlock, minIndex)
#
# actually doesn't quite do SSTF
# just finds all the blocks on the nearest track
# (whatever that may be) and returns it as a list
#
def DoSSTF(self, rList):
    """Return all pending (block, index) pairs that live on the track
    nearest the arm's current position (ties keep every block on that
    nearest track).  Asserts that something is pending."""
    best = MAXTRACKS
    nearest = []  # all pending blocks on the closest track found so far
    for (block, index) in rList:
        if self.requestState[index] == STATE_DONE:
            # already serviced; skip
            continue
        dist = int(math.fabs(self.armTrack - self.blockToTrackMap[block]))
        if dist < best:
            nearest = [(block, index)]
            best = dist
        elif dist == best:
            nearest.append((block, index))
    assert(nearest != [])
    return nearest
def UpdateWindow(self):
    """Grow the scheduling window by one (non-fair mode only, while it is
    within the queue) and redraw the window marker."""
    growing = 0 < self.currWindow < len(self.requestQueue)
    if self.fairWindow == -1 and growing:
        self.currWindow += 1
    if self.graphics:
        self.DrawWindow()
def GetWindow(self):
    """Return the effective scheduling window size.

    currWindow <= -1 means 'consider the whole queue'.  In fair mode
    (fairWindow != -1) the window advances by fairWindow once every
    fairWindow completed requests, clamped to the queue length.
    """
    if self.currWindow <= -1:
        return len(self.requestQueue)
    if self.fairWindow == -1:
        return self.currWindow
    # fair mode: bump the window on every fairWindow-th completion
    if self.requestCount > 0 and (self.requestCount % self.fairWindow == 0):
        self.currWindow = self.currWindow + self.fairWindow
        if self.currWindow > len(self.requestQueue):
            self.currWindow = len(self.requestQueue)
        if self.graphics:
            self.DrawWindow()
    return self.currWindow
def GetNextIO(self):
    """Pick the next request per the scheduling policy and start its seek;
    finish the simulation when every queued request has been serviced."""
    # check if done: if so, print stats and end animation
    if self.requestCount == len(self.requestQueue):
        self.UpdateTime()
        self.PrintStats()
        self.doAnimate = False
        self.isDone = True
        return
    # do policy: should set currentBlock,
    if self.policy == 'FIFO':
        (self.currentBlock, self.currentIndex) = self.requestQueue[self.requestCount]
        # run SATF over just the chosen request so self.totalEst gets set
        self.DoSATF(self.requestQueue[self.requestCount:self.requestCount+1])
    elif self.policy == 'SATF' or self.policy == 'BSATF':
        (self.currentBlock, self.currentIndex) = self.DoSATF(self.requestQueue[0:self.GetWindow()])
    elif self.policy == 'SSTF':
        # first, find all the blocks on a given track (given window constraints)
        trackList = self.DoSSTF(self.requestQueue[0:self.GetWindow()])
        # then, do SATF on those blocks (otherwise, will not do them in obvious order)
        (self.currentBlock, self.currentIndex) = self.DoSATF(trackList)
    else:
        print 'policy (%s) not implemented' % self.policy
        sys.exit(1)
    # once best block is decided, go ahead and do the seek
    self.PlanSeek(self.blockToTrackMap[self.currentBlock])
    # add another block? (late requests arrive one per scheduling decision)
    if len(self.lateRequests) > 0 and self.lateCount < len(self.lateRequests):
        self.AddRequest(self.lateRequests[self.lateCount])
        self.lateCount += 1
def Animate(self):
    """One simulation tick: rotate the platter, redraw blocks, and run the
    SEEK -> ROTATE -> XFER -> DONE state machine for the current request.
    Reschedules itself via Tk's after() when graphics are on."""
    if self.graphics == True and self.doAnimate == False:
        # paused: keep the Tk timer alive but do no work
        self.root.after(20, self.Animate)
        return
    # timer
    self.timer += 1
    self.UpdateTime()
    # see which blocks are rotating on the disk
    # print 'SELF ANGLE', self.angle
    self.angle = Decimal(self.angle + self.rotateSpeed)
    if self.angle >= 360.0:
        self.angle = Decimal(0.0)
    # move the blocks
    if self.graphics:
        for (track, angle, name, cid) in self.blockInfoList:
            distFromSpindle = self.tracks[track]
            na = angle - self.angle
            xc = self.spindleX + (distFromSpindle * math.cos(math.radians(na)))
            yc = self.spindleY + (distFromSpindle * math.sin(math.radians(na)))
            if self.graphics:
                self.canvas.coords(cid, xc, yc)
                if self.currentBlock == name:
                    # keep the highlight square centered on the target block
                    sz = self.targetSize
                    self.canvas.coords(self.targetID, xc-sz, yc-sz, xc+sz, yc+sz)
    # move the arm OR wait for a rotational delay
    if self.state == STATE_SEEK:
        if self.DoneWithSeek():
            self.rotBegin = self.timer
            self.SwitchState(STATE_ROTATE)
            self.SwitchColors('lightblue')
    if self.state == STATE_ROTATE:
        # check for read (disk arm must be settled)
        if self.DoneWithRotation():
            self.xferBegin = self.timer
            self.SwitchState(STATE_XFER)
            self.SwitchColors('green')
    if self.state == STATE_XFER:
        if self.DoneWithTransfer():
            self.DoRequestStats()
            self.SwitchState(STATE_DONE)
            self.SwitchColors('red')
            self.UpdateWindow()
            currentBlock = self.currentBlock
            self.GetNextIO()
            nextBlock = self.currentBlock
            # sequential access on the same track (including end-of-track
            # wraparound) stays in transfer mode with zeroed phase timers
            if self.blockToTrackMap[currentBlock] == self.blockToTrackMap[nextBlock]:
                if (currentBlock == self.tracksBeginEnd[self.armTrack][1] and nextBlock == self.tracksBeginEnd[self.armTrack][0]) or (currentBlock + 1 == nextBlock):
                    # need a special case here: to handle when we stay in transfer mode
                    (self.rotBegin, self.seekBegin, self.xferBegin) = (self.timer, self.timer, self.timer)
                    self.SwitchState(STATE_XFER)
                    self.SwitchColors('green')
    # make sure to keep the animation going!
    if self.graphics:
        self.root.after(20, self.Animate)
def DoRequestStats(self):
    """Compute per-request seek/rotate/transfer durations from the phase
    timestamps and fold them into the running totals; print a per-request
    line when compute mode is on."""
    seekTime = self.rotBegin - self.seekBegin
    rotTime = self.xferBegin - self.rotBegin
    xferTime = self.timer - self.xferBegin
    totalTime = self.timer - self.seekBegin
    if self.compute == True:
        print 'Block: %3d Seek:%3d Rotate:%3d Transfer:%3d Total:%4d' % (self.currentBlock, seekTime, rotTime, xferTime, totalTime)
    # if int(totalTime) != int(self.totalEst):
    # print 'INTERNAL ERROR: estimate was', self.totalEst, 'whereas actual time to access block was', totalTime
    # print 'Please report this bug and as much information as possible so as to make it easy to recreate. Thanks!'
    # update stats
    self.seekTotal += seekTime
    self.rotTotal += rotTime
    self.xferTotal += xferTime
def PrintStats(self):
    """Print the final seek/rotate/transfer totals and the elapsed timer
    (only when compute mode is on)."""
    if self.compute == True:
        print '\nTOTALS Seek:%3d Rotate:%3d Transfer:%3d Total:%4d\n' % (self.seekTotal, self.rotTotal, self.xferTotal, self.timer)
# END: class Disk
#
# MAIN SIMULATOR
#

# Command-line options: request workload, disk geometry/speeds, scheduling
# policy and window, and display/compute flags.
parser = OptionParser()
parser.add_option('-s', '--seed', default='0', help='Random seed', action='store', type='int', dest='seed')
parser.add_option('-a', '--addr', default='-1', help='Request list (comma-separated) [-1 -> use addrDesc]', action='store', type='string', dest='addr')
parser.add_option('-A', '--addrDesc', default='5,-1,0', help='Num requests, max request (-1->all), min request', action='store', type='string', dest='addrDesc')
parser.add_option('-S', '--seekSpeed', default='1', help='Speed of seek', action='store', type='string', dest='seekSpeed')
parser.add_option('-R', '--rotSpeed', default='1', help='Speed of rotation', action='store', type='string', dest='rotateSpeed')
parser.add_option('-p', '--policy', default='FIFO', help='Scheduling policy (FIFO, SSTF, SATF, BSATF)', action='store', type='string', dest='policy')
parser.add_option('-w', '--schedWindow', default=-1, help='Size of scheduling window (-1 -> all)', action='store', type='int', dest='window')
parser.add_option('-o', '--skewOffset', default=0, help='Amount of skew (in blocks)', action='store', type='int', dest='skew')
parser.add_option('-z', '--zoning', default='30,30,30', help='Angles between blocks on outer,middle,inner tracks', action='store', type='string', dest='zoning')
parser.add_option('-G', '--graphics', default=False, help='Turn on graphics', action='store_true', dest='graphics')
parser.add_option('-l', '--lateAddr', default='-1', help='Late: request list (comma-separated) [-1 -> random]', action='store', type='string', dest='lateAddr')
parser.add_option('-L', '--lateAddrDesc', default='0,-1,0', help='Num requests, max request (-1->all), min request', action='store', type='string', dest='lateAddrDesc')
parser.add_option('-c', '--compute', default=False, help='Compute the answers', action='store_true', dest='compute')
(options, args) = parser.parse_args()

# Echo the options so runs are reproducible from their output.
print 'OPTIONS seed', options.seed
print 'OPTIONS addr', options.addr
print 'OPTIONS addrDesc', options.addrDesc
print 'OPTIONS seekSpeed', options.seekSpeed
print 'OPTIONS rotateSpeed', options.rotateSpeed
print 'OPTIONS skew', options.skew
print 'OPTIONS window', options.window
print 'OPTIONS policy', options.policy
print 'OPTIONS compute', options.compute
print 'OPTIONS graphics', options.graphics
print 'OPTIONS zoning', options.zoning
print 'OPTIONS lateAddr', options.lateAddr
print 'OPTIONS lateAddrDesc', options.lateAddrDesc
print ''

# Sanity checks on the options.
if options.window == 0:
    print 'Scheduling window (%d) must be positive or -1 (which means a full window)' % options.window
    sys.exit(1)

# Graphics without compute would show nothing useful, so force compute on.
if options.graphics and options.compute == False:
    print '\nWARNING: Setting compute flag to True, as graphics are on\n'
    options.compute = True

# set up simulator info
d = Disk(addr=options.addr, addrDesc=options.addrDesc, lateAddr=options.lateAddr, lateAddrDesc=options.lateAddrDesc,
         policy=options.policy, seekSpeed=Decimal(options.seekSpeed), rotateSpeed=Decimal(options.rotateSpeed),
         skew=options.skew, window=options.window, compute=options.compute, graphics=options.graphics, zoning=options.zoning)

# run simulation
d.Go()
| gpl-2.0 |
mvdriel/ansible-modules-core | cloud/amazon/ec2_vpc.py | 104 | 27780 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc
short_description: configure AWS virtual private clouds
description:
- Create or terminates AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "1.4"
options:
cidr_block:
description:
- "The cidr block representing the VPC, e.g. 10.0.0.0/16"
required: false, unless state=present
instance_tenancy:
description:
- "The supported tenancy options for instances launched into the VPC."
required: false
default: "default"
choices: [ "default", "dedicated" ]
dns_support:
description:
- toggles the "Enable DNS resolution" flag
required: false
default: "yes"
choices: [ "yes", "no" ]
dns_hostnames:
description:
- toggles the "Enable DNS hostname support for instances" flag
required: false
default: "yes"
choices: [ "yes", "no" ]
subnets:
description:
- 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed. As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.'
required: false
default: null
aliases: []
vpc_id:
description:
- A VPC id to terminate when state=absent
required: false
default: null
aliases: []
resource_tags:
description:
- 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
required: true
default: null
aliases: []
version_added: "1.6"
internet_gateway:
description:
- Toggle whether there should be an Internet gateway attached to the VPC
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
route_tables:
description:
- 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
required: false
default: null
aliases: []
wait:
description:
- wait for the VPC to be in state 'available' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
state:
description:
- Create or terminate the VPC
required: true
default: present
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: ['aws_region', 'ec2_region']
author: "Carson Gee (@carsongee)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic creation example:
ec2_vpc:
state: present
cidr_block: 172.23.0.0/16
resource_tags: { "Environment":"Development" }
region: us-west-2
# Full creation example with subnets and optional availability zones.
# The absence or presence of subnets deletes or creates them respectively.
ec2_vpc:
state: present
cidr_block: 172.22.0.0/16
resource_tags: { "Environment":"Development" }
subnets:
- cidr: 172.22.1.0/24
az: us-west-2c
resource_tags: { "Environment":"Dev", "Tier" : "Web" }
- cidr: 172.22.2.0/24
az: us-west-2b
resource_tags: { "Environment":"Dev", "Tier" : "App" }
- cidr: 172.22.3.0/24
az: us-west-2a
resource_tags: { "Environment":"Dev", "Tier" : "DB" }
internet_gateway: True
route_tables:
- subnets:
- 172.22.2.0/24
- 172.22.3.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
- subnets:
- 172.22.1.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
region: us-west-2
register: vpc
# Removal of a VPC by id
ec2_vpc:
state: absent
vpc_id: vpc-aaaaaaa
region: us-west-2
If you have added elements not managed by this module, e.g. instances, NATs, etc then
the delete will fail until those dependencies are removed.
'''
import time
try:
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_vpc_info(vpc):
    """
    Flatten a boto VPC object into a plain dictionary
    (id, CIDR block, DHCP options id, region name, and state).
    """
    return {
        'id': vpc.id,
        'cidr_block': vpc.cidr_block,
        'dhcp_options_id': vpc.dhcp_options_id,
        'region': vpc.region.name,
        'state': vpc.state,
    }
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
    """
    Finds a VPC that matches a specific id or cidr + tags.

    module : AnsibleModule object
    vpc_conn: authenticated VPCConnection connection object

    Returns:
        A VPC object that matches either an ID or CIDR and one or more
        tag values, or None when no (single) match exists.
    """
    # idiom fix: identity comparison with None, not ==
    if vpc_id is None and cidr is None:
        module.fail_json(
            msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting'
        )

    found_vpcs = []

    resource_tags = module.params.get('resource_tags')

    # Check for existing VPC by id first, otherwise by cidr + tags.
    if vpc_id is not None:
        found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available'})
    else:
        previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})

        for vpc in previous_vpcs:
            # Get all tags for each of the found VPCs
            vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))

            # If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC
            if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())):
                found_vpcs.append(vpc)

    found_vpc = None

    if len(found_vpcs) == 1:
        found_vpc = found_vpcs[0]

    if len(found_vpcs) > 1:
        module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting')

    return found_vpc
def routes_match(rt_list=None, rt=None, igw=None):
    """
    Check if the route table has all routes as in the given list.

    rt_list : list of route dicts ({'dest': ..., 'gw': ...}) from the module
    rt      : the remote route table object
    igw     : the internet gateway object for this vpc

    Returns:
        True when every provided route is present in the remote table,
        False otherwise.
    """
    # idiom fix: use real booleans instead of the original "true"/"false"
    # string flags; external behavior (True/False result) is unchanged.
    local_routes = []
    for route in rt_list:
        # Normalize each requested route into the attribute dict shape
        # boto exposes on remote route objects.
        route_kwargs = {
            'gateway_id': None,
            'instance_id': None,
            'state': 'active',
            'destination_cidr_block': route['dest'],
        }
        if route['gw'] == 'igw':
            route_kwargs['gateway_id'] = igw.id
        elif route['gw'].startswith('i-'):
            route_kwargs['instance_id'] = route['gw']
        else:
            route_kwargs['gateway_id'] = route['gw']
        local_routes.append(route_kwargs)

    remote_routes = [j.__dict__ for j in rt.routes]

    # Every locally-requested route must match (as an attribute subset)
    # at least one remote route.
    for wanted in local_routes:
        if not any(set(wanted.items()).issubset(set(remote.items()))
                   for remote in remote_routes):
            return False
    return True
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
    """
    Checks if the remote routes match the local routes.

    route_tables : route_tables parameter in the module
    vpc_conn : the VPC connection object
    module : the module object
    vpc : the vpc object for this route table
    igw : the internet gateway object for this vpc

    Returns:
        True when there is a difference between the provided routes and
        remote routes, or the subnet associations differ.
        False when both routes and subnet associations match.
    """
    #We add a one for the main table
    rtb_len = len(route_tables) + 1
    remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
    if remote_rtb_len != rtb_len:
        # different number of tables -> definitely changed
        return True
    for rt in route_tables:
        rt_id = None
        for sn in rt['subnets']:
            rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
            if len(rsn) != 1:
                module.fail_json(
                    msg='The subnet {0} to associate with route_table {1} ' \
                    'does not exist, aborting'.format(sn, rt)
                )
            # route table currently associated with this subnet
            nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
            if not nrt:
                return True
            else:
                nrt = nrt[0]
                if not rt_id:
                    # first subnet of this table: remember its table id and
                    # compare the routes themselves
                    rt_id = nrt.id
                    if not routes_match(rt['routes'], nrt, igw):
                        return True
                    continue
                else:
                    # all later subnets must map to the same remote table
                    if rt_id == nrt.id:
                        continue
                    else:
                        return True
            # NOTE(review): every branch above continues or returns, so this
            # statement appears unreachable (dead code) -- confirm intent.
            return True
    return False
def create_vpc(module, vpc_conn):
    """
    Creates a new or modifies an existing VPC.

    module : AnsibleModule object
    vpc_conn: authenticated VPCConnection connection object

    Returns:
        A tuple (vpc_dict, created_vpc_id, returned_subnets, igw_id,
        changed) describing the VPC and subnets that were launched.
    """
    id = module.params.get('vpc_id')
    cidr_block = module.params.get('cidr_block')
    instance_tenancy = module.params.get('instance_tenancy')
    dns_support = module.params.get('dns_support')
    dns_hostnames = module.params.get('dns_hostnames')
    subnets = module.params.get('subnets')
    internet_gateway = module.params.get('internet_gateway')
    route_tables = module.params.get('route_tables')
    vpc_spec_tags = module.params.get('resource_tags')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    changed = False

    # Check for existing VPC by cidr_block + tags or id
    previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)

    if previous_vpc is not None:
        changed = False
        vpc = previous_vpc
    else:
        changed = True
        try:
            vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy)
            # wait here until the vpc is available
            pending = True
            wait_timeout = time.time() + wait_timeout
            while wait and wait_timeout > time.time() and pending:
                try:
                    pvpc = vpc_conn.get_all_vpcs(vpc.id)
                    # NOTE(review): get_all_vpcs appears to return a list
                    # (the pvpc[0] branch); the hasattr(pvpc, 'state')
                    # branch presumably covers a single-object return --
                    # confirm against the boto version in use.
                    if hasattr(pvpc, 'state'):
                        if pvpc.state == "available":
                            pending = False
                    elif hasattr(pvpc[0], 'state'):
                        if pvpc[0].state == "available":
                            pending = False
                    # sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs()
                    # when that happens, just wait a bit longer and try again
                except boto.exception.BotoServerError, e:
                    if e.error_code != 'InvalidVpcID.NotFound':
                        raise
                if pending:
                    time.sleep(5)
            if wait and wait_timeout <= time.time():
                # waiting took too long
                module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())

        except boto.exception.BotoServerError, e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    # Done with base VPC, now change to attributes and features.

    # Add resource tags (only those missing from the remote VPC)
    vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
    if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
        new_tags = {}

        for (key, value) in set(vpc_spec_tags.items()):
            if (key, value) not in set(vpc_tags.items()):
                new_tags[key] = value

        if new_tags:
            vpc_conn.create_tags(vpc.id, new_tags)

    # boto doesn't appear to have a way to determine the existing
    # value of the dns attributes, so we just set them.
    # It also must be done one at a time.
    vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support)
    vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames)

    # Process all subnet properties
    if subnets is not None:
        if not isinstance(subnets, list):
            module.fail_json(msg='subnets needs to be a list of cidr blocks')

        current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })

        # First add all new subnets
        for subnet in subnets:
            add_subnet = True
            for csn in current_subnets:
                if subnet['cidr'] == csn.cidr_block:
                    add_subnet = False
            if add_subnet:
                try:
                    new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
                    new_subnet_tags = subnet.get('resource_tags', None)
                    if new_subnet_tags:
                        # Sometimes AWS takes its time to create a subnet and so using new subnets's id
                        # to create tags results in exception.
                        # boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
                        # so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
                        while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
                            time.sleep(0.1)

                        vpc_conn.create_tags(new_subnet.id, new_subnet_tags)

                    changed = True
                except EC2ResponseError, e:
                    module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))

        # Now delete all absent subnets
        for csubnet in current_subnets:
            delete_subnet = True
            for subnet in subnets:
                if csubnet.cidr_block == subnet['cidr']:
                    delete_subnet = False
            if delete_subnet:
                try:
                    vpc_conn.delete_subnet(csubnet.id)
                    changed = True
                except EC2ResponseError, e:
                    module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e))

    # Handle Internet gateway (create/delete igw)
    igw = None
    igw_id = None
    igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id})
    if len(igws) > 1:
        module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id)
    if internet_gateway:
        if len(igws) != 1:
            try:
                igw = vpc_conn.create_internet_gateway()
                vpc_conn.attach_internet_gateway(igw.id, vpc.id)
                changed = True
            except EC2ResponseError, e:
                module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e))
        else:
            # Set igw variable to the current igw instance for use in route tables.
            igw = igws[0]
    else:
        if len(igws) > 0:
            try:
                vpc_conn.detach_internet_gateway(igws[0].id, vpc.id)
                vpc_conn.delete_internet_gateway(igws[0].id)
                changed = True
            except EC2ResponseError, e:
                module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))

    if igw is not None:
        igw_id = igw.id

    # Handle route tables - this may be worth splitting into a
    # different module but should work fine here. The strategy to stay
    # indempotent is to basically build all the route tables as
    # defined, track the route table ids, and then run through the
    # remote list of route tables and delete any that we didn't
    # create. This shouldn't interrupt traffic in theory, but is the
    # only way to really work with route tables over time that I can
    # think of without using painful aws ids. Hopefully boto will add
    # the replace-route-table API to make this smoother and
    # allow control of the 'main' routing table.
    if route_tables is not None:
        rtb_needs_change = rtb_changed(route_tables, vpc_conn, module, vpc, igw)
    if route_tables is not None and rtb_needs_change:
        if not isinstance(route_tables, list):
            module.fail_json(msg='route tables need to be a list of dictionaries')

        # Work through each route table and update/create to match dictionary array
        all_route_tables = []
        for rt in route_tables:
            try:
                new_rt = vpc_conn.create_route_table(vpc.id)
                new_rt_tags = rt.get('resource_tags', None)
                if new_rt_tags:
                    vpc_conn.create_tags(new_rt.id, new_rt_tags)
                for route in rt['routes']:
                    route_kwargs = {}
                    if route['gw'] == 'igw':
                        if not internet_gateway:
                            module.fail_json(
                                msg='You asked for an Internet Gateway ' \
                                '(igw) route, but you have no Internet Gateway'
                            )
                        route_kwargs['gateway_id'] = igw.id
                    elif route['gw'].startswith('i-'):
                        route_kwargs['instance_id'] = route['gw']
                    else:
                        route_kwargs['gateway_id'] = route['gw']
                    vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs)

                # Associate with subnets
                for sn in rt['subnets']:
                    rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
                    if len(rsn) != 1:
                        module.fail_json(
                            msg='The subnet {0} to associate with route_table {1} ' \
                            'does not exist, aborting'.format(sn, rt)
                        )
                    rsn = rsn[0]

                    # Disassociate then associate since we don't have replace
                    old_rt = vpc_conn.get_all_route_tables(
                        filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
                    )
                    old_rt = [ x for x in old_rt if x.id != None ]
                    if len(old_rt) == 1:
                        old_rt = old_rt[0]
                        association_id = None
                        for a in old_rt.associations:
                            if a.subnet_id == rsn.id:
                                association_id = a.id
                        vpc_conn.disassociate_route_table(association_id)
                    vpc_conn.associate_route_table(new_rt.id, rsn.id)

                all_route_tables.append(new_rt)
                changed = True
            except EC2ResponseError, e:
                module.fail_json(
                    msg='Unable to create and associate route table {0}, error: ' \
                    '{1}'.format(rt, e)
                )

        # Now that we are good to go on our new route tables, delete the
        # old ones except the 'main' route table as boto can't set the main
        # table yet.
        all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id})
        for rt in all_rts:
            if rt.id is None:
                continue
            delete_rt = True
            for newrt in all_route_tables:
                if newrt.id == rt.id:
                    delete_rt = False
                    break
            if delete_rt:
                rta = rt.associations
                is_main = False
                for a in rta:
                    if a.main:
                        is_main = True
                        break
                try:
                    if not is_main:
                        vpc_conn.delete_route_table(rt.id)
                        changed = True
                except EC2ResponseError, e:
                    module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e))

    vpc_dict = get_vpc_info(vpc)

    created_vpc_id = vpc.id
    returned_subnets = []
    current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })

    for sn in current_subnets:
        returned_subnets.append({
            'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})),
            'cidr': sn.cidr_block,
            'az': sn.availability_zone,
            'id': sn.id,
        })

    if subnets is not None:
        # Sort subnets by the order they were listed in the play
        order = {}
        for idx, val in enumerate(subnets):
            order[val['cidr']] = idx

        # Number of subnets in the play
        subnets_in_play = len(subnets)
        returned_subnets.sort(key=lambda x: order.get(x['cidr'], subnets_in_play))

    return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
    """
    Terminates a VPC
    module: Ansible module object
    vpc_conn: authenticated VPCConnection connection object
    vpc_id: a vpc id to terminate
    cidr: The cidr block of the VPC - can be used in lieu of an ID
    Returns a dictionary of VPC information
    about the VPC terminated.
    If the VPC to be terminated is available
    "changed" will be set to True.
    """
    vpc_dict = {}
    terminated_vpc_id = ''
    changed = False
    # Resolve the target VPC either by explicit id or by CIDR block.
    vpc = find_vpc(module, vpc_conn, vpc_id, cidr)
    if vpc is not None:
        if vpc.state == 'available':
            terminated_vpc_id=vpc.id
            vpc_dict=get_vpc_info(vpc)
            try:
                # Dependent resources must be removed before the VPC itself:
                # subnets first, then internet gateways (detach before
                # delete), then all non-main route tables.
                subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
                for sn in subnets:
                    vpc_conn.delete_subnet(sn.id)
                igws = vpc_conn.get_all_internet_gateways(
                    filters={'attachment.vpc-id': vpc.id}
                )
                for igw in igws:
                    vpc_conn.detach_internet_gateway(igw.id, vpc.id)
                    vpc_conn.delete_internet_gateway(igw.id)
                rts = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id})
                for rt in rts:
                    rta = rt.associations
                    is_main = False
                    for a in rta:
                        if a.main:
                            is_main = True
                    # The 'main' route table is removed implicitly together
                    # with the VPC; deleting it explicitly would fail.
                    if not is_main:
                        vpc_conn.delete_route_table(rt.id)
                vpc_conn.delete_vpc(vpc.id)
            except EC2ResponseError, e:
                # fail_json terminates the module run.
                module.fail_json(
                    msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e)
                )
            changed = True
    return (changed, vpc_dict, terminated_vpc_id)
def main():
    """Entry point of the ec2_vpc Ansible module: parse parameters, connect
    to the regional VPC endpoint and create or terminate the VPC."""
    # Build the accepted parameters on top of the shared EC2 argument set
    # (region, credentials, ...).
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            cidr_block = dict(),
            instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
            wait = dict(type='bool', default=False),
            wait_timeout = dict(default=300),
            dns_support = dict(type='bool', default=True),
            dns_hostnames = dict(type='bool', default=True),
            subnets = dict(type='list'),
            vpc_id = dict(),
            internet_gateway = dict(type='bool', default=False),
            resource_tags = dict(type='dict', required=True),
            route_tables = dict(type='list'),
            state = dict(choices=['present', 'absent'], default='present'),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    state = module.params.get('state')
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    # If we have a region specified, connect to its endpoint.
    if region:
        try:
            vpc_conn = boto.vpc.connect_to_region(
                region,
                **aws_connect_kwargs
            )
        except boto.exception.NoAuthHandlerFound, e:
            module.fail_json(msg = str(e))
    else:
        # A VPC is a regional resource; there is no sensible default.
        module.fail_json(msg="region must be specified")
    igw_id = None
    if module.params.get('state') == 'absent':
        vpc_id = module.params.get('vpc_id')
        cidr = module.params.get('cidr_block')
        (changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
        subnets_changed = None
    elif module.params.get('state') == 'present':
        # Changed is always set to true when provisioning a new VPC
        (vpc_dict, new_vpc_id, subnets_changed, igw_id, changed) = create_vpc(module, vpc_conn)
    module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, igw_id=igw_id, subnets=subnets_changed)
# import module snippets
# NOTE: legacy Ansible modules rely on these wildcard imports to splice in
# the common boilerplate (AnsibleModule, AWS helpers) and invoke main()
# unconditionally at the bottom of the file by convention.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
ebrelsford/django-phillydata | phillydata/opa/migrations/0001_initial.py | 1 | 3517 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the OPA app: AccountOwner and BillingAccount.

    Auto-generated by Django; the field definitions below mirror the model
    state at generation time and should not be edited by hand.
    """

    dependencies = [
        # AccountOwner.owner points at owners.Owner, so that app's initial
        # migration must run first.
        ('owners', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='AccountOwner',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=256, verbose_name='name')),
                ('owner', models.ForeignKey(verbose_name='owner', blank=True, to='owners.Owner', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='BillingAccount',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('external_id', models.CharField(help_text='The OPA account number (also called "BRT number")', unique=True, max_length=50, verbose_name='external id')),
                ('property_address', models.CharField(help_text='The address of the property this account is associated with', max_length=300, null=True, verbose_name='property address', blank=True)),
                ('improvement_description', models.CharField(help_text='The improvement description according to OPA', max_length=300, null=True, verbose_name='improvement description', blank=True)),
                ('sale_date', models.DateField(help_text='The date of the last sale of this property according to the OPA', null=True, verbose_name='sale date', blank=True)),
                ('land_area', models.DecimalField(decimal_places=3, max_digits=20, blank=True, help_text='The land area of the property according to the OPA in square feet', null=True, verbose_name='land area (sq ft)')),
                ('improvement_area', models.IntegerField(help_text='The improvement area of the property according to the OPA', null=True, verbose_name='improvement area', blank=True)),
                ('assessment', models.DecimalField(decimal_places=2, max_digits=20, blank=True, help_text='The assessment of the property according to the OPA', null=True, verbose_name='assessment')),
                ('mailing_name', models.CharField(help_text='The name on the mailing address for this account.', max_length=300, null=True, verbose_name='mailing name', blank=True)),
                ('mailing_address', models.CharField(help_text='The mailing address for this account.', max_length=300, null=True, verbose_name='mailing address', blank=True)),
                ('mailing_postal_code', models.CharField(max_length=10, null=True, verbose_name='mailing postal code', blank=True)),
                ('mailing_city', models.CharField(max_length=50, null=True, verbose_name='mailing city', blank=True)),
                ('mailing_state_province', models.CharField(max_length=40, null=True, verbose_name='mailing state/province', blank=True)),
                ('mailing_country', models.CharField(default=b'USA', max_length=40, null=True, verbose_name='mailing country', blank=True)),
                ('last_updated', models.DateTimeField(auto_now=True, verbose_name='last updated')),
                ('account_owner', models.ForeignKey(verbose_name='account owner', blank=True, to='opa.AccountOwner', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause |
auferack08/edx-platform | lms/djangoapps/courseware/migrations/0003_done_grade_cache.py | 194 | 8745 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add StudentModule.max_grade and StudentModule.done,
    and narrow the unique constraint from (module_id, module_type, student)
    to (module_id, student)."""

    def forwards(self, orm):
        """Apply the schema change."""
        # NOTE (vshnayder): This constraint has the wrong field order, so it doesn't actually
        # do anything in sqlite.  Migration 0004 actually removes this index for sqlite.
        # Removing unique constraint on 'StudentModule', fields ['module_id', 'module_type', 'student']
        db.delete_unique('courseware_studentmodule', ['module_id', 'module_type', 'student_id'])

        # Adding field 'StudentModule.max_grade'
        db.add_column('courseware_studentmodule', 'max_grade', self.gf('django.db.models.fields.FloatField')(null=True, blank=True), keep_default=False)

        # Adding field 'StudentModule.done'
        db.add_column('courseware_studentmodule', 'done', self.gf('django.db.models.fields.CharField')(default='na', max_length=8, db_index=True), keep_default=False)

        # Adding unique constraint on 'StudentModule', fields ['module_id', 'student']
        db.create_unique('courseware_studentmodule', ['module_id', 'student_id'])

    def backwards(self, orm):
        """Revert the schema change (mirror image of forwards)."""
        # Removing unique constraint on 'StudentModule', fields ['module_id', 'student']
        db.delete_unique('courseware_studentmodule', ['module_id', 'student_id'])

        # Deleting field 'StudentModule.max_grade'
        db.delete_column('courseware_studentmodule', 'max_grade')

        # Deleting field 'StudentModule.done'
        db.delete_column('courseware_studentmodule', 'done')

        # Adding unique constraint on 'StudentModule', fields ['module_id', 'module_type', 'student']
        db.create_unique('courseware_studentmodule', ['module_id', 'module_type', 'student_id'])

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in history; auto-generated, do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'courseware.studentmodule': {
            'Meta': {'unique_together': "(('student', 'module_id'),)", 'object_name': 'StudentModule'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}),
            'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'module_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}),
            'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['courseware']
| agpl-3.0 |
ebar0n/django | tests/template_tests/filter_tests/test_iriencode.py | 133 | 1538 | from django.template.defaultfilters import iriencode, urlencode
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class IriencodeTests(SimpleTestCase):
    """
    Ensure iriencode keeps safe strings.
    """
    @setup({'iriencode01': '{{ url|iriencode }}'})
    def test_iriencode01(self):
        # Autoescaping is on (template default); the filter runs first.
        # NOTE(review): upstream Django expects the '&' to be HTML-escaped in
        # this case -- this literal looks like it was HTML-unescaped during
        # extraction; confirm against the template engine before relying on it.
        output = self.engine.render_to_string('iriencode01', {'url': '?test=1&me=2'})
        self.assertEqual(output, '?test=1&me=2')

    @setup({'iriencode02': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
    def test_iriencode02(self):
        # With autoescape off, the iriencode output is emitted verbatim.
        output = self.engine.render_to_string('iriencode02', {'url': '?test=1&me=2'})
        self.assertEqual(output, '?test=1&me=2')

    @setup({'iriencode03': '{{ url|iriencode }}'})
    def test_iriencode03(self):
        # mark_safe input: iriencode must preserve the "safe" flag so the
        # result is not escaped again by autoescaping.
        output = self.engine.render_to_string('iriencode03', {'url': mark_safe('?test=1&me=2')})
        self.assertEqual(output, '?test=1&me=2')

    @setup({'iriencode04': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
    def test_iriencode04(self):
        # Safe input with autoescape off: output unchanged either way.
        output = self.engine.render_to_string('iriencode04', {'url': mark_safe('?test=1&me=2')})
        self.assertEqual(output, '?test=1&me=2')
class FunctionTests(SimpleTestCase):
    """Direct unit tests of the iriencode filter function."""

    def test_unicode(self):
        # Non-ASCII characters are percent-encoded as UTF-8 bytes.
        self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'), 'S%C3%B8r-Tr%C3%B8ndelag')

    def test_urlencoded(self):
        # iriencode applied on top of urlencode leaves the already-encoded
        # sequences intact.
        self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')), 'fran%C3%A7ois%20%26%20jill')
| bsd-3-clause |
ncullen93/pyBN | pyBN/inference/marginal_exact/exact_bp.py | 1 | 2676 |
__author__ = """N. Cullen <ncullen.th@dartmouth.edu>"""
from pyBN.classes.factor import Factor
from pyBN.classes.factorization import Factorization
from pyBN.utils.graph import *
from copy import deepcopy, copy
import numpy as np
import json
def exact_bp(bn, target=None, evidence=None, downward_pass=False):
    """
    Perform Belief Propagation (Message Passing) over a Clique Tree. This
    is sometimes referred to as the "Junction Tree Algorithm" or
    the "Hugin Algorithm".

    It involves an Upward Pass (see [1] pg. 353) along with
    Downward Pass (Calibration) ([1] pg. 357) if the target involves
    multiple random variables - i.e. is a list

    Steps Involved:
        1. Build a Clique Tree from a Bayesian Network
            a. Moralize the BN
            b. Triangulate the graph
            c. Find maximal cliques and collapse into nodes
            d. Create complete graph and make edge weights = sepset cardinality
            e. Using Max Spanning Tree to create a tree of cliques
        2. Assign each factor to only one clique
        3. Compute the initial potentials of each clique
            - multiply all of the clique's factors together
        4. Perform belief propagation based on message passing protocol.

    Arguments
    ---------
    *bn* : a BayesNet object
    *target* : a variable name; the root clique is chosen so that the
        target is in its scope. The marginal over it is returned.
    *evidence* : accepted for API symmetry but currently unused here.
    *downward_pass* : when True, also calibrate the whole tree by sending
        messages back down from the root.

    Returns
    -------
    The factor obtained by marginalizing the root clique's beliefs over
    *target*.

    Notes
    -----
    """
    # 1: Moralize the graph
    # 2: Triangulate
    # 3: Build a clique tree using max spanning
    # 4: Propagation of probabilities using message passing

    # creates clique tree and assigns factors, thus satisfying steps 1-3
    ctree = CliqueTree(bn) # might not be initialized?
    #G = ctree.G
    #cliques = copy.copy(ctree.V)

    # select a clique as root where target is in scope of root
    root = ctree.V[0]
    if target is not None:
        for v in ctree.V:
            if target in ctree[v].scope:
                root = v
                break

    # Post-order DFS guarantees every child is processed before its parent,
    # so each clique has received all incoming messages before it sends one.
    clique_ordering = ctree.dfs_postorder(root=root)

    # UPWARD PASS
    # send messages up the tree from the leaves to the single root
    for i in clique_ordering:
        #clique = ctree[i]
        for j in ctree.parents(i):
            # '>>' is the clique class's message-passing operator.
            ctree[i] >> ctree[j]
            #clique.send_message(ctree[j])
        # if root node, collect its beliefs
        #if len(ctree.parents(i)) == 0:
            #ctree[root].collect_beliefs()

    ctree[root].collect_beliefs()
    marginal_target = ctree[root].marginalize_over(target)

    # DOWNWARD PASS
    if downward_pass == True:
        # send messages down the tree from the root to the leaves
        # (not needed unless *target* involves more than one variable)
        new_ordering = list(reversed(clique_ordering))
        for j in new_ordering:
            for i in ctree.children(j):
                ctree[j] >> ctree[i]
            # if leaf node, collect its beliefs
            if len(ctree.children(j)) == 0:
                ctree[j].collect_beliefs()

    return marginal_target
# beliefs hold the answers | mit |
carnotweat/cpupimp | libs/guessit/transfo/post_process.py | 142 | 2827 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.patterns import subtitle_exts
from guessit.textutils import reorder_title, find_words
import logging
log = logging.getLogger(__name__)
def process(mtree):
    """Post-process a guessed MatchTree in place.

    Two fixups are applied:
    1. promote a matched 'language' to 'subtitleLanguage' when the context
       strongly suggests it refers to subtitles;
    2. reorder series titles ending in ", the".
    """
    # 1- try to promote language to subtitle language where it makes sense
    for node in mtree.nodes():
        if 'language' not in node.guess:
            continue

        def promote_subtitle():
            # pylint: disable=W0631
            # Closure deliberately reads the current 'node' from the
            # enclosing loop; it is called before the loop advances.
            node.guess.set('subtitleLanguage', node.guess['language'],
                           confidence=node.guess.confidence('language'))
            del node.guess['language']

        # - if we matched a language in a file with a sub extension and that
        #   the group is the last group of the filename, it is probably the
        #   language of the subtitle
        #   (eg: 'xxx.english.srt')
        if (mtree.node_at((-1,)).value.lower() in subtitle_exts and
            node == mtree.leaves()[-2]):
            promote_subtitle()

        # - if we find the word 'sub' before the language, and in the same explicit
        #   group, then upgrade the language
        explicit_group = mtree.node_at(node.node_idx[:2])
        group_str = explicit_group.value.lower()

        if ('sub' in find_words(group_str) and
            0 <= group_str.find('sub') < (node.span[0] - explicit_group.span[0])):
            promote_subtitle()

        # - if a language is in an explicit group just preceded by "st",
        #   it is a subtitle language (eg: '...st[fr-eng]...')
        try:
            idx = node.node_idx
            previous = mtree.node_at((idx[0], idx[1] - 1)).leaves()[-1]
            if previous.value.lower()[-2:] == 'st':
                promote_subtitle()
        except IndexError:
            # No previous sibling group: nothing to check.
            pass

    # 2- ", the" at the end of a series title should be prepended to it
    for node in mtree.nodes():
        if 'series' not in node.guess:
            continue

        node.guess['series'] = reorder_title(node.guess['series'])
| gpl-3.0 |
rgeleta/odoo | addons/account/project/__init__.py | 427 | 1100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
4shadoww/usploit | core/lib/past/builtins/misc.py | 62 | 2500 | from __future__ import unicode_literals
import sys
import inspect
from collections import Mapping
from future.utils import PY3, exec_
if PY3:
    import builtins

    def apply(f, *args, **kw):
        # Python 2's apply(): call f with the given positional/keyword args.
        return f(*args, **kw)

    from past.builtins import str as oldstr

    def chr(i):
        """
        Return a byte-string of one character with ordinal i; 0 <= i <= 256
        """
        return oldstr(bytes((i,)))

    def cmp(x, y):
        """
        cmp(x, y) -> integer
        Return negative if x<y, zero if x==y, positive if x>y.
        """
        # Standard Python 3 idiom replacing the removed builtin.
        return (x > y) - (x < y)

    from sys import intern

    def oct(number):
        """oct(number) -> string
        Return the octal representation of an integer
        """
        # Python 2 spelled octal literals as '0123', not '0o123'; strip the
        # 'o' from the Python 3 result.
        return '0' + builtins.oct(number)[2:]

    raw_input = input
    # reload() moved out of the builtins into the imp module in Python 3.
    from imp import reload
    unicode = str
    # NOTE: refers to the chr() redefined above, so unichr returns the same
    # byte-string type.
    unichr = chr
    xrange = range
else:
    import __builtin__
    # On Python 2 every name is simply re-exported from the real builtins.
    apply = __builtin__.apply
    chr = __builtin__.chr
    cmp = __builtin__.cmp
    execfile = __builtin__.execfile
    intern = __builtin__.intern
    oct = __builtin__.oct
    raw_input = __builtin__.raw_input
    reload = __builtin__.reload
    unicode = __builtin__.unicode
    unichr = __builtin__.unichr
    xrange = __builtin__.xrange
if PY3:
    def execfile(filename, myglobals=None, mylocals=None):
        """
        Read and execute a Python script from a file in the given namespaces.
        The globals and locals are dictionaries, defaulting to the current
        globals and locals. If only globals is given, locals defaults to it.
        """
        if myglobals is None:
            # There seems to be no alternative to frame hacking here.
            caller_frame = inspect.stack()[1]
            myglobals = caller_frame[0].f_globals
            mylocals = caller_frame[0].f_locals
        elif mylocals is None:
            # Only if myglobals is given do we set mylocals to it.
            mylocals = myglobals
        if not isinstance(myglobals, Mapping):
            raise TypeError('globals must be a mapping')
        if not isinstance(mylocals, Mapping):
            raise TypeError('locals must be a mapping')
        # BUGFIX: the mode was "rbU". 'U' (universal newlines) is deprecated
        # since Python 3.4 and removed in 3.11, where combining it with 'b'
        # raises ValueError. Plain binary mode is correct: compile() accepts
        # bytes and normalizes all newline styles itself.
        with open(filename, "rb") as fin:
            source = fin.read()
        code = compile(source, filename, "exec")
        exec_(code, myglobals, mylocals)
# Public names re-exported on Python 3; on Python 2 every one of these is
# already a genuine builtin, so nothing needs exporting.
__all__ = [
    'apply', 'chr', 'cmp', 'execfile', 'intern', 'raw_input',
    'reload', 'unichr', 'unicode', 'xrange',
] if PY3 else []
| mit |
clody23/MToolBox | MToolBox/mt-classifier.py | 1 | 13234 | #!/usr/bin/env python
import getopt, sys, re, os, glob, csv
from classifier import tree, NGclassify, consts, datatypes, parse_mhcs
from bioinf.seqs import SeqList
import io_modules.csv
import io_modules.old_table
import io_modules.serialize
import os.path
# folder where to find data for haplogroup classification and functional annotation
data_file = os.path.dirname(sys.argv[0])
def usage_old():
    # Legacy help text for the GMAP-based workflow; superseded by usage().
    print """\nAssigns haplogroup to contigs and performs functional annotation
	Options:
	-i		Contig file [mtDNAassembly-Contigs.fasta]
	-g		GMAP executable PATH [/usr/local/bin/gmap]
	-D		GMAP mt sequences database location [/usr/local/share/gmapdb]
	-m		GMAP mt sequences database [mt_mhcss]
	-t		GMAP threads [2]
	-b		basename for output files
	"""
def usage():
    # Print the command-line help for the MUSCLE-based workflow.
    print """\nAssigns haplogroup to contigs and performs functional annotation
	Options:
	-i		Contig file [mtDNAassembly-Contigs.fasta]
	-m		MUSCLE executable PATH [/usr/local/bin/muscle]
	-b		basename for output files
	-s		file with most reliable haplogroup prediction
	"""
def pickle_csv(csvfile, pickle_fname=None):
    """Parse a haplogroup-tree CSV and serialize it to a pickle file.

    csvfile: path to the CSV description of the haplogroup tree.
    pickle_fname: destination path; defaults to csvfile + '.pickle'.
    """
    if pickle_fname is None:
        pickle_fname = csvfile + '.pickle'
    # BUGFIX: the original left both file handles open; use context managers
    # so the input is closed and the pickle is flushed to disk even if
    # parsing or serialization raises.
    with open(csvfile, 'rb') as tree_fh:
        aplo_list = io_modules.csv.parse_csv(csv.reader(tree_fh))
    htree = tree.HaplogroupTree(aplo_list=aplo_list)
    with open(pickle_fname, 'wb') as pickle_file:
        pickle_file.write(htree.serialize())
def write_old_table(pickle_fname, out_fname):
    # Load a pickled HaplogroupTree and dump every haplogroup in the legacy
    # flat-table CSV format.
    # NOTE(review): neither file handle is explicitly closed; relies on
    # refcount GC -- consider 'with' blocks.
    htree = tree.HaplogroupTree(pickle_data=open(pickle_fname, 'rb').read())
    fh = csv.writer(open(out_fname, 'wb'))
    for haplo_name in htree:
        io_modules.old_table.write_haplogroup(fh, '', htree[haplo_name])
def parse_gmapf9_line(line):
    """Split one line of GMAP -f 9 output into its coordinate fields.

    Returns a tuple (ref_pos, ref_nuc, seq_nuc, seq_index) where ref_pos is
    the integer reference coordinate, ref_nuc the reference base (last
    character of the target column), seq_nuc the query base and seq_index
    the query coordinate string.
    """
    columns = line.split('\t')
    query_field = columns[1]
    target_field = columns[2]
    # Query column is "<index> <base>".
    seq_index, _, seq_nuc = query_field.partition(' ')
    # Second word-like token of the target column is the reference position.
    tokens = re.findall(r"[\w']+", target_field)
    ref_pos = int(tokens[1])
    ref_nuc = target_field[-1]
    return ref_pos, ref_nuc, seq_nuc, seq_index
def parse_gmapf9_file(inhandle):
    """Scan a whole GMAP -f 9 alignment and collect mutations.

    Returns (mutations, contigs_mappings): the list of Insertion/Deletion/
    Transition/Transversion objects found, and the sorted list of
    [start, end] reference coordinates covered by each contig.
    """
    contigs_mappings = [[]]
    h = inhandle.readlines()
    c = 0
    mutations = []
    # Manual index loop: insertion/deletion handling consumes several lines
    # at once, so a plain 'for' over h would not work.
    while c < len(h):
        # end coordinate of last contig
        if c == len(h)-1:
            contigs_mappings[-1].append(parse_gmapf9_line(h[c])[0])
        if h[c][0] != '>':
            ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
            # insertion
            if ref_nuc == ' ' and seq_nuc != ' ':
                # gmap assigns the position of the next nucleotide to the insertion
                pos_ins = ref_pos - 1
                ins = [seq_nuc]
                c += 1
                ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
                # accumulate consecutive inserted bases into one event
                while c < len(h) and (ref_nuc == ' ' and seq_nuc != ' '):
                    ins.append(seq_nuc)
                    c += 1
                    ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
                mut = datatypes.Insertion("%d.%s" % (pos_ins, ''.join(ins)))
                mutations.append(mut)
                #print "%d.%s" % (pos_ins, ''.join(ins))
            # deletion
            elif ref_nuc != ' ' and seq_nuc == ' ':
                pos_del = ref_pos
                c += 1
                ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
                while c < len(h) and (ref_nuc != ' ' and seq_nuc == ' '):
                    c += 1
                    ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
                # single-base deletions are reported as "Nd", ranges as "N-Md"
                if pos_del == ref_pos-1:
                    print "%dd" % (pos_del)
                    mut = datatypes.Deletion("%dd" % pos_del)
                    mutations.append(mut)
                else:
                    print "%d-%dd" % (pos_del, ref_pos-1)
                    mut = datatypes.Deletion("%d-%dd" % (pos_del, ref_pos-1))
                    mutations.append(mut)
            # mismatch
            elif ref_nuc != seq_nuc:
                # 'N' calls are ambiguous and ignored
                if seq_nuc != 'N':
                    # Transition (purine<->purine or pyrimidine<->pyrimidine)
                    if (ref_nuc in consts.PUR and seq_nuc in consts.PUR) or (ref_nuc in consts.PYR and seq_nuc in consts.PYR):
                        print "%d%s" % (ref_pos, seq_nuc)
                        mut = datatypes.Transition(ref_pos)
                        mutations.append(mut)
                    # Transversion (purine<->pyrimidine)
                    if (ref_nuc in consts.PUR and seq_nuc in consts.PYR) or (ref_nuc in consts.PYR and seq_nuc in consts.PUR):
                        mut = datatypes.Transversion("%d%s" % (ref_pos, seq_nuc))
                        mutations.append(mut)
                c += 1
            else:
                c += 1
        else:
            # '>' header line marks a contig boundary
            # first contig
            if len(contigs_mappings) == 1 and len(contigs_mappings[-1]) == 0:
                contigs_mappings[-1].append(parse_gmapf9_line(h[c+1])[0])
            # all the others
            else:
                contigs_mappings[-1].append(parse_gmapf9_line(h[c-1])[0])
                contigs_mappings.append([parse_gmapf9_line(h[c+1])[0]])
            c += 1
    # don't know if contig coordinate sorting is needed but I'll do anyway
    contigs_mappings.sort()
    return mutations, contigs_mappings
def merge_tables(f, g, h):
    """Merge three variant lists into a presence/absence table.

    f, g, h: lists of variant objects (compared with ==, rendered via
    .pprint()) detected against the RSRS, MHCS and rCRS references
    respectively.

    Returns a list of rows: a header row followed by one row per distinct
    variant (first-seen order across f, g, h) with 'yes'/'' flags for each
    of the three references.
    """
    # Deduplicate while preserving order; membership uses == because the
    # variant objects are not guaranteed hashable.
    mergedlist = []
    for variant in f + g + h:
        if variant not in mergedlist:
            mergedlist.append(variant)
    y = "yes"
    n = ""
    o = [["", "RSRS", "MHCS", "rCRS"]]
    for variant in mergedlist:
        # Flag each reference the variant was observed against. This
        # replaces the original seven-branch if/elif chain, which
        # enumerated every membership combination by hand.
        o.append([
            variant.pprint(),
            y if variant in f else n,
            y if variant in g else n,
            y if variant in h else n,
        ])
    return o
def align_sequence(muscle_exe, sequence, rif=None, ):
    """Align *sequence* against a reference and return a SequenceDiff.

    sequence is a datatypes.Sequence; rif is the reference Sequence and
    defaults to RSRS when not given. muscle_exe is the path to the MUSCLE
    binary used for the pairwise alignment.
    """
    reference = rif if rif is not None else datatypes.Sequence('RSRS', consts.RCRS)
    diff = NGclassify.SequenceDiff()
    query = datatypes.Sequence(sequence.name, str(sequence))
    diff.gen_diff(muscle_exe, reference, query)
    return diff
def h_analysis(htrees, seq_diff, regions, mhcs_dict):
    """Classify a SequenceDiff against each haplogroup tree and attach the
    MHCS ids; returns the populated Classify object."""
    a = NGclassify.Classify()
    #print "Classification of sequence %s" % seq_diff.obj.name
    for htree, name in htrees:
        print "Classification according to tree:", name
        a.classify_by_tree(htree, seq_diff, regions)
        #print "start is ", seq_diff.start
        #print "end is ", seq_diff.end
        #print "haplo_stats: ", a.haplo_stats
        print "genome_state is ", a.get_genome_state()
        (haplo_stats_sorted, haplo_best) = a.prediction_sorting()
        print haplo_best
        #print "haplo_stats_sorted is:\n", haplo_stats_sorted
        print "="*20
        #print "haplo_best is: ", haplo_best
    #print "finding MHCS for sequence %s" % seq_diff.obj.name
    mhcss = a.get_mhcss(mhcs_dict)
    #print "MHCS ID for sequence %s is %s" % (seq_diff.obj.name, ','.join(list(mhcss)))
    # debug prints (original comment: "PROVA PRINT", Italian for "test print")
    # print "stat_list is:"
    # print type(a.__dict__['stat_list'])
    #print a.__dict__
    print '-'*30
    #print a.seq_diff.obj.name
    #print a.haplo_stats
    #pdb.set_trace()
    return a
def load_sequences(fname):
    # Read all contig sequences from a FASTA file into a SeqList and report
    # how many were loaded.
    a = SeqList()
    a.load_file(fname)
    print "Loaded %d contig sequences" % len(a)
    return a
def write_output(class_obj, seq_diff, seq_diff_mhcs, seq_diff_rcrs, merged_tables, outfile):
    """Write the classification results for one sample.

    Produces <outfile>.csv, <outfile>.sorted.csv and
    <outfile>_merged_diff.csv. The seq_diff* arguments are kept for the
    commented-out alignment/diff dumps below.
    """
    # NOTE(review): file handles opened here are never explicitly closed;
    # relies on refcount GC -- consider 'with' blocks.
    print "Writing results for sequence %s" % outfile
    class_obj.pprint(open(outfile + '.csv', 'w'))
    class_obj.pprint_sorted(open(outfile + '.sorted.csv', 'w'))
    #seq_diff.print_alg(open(outfile + '_alg.txt','w'))
    #seq_diff.pprint(open(outfile + '_diff.txt','w'))
    #seq_diff_mhcs.pprint(open(outfile + '_mhcs_diff.txt','w'))
    #seq_diff_mhcs.print_alg(open(outfile + '_mhcs_alg.txt','w'))
    #seq_diff_rcrs.pprint(open(outfile + '_rcrs_diff.txt','w'))
    #seq_diff_rcrs.print_alg(open(outfile + '_rcrs_alg.txt','w'))
    merged_tables_file = open(outfile + '_merged_diff.csv', 'w')
    for row in merged_tables:
        merged_tables_file.write(','.join(row)+'\n')
def main_mt_hpred():
    """Command-line driver: align assembled mtDNA contigs against the
    reference genome, assign a haplogroup, re-align against the MHCS and
    rCRS references, merge the variant tables and write the result files.

    Options: -i contig fasta, -m muscle executable, -b output basename,
    -s best-results CSV, -h help.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hi:m:b:s:")
    except getopt.GetoptError, err:
        print str(err)
        usage()
        sys.exit()
    #print opts, args
    # Defaults, overridable from the command line below.
    contig_file = 'mtDNAassembly-contigs.fasta'
    muscle_exe='/usr/local/bin/muscle'
    basename='mtDNAassembly-contigs'
    best_results_file = 'mt_classification_best_results.csv'
    #print opts
    for o,a in opts:
        #print "option", o, "argument", a
        if o == "-h":
            usage()
            sys.exit()
        elif o == "-i": contig_file = a
        elif o == "-m": muscle_exe = a
        elif o == "-b": basename = a
        elif o == "-s": best_results_file = a
        else:
            assert False, "Unhandled option."
    print "Your best results file is ", best_results_file
    # sample name is derived from the contig file name (text before '-')
    f = os.path.abspath(contig_file)
    #sample_name = f.split('/')[-2].split('_')[-1]
    sample_name = contig_file.split('-')[0]
    # haplogroup tree parsing (pickled phylotree r16 shipped with the tool)
    htrees = [(tree.HaplogroupTree(pickle_data=open(data_file + '/data/phylotree_r16.pickle', 'rb').read()), data_file + '/data/phylotree_r16.pickle')]
    # mhcs parsing: maps haplogroup IDs to MHCS sequences
    mhcs_dict = parse_mhcs.parse2mhcs_dict(data_file + '/data/mhcs.tab')
    print "\nLoading contig sequences from file %s" % contig_file
    contig_array = load_sequences(contig_file)
    contig_array_seqdiff = [] # list of lists
    contig_total_seqdiff = [] # list of variants
    contig_array_mappings = []
    print "\nAligning Contigs to mtDNA reference genome...\n"
    # Update each contig's SeqDiff: the first contig creates the accumulator
    # object, every later contig is folded into it (variants + regions).
    for x,contig in enumerate(contig_array):
        if x == 0:
            contig_seq_diff = align_sequence(muscle_exe, contig)
            contig_seq_diff.find_segment() # avoid having long gaps at 5' and 3' (not actual gaps but due to the alignment)
            contig_seq_diff.regions.append([contig_seq_diff.start, contig_seq_diff.end])
        else:
            incoming_seqdiff = align_sequence(muscle_exe, contig)
            incoming_seqdiff.find_segment()
            contig_seq_diff.diff_list.extend(incoming_seqdiff.diff_list)
            contig_seq_diff.regions.append([incoming_seqdiff.start, incoming_seqdiff.end])
    print "\nSequence haplogroup assignment\n"
    seq_classify = h_analysis(htrees, contig_seq_diff, contig_seq_diff.regions, mhcs_dict)
    seq_classify.sample_name = sample_name
    #print "\nSequence functional annotation\n"
    print "Contig alignment to MHCS and rCRS"
    # Use the first MHCS assigned by the classification as alignment target.
    m = list(seq_classify.mhcss)[0]
    print "Aligning contigs to MHCS SeqDiff object"
    its_mhcs = datatypes.Sequence(m, mhcs_dict[m])
    #contig_mhcs_total_seqdiff = []
    for x, contig in enumerate(contig_array):
        if x == 0:
            contig_mhcs_seq_diff = align_sequence(muscle_exe, contig, its_mhcs)
            contig_mhcs_seq_diff.find_segment()
            # NOTE(review): appends contig_seq_diff.start/end (the genome
            # alignment) rather than contig_mhcs_seq_diff's own bounds --
            # looks like a copy-paste slip; confirm intent before changing.
            contig_mhcs_seq_diff.regions.append([contig_seq_diff.start, contig_seq_diff.end])
        else:
            incoming_mhcs_seqdiff = align_sequence(muscle_exe, contig, its_mhcs)
            incoming_mhcs_seqdiff.find_segment()
            contig_mhcs_seq_diff.diff_list.extend(incoming_mhcs_seqdiff.diff_list)
            contig_mhcs_seq_diff.regions.append([incoming_mhcs_seqdiff.start, incoming_mhcs_seqdiff.end])
    print "rCRS SeqDiff object"
    rcrs = datatypes.Sequence('rCRS', consts.rcrs)
    #contig_rcrs_total_seqdiff = []
    for x, contig in enumerate(contig_array):
        if x == 0:
            contig_rcrs_seq_diff = align_sequence(muscle_exe, contig, rcrs)
            contig_rcrs_seq_diff.find_segment()
            # NOTE(review): same contig_seq_diff.start/end reuse as in the
            # MHCS loop above -- TODO confirm.
            contig_rcrs_seq_diff.regions.append([contig_seq_diff.start, contig_seq_diff.end])
        else:
            incoming_rcrs_seqdiff = align_sequence(muscle_exe, contig, rcrs)
            incoming_rcrs_seqdiff.find_segment()
            contig_rcrs_seq_diff.diff_list.extend(incoming_rcrs_seqdiff.diff_list)
            contig_rcrs_seq_diff.regions.append([incoming_rcrs_seqdiff.start, incoming_rcrs_seqdiff.end])
    # try gathering diff from reference sequences
    #print "type(seq_diff) is", type(seq_diff.diff_list)
    print "Merging seq_diffs..."
    mergedtables = merge_tables(contig_seq_diff.diff_list, contig_mhcs_seq_diff.diff_list, contig_rcrs_seq_diff.diff_list)
    #print mergedtables
    # OUTPUTS
    write_output(seq_classify, contig_seq_diff.diff_list, contig_mhcs_seq_diff.diff_list, contig_rcrs_seq_diff.diff_list, mergedtables, basename)
    #open(os.path.join(folder,'mt_classification_best_results'), 'a').write(','.join([seq_diff.obj.name, ';'.join([i[0] for i in class_obj.haplo_best.items()])])+'\n')
    #open(os.path.join('../', best_results_file), 'a').write(','.join([seq_classify.sample_name, ';'.join([i[0] for i in seq_classify.haplo_best.items()])])+'\n')
    # Append the best haplogroup call(s) for this sample to the shared
    # results file one directory up (handle left to be closed by GC).
    open(os.path.join('../', best_results_file), 'a').write(','.join([basename, ';'.join([i[0] for i in seq_classify.haplo_best.items()])])+'\n')
    #align_cmd = '%s -D %s -d %s -c chrRSRS -f 9 -B 5 -t 2 %s > %s.coords' % (gmapexe, gmapdb, mtdb, contig_file, basename)
    #print align_cmd
    # os.system(align_cmd) DON'T YOU FORGET ABOUT ME!!!
    # Parsing gmap output
    #mutations, contigs_mappings = parse_gmapf9_file(open("%s.coords" % basename, 'r'))
    #print "mutations, ", mutations
    #print "contig mappings: "
    #for i in contigs_mappings:
    #    print i
if __name__ == "__main__":
    main_mt_hpred()
    # Historical batch driver kept for reference: it re-ran the prediction per
    # OUT_* directory and then performed functional annotation of the merged
    # variant tables.
    # path = os.getcwd()
    # for infile in glob.glob(os.path.join(path, 'OUT_*')):
    #     main_mt_hpred()
    # print "\nHERE COMES THE FUNCTIONAL ANNOTATION...\n"
    # path = os.getcwd()
    # for infile in glob.glob(os.path.join(path, folder, '*', '*_merged_diff.csv')):
    #     (PATH, FILENAME) = os.path.split(infile)
    #     print infile
    #     diff_file = infile
    #     file_file = os.path.join(data_file, 'patho_table.txt')
    #     site_file = os.path.join(data_file, 'sitevar_modified.txt')
    #     bestres_file = os.path.join(path, 'mt_classification_best_results')
    #     haptab_file = os.path.join(data_file, 'haplogroups.txt')
    #     variants_functional_annotation.main_functional_analysis(diff_file, file_file, site_file, bestres_file, haptab_file, PATH, FILENAME)
| gpl-3.0 |
ianrust/coinbase_autotrader | automated_bittrader.py | 1 | 4459 | import json,urllib2,csv,time,smtplib,string,os
os.chdir('/home/ian/Documents')  # run from the directory holding trader_state.csv

# Coinbase v1 endpoints for placing orders and querying spot prices.
sell_url = "https://coinbase.com/api/v1/sells"
buy_url = "https://coinbase.com/api/v1/buys"
sell_price_url = "https://coinbase.com/api/v1/prices/sell"
buy_price_url = "https://coinbase.com/api/v1/prices/buy"
headers = {'content-type': 'application/json'}
price_payload={'qty':1.0}  # quote prices for a single bitcoin

# Gmail credentials used for status/error notification emails.
gmailUser='iancharlesrust@gmail.com'
gmailPassword='' # NOTE(review): password intentionally left blank in source -- must be filled in before the mailer can log in

# Helper for talking to the Coinbase API.
def req_and_ret(url, req_input, header, url_type='GET'):
    """Call a Coinbase endpoint and return (parsed_json, raw_json_text).

    For POST requests *req_input* is JSON-encoded and sent with *header*;
    for GET requests the bare URL is fetched (req_input/header are not
    attached to the request).
    """
    target = url
    if url_type == 'POST':
        target = urllib2.Request(url, json.dumps(req_input), header)
    handle = urllib2.urlopen(target)
    raw_body = handle.read()
    parsed = json.loads(raw_body)
    handle.close()
    return parsed, raw_body
# Read the persisted trader state: trader_state.csv holds key,value rows and
# each key is injected as a module-level variable (api_key stays a string,
# everything else is parsed as a float).  NOTE(review): writing into vars()
# works only because this runs at module scope, where vars() is globals().
with open('trader_state.csv','r') as trader_state:
    trader_state_csv=csv.reader(trader_state,delimiter=',')
    for line in trader_state_csv:
        if line[0]=='api_key':
            vars()[line[0]]=line[1]
        else:
            vars()[line[0]]=float(line[1])
    trader_state.close()  # redundant: the with-block closes the file anyway
# Get current bitcoin spot prices for buy/sell decisions.
buy_price_response,throwaway = req_and_ret(buy_price_url,price_payload,headers)
buy_price=buy_price_response['subtotal']['amount']
sell_price_response,throwaway = req_and_ret(sell_price_url,price_payload,headers)
sell_price=sell_price_response['subtotal']['amount']
# Assemble the order payload once; buy and sell use the same shape.
transaction_payload = {'api_key':api_key,'qty':amount_to_trade}
# Decide whether to trade on this run.
transaction_type=''
make_transaction=False
current_unix_time=time.time()
# Only trade once the cool-down since the last transaction has elapsed.
if current_unix_time-time_between_transactions>last_transaction_time:
    # Sell when fully invested and the price rose by percent_swing;
    # buy when out of the market and the price fell by percent_swing.
    if coins==amount_to_trade and sell_price>=(1.0+percent_swing)*last_price:
        transaction_type='sell'
        make_transaction=True
    elif coins==0 and buy_price<=(1.0-percent_swing)*last_price:
        transaction_type='buy'
        make_transaction=True
# Execute the transaction (if any) and stage the new state values.
success=False
transaction_response={'success':'False'}
trans_resp_string=''
last_price_new=last_price
coins_new=coins
if make_transaction:
    if transaction_type=='sell':
        transaction_response,trans_resp_string=req_and_ret(sell_url,transaction_payload,headers,'POST')
        coins_new=0
        last_price_new=sell_price
    else:
        transaction_response,trans_resp_string=req_and_ret(buy_url,transaction_payload,headers,'POST')
        coins_new=amount_to_trade
        last_price_new=buy_price
    # NOTE(review): indentation reconstructed -- the 'errors' lookup is
    # assumed to run only after an attempted transaction, since the default
    # response dict above has no 'errors' key.
    success=transaction_response['success']
    errors=''
    if not success:
        errors=transaction_response['errors']
# Email notification: an error report if an attempted transaction failed,
# a confirmation if it succeeded.  Nothing is sent when no trade was tried.
subject=""
to_addr="iancharlesrust@gmail.com"
from_addr="autobittrader@rpi.com"
text=''
# Gmail SMTP with STARTTLS: EHLO, upgrade to TLS, EHLO again, then log in.
mailServer = smtplib.SMTP('smtp.gmail.com', 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmailUser, gmailPassword)  # NOTE(review): fails while gmailPassword is empty
if make_transaction:
    if not success:
        subject="Got Problems With Your Bitcoin Trader"
        text="Hello Sir \n\n I just had trouble making an api based "+transaction_type+" bitcoin transaction on coinbase. Coinbase gave the following error: \r\n "+str(errors)+"\r\n You have 1 day from the time these email was sent to fix the problem. \n\n Yours Truly, \n\n RPI BitTrader \r\n PS This is the whole response: \r\n" +str(trans_resp_string)
    else:
        subject="Successful "+transaction_type+" On the Part of Your Bitcoin Trader"
        text="Hello Sir \n\n I just made a "+transaction_type+" order successfully on coinbase. \r\n The price was "+str(last_price)+" for "+str(amount_to_trade)+"BTC \n\n Yours Truly, \n\n RPI BitTrader"
    # RFC 2822 style message: header lines joined with CRLF, blank line, body.
    body=string.join(("From: %s" % from_addr,"To: %s" % to_addr,"Subject: %s" % subject ,"",text), "\r\n")
    mailServer.sendmail(from_addr, [to_addr], body)
mailServer.close()
# Persist the (possibly updated) state back to trader_state.csv so the next
# run picks it up.
with open('trader_state.csv','w') as trader_state:
    last_transaction_time_towrite=last_transaction_time
    last_price_towrite=last_price
    coins_towrite=coins
    # Only advance the persisted trade state if a transaction actually succeeded.
    if make_transaction and success:
        last_transaction_time_towrite=current_unix_time
        last_price_towrite=last_price_new
        coins_towrite=coins_new
    trader_state.write('last_price,'+str(last_price_towrite)+'\nlast_transaction_time,'+str(int(last_transaction_time_towrite))+'\ncoins,'+str(coins_towrite)+'\namount_to_trade,'+str(amount_to_trade)+'\npercent_swing,'+str(percent_swing)+'\ntime_between_transactions,'+str(time_between_transactions)+'\napi_key,'+str(api_key)+'\nlast_check_time,'+str(int(current_unix_time)))
| mit |
kvar/ansible | lib/ansible/modules/windows/win_toast.py | 21 | 3274 | #!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2017, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# This is a Windows documentation stub: the actual implementation lives in
# the PowerShell (.ps1) file of the same name.

# Metadata consumed by Ansible's doc tooling to classify the module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_toast
version_added: "2.4"
short_description: Sends Toast windows notification to logged in users on Windows 10 or later hosts
description:
- Sends alerts which appear in the Action Center area of the windows desktop.
options:
expire:
description:
- How long in seconds before the notification expires.
type: int
default: 45
group:
description:
- Which notification group to add the notification to.
type: str
default: Powershell
msg:
description:
- The message to appear inside the notification.
- May include \n to format the message to appear within the Action Center.
type: str
default: Hello, World!
popup:
description:
- If C(no), the notification will not pop up and will only appear in the Action Center.
type: bool
default: yes
tag:
description:
- The tag to add to the notification.
type: str
default: Ansible
title:
description:
- The notification title, which appears in the pop up..
type: str
default: Notification HH:mm
notes:
- This module must run on a windows 10 or Server 2016 host, so ensure your play targets windows hosts, or delegates to a windows host.
- The module does not fail if there are no logged in users to notify.
- Messages are only sent to the local host where the module is run.
- You must run this module with async, otherwise it will hang until the expire period has passed.
seealso:
- module: win_msg
- module: win_say
author:
- Jon Hawkesworth (@jhawkesworth)
'''
EXAMPLES = r'''
- name: Warn logged in users of impending upgrade (note use of async to stop the module from waiting until notification expires).
win_toast:
expire: 60
title: System Upgrade Notification
msg: Automated upgrade about to start. Please save your work and log off before {{ deployment_start_time }}
async: 60
poll: 0
'''
RETURN = r'''
expire_at_utc:
description: Calculated utc date time when the notification expires.
returned: always
type: str
sample: 07 July 2017 04:50:54
no_toast_sent_reason:
description: Text containing the reason why a notification was not sent.
returned: when no logged in users are detected
type: str
sample: No logged in users to notify
sent_localtime:
description: local date time when the notification was sent.
returned: always
type: str
sample: 07 July 2017 05:45:54
time_taken:
description: How long the module took to run on the remote windows host in seconds.
returned: always
type: float
sample: 0.3706631999999997
toast_sent:
description: Whether the module was able to send a toast notification or not.
returned: always
type: bool
sample: false
'''
| gpl-3.0 |
pombredanne/pulp | server/pulp/plugins/conduits/repo_sync.py | 7 | 8610 | """
Contains the definitions for all classes related to the importer's API for
interacting with the Pulp server during a repo sync.
Plugin implementations for repository sync will obviously vary wildly. For help
in understanding the APIs, below is a short outline of a common sync process and
its calls into this conduit:
1. Call get_units to understand what units are already associated with the
repository being synchronized.
2. For each new unit to add to the Pulp server and associate with the repository,
the plugin takes the following steps.:
a. Calls init_unit which takes unit specific metadata and allows Pulp to
populate any calculated/derived values for the unit. The result of this
call is an object representation of the unit.
b. Uses the storage_path field in the returned unit to save the bits for the
unit to disk.
c. Calls save_unit which creates/updates Pulp's knowledge of the content unit
and creates an association between the unit and the repository
d. If necessary, calls link_unit to establish any relationships between units.
3. For units previously associated with the repository (known from get_units)
that should no longer be, calls remove_unit to remove that association.
Throughout the sync process, the set_progress call can be used to update the
Pulp server on the status of the sync. Pulp will make this information available
to users.
"""
from gettext import gettext as _
import logging
import sys
from pulp.plugins.conduits.mixins import (
ImporterConduitException, AddUnitMixin, RepoScratchPadMixin,
ImporterScratchPadMixin, SingleRepoUnitsMixin, StatusMixin,
SearchUnitsMixin)
from pulp.plugins.model import SyncReport
import pulp.server.managers.factory as manager_factory
_logger = logging.getLogger(__name__)
class RepoSyncConduit(RepoScratchPadMixin, ImporterScratchPadMixin, AddUnitMixin,
                      SingleRepoUnitsMixin, StatusMixin, SearchUnitsMixin):
    """
    Used to communicate back into the Pulp server while an importer performs
    a repo sync. Instances of this class should *not* be cached between repo
    sync runs. Each sync will be issued its own conduit instance that is scoped
    to that run of the sync alone.

    Instances of this class are thread-safe. The importer implementation is
    allowed to do whatever threading makes sense to optimize its sync process.
    Calls into this instance do not have to be coordinated for thread safety,
    the instance will take care of it itself.
    """

    def __init__(self, repo_id, importer_id):
        # Each mixin wires up one slice of the conduit API; they share the
        # repo/importer identifiers and the conduit-specific exception type.
        RepoScratchPadMixin.__init__(self, repo_id, ImporterConduitException)
        ImporterScratchPadMixin.__init__(self, repo_id, importer_id)
        AddUnitMixin.__init__(self, repo_id, importer_id)
        SingleRepoUnitsMixin.__init__(self, repo_id, ImporterConduitException)
        StatusMixin.__init__(self, importer_id, ImporterConduitException)
        SearchUnitsMixin.__init__(self, ImporterConduitException)

        self._association_manager = manager_factory.repo_unit_association_manager()
        self._content_query_manager = manager_factory.content_query_manager()

        # Running count of units unassociated through this conduit; it is
        # reported by the build_*_report methods below.
        self._removed_count = 0

    def __str__(self):
        return _('RepoSyncConduit for repository [%(r)s]') % {'r': self.repo_id}

    def remove_unit(self, unit):
        """
        Removes the association between the given content unit and the repository
        being synchronized.

        This call will only remove the association owned by this importer
        between the repository and unit. If the unit was manually associated by
        a user, the repository will retain that instance of the association.

        This call does not delete Pulp's representation of the unit in its
        database. If this call removes the final association of the unit to a
        repository, the unit will become "orphaned" and will be deleted from
        Pulp outside of this plugin.

        Units passed to this call must have their id fields set by the Pulp server.

        This call is idempotent. If no association, owned by this importer, exists
        between the unit and repository, this call has no effect.

        @param unit: unit object (must have its id value set)
        @type  unit: L{Unit}
        """
        try:
            self._association_manager.unassociate_unit_by_id(
                self.repo_id, unit.type_id, unit.id)
            self._removed_count += 1
        except Exception, e:
            _logger.exception(_('Content unit unassociation failed'))
            # Python 2 three-expression raise: re-raise as the conduit's
            # exception type while preserving the original traceback.
            raise ImporterConduitException(e), None, sys.exc_info()[2]

    def associate_existing(self, unit_type_id, search_dicts):
        """
        Associates existing units with a repo

        :param unit_type_id: unit type id
        :type  unit_type_id: str
        :param search_dicts: search dicts for units to associate with repo
                             (example: list of unit key dicts)
        :type  search_dicts: list of dicts
        """
        unit_ids = self._content_query_manager.get_content_unit_ids(unit_type_id, search_dicts)
        self._association_manager.associate_all_by_ids(self.repo_id, unit_type_id, unit_ids)

    def build_success_report(self, summary, details):
        """
        Creates the SyncReport instance that needs to be returned to the Pulp
        server at the end of a successful sync_repo call.

        The added, updated, and removed unit count fields will be populated with
        the tracking counters maintained by the conduit based on calls into it.
        If these are inaccurate for a given plugin's implementation, the counts
        can be changed in the returned report before returning it to Pulp.

        @param summary: short log of the sync; may be None but probably shouldn't be
        @type  summary: any serializable

        @param details: potentially longer log of the sync; may be None
        @type  details: any serializable
        """
        r = SyncReport(True, self._added_count, self._updated_count,
                       self._removed_count, summary, details)
        return r

    def build_failure_report(self, summary, details):
        """
        Creates the SyncReport instance that needs to be returned to the Pulp
        server at the end of a sync_repo call. The report built in this fashion
        will indicate the sync has gracefully failed (as compared to an
        unexpected exception bubbling up).

        The added, updated, and removed unit count fields will be populated with
        the tracking counters maintained by the conduit based on calls into it.
        If these are inaccurate for a given plugin's implementation, the counts
        can be changed in the returned report before returning it to Pulp. This
        data will capture how far it got before building the report and should
        be overridden if the plugin attempts to do some form of rollback due to
        the encountered error.

        @param summary: short log of the sync; may be None but probably shouldn't be
        @type  summary: any serializable

        @param details: potentially longer log of the sync; may be None
        @type  details: any serializable
        """
        r = SyncReport(False, self._added_count, self._updated_count,
                       self._removed_count, summary, details)
        return r

    def build_cancel_report(self, summary, details):
        """
        Creates the SyncReport instance that needs to be returned to the Pulp
        server at the end of a sync_repo call. The report built in this fashion
        will indicate the sync has been cancelled.

        The added, updated, and removed unit count fields will be populated with
        the tracking counters maintained by the conduit based on calls into it.
        If these are inaccurate for a given plugin's implementation, the counts
        can be changed in the returned report before returning it to Pulp. This
        data will capture how far it got before building the report and should
        be overridden if the plugin attempts to do some form of rollback due to
        the cancellation.

        @param summary: short log of the sync; may be None but probably shouldn't be
        @type  summary: any serializable

        @param details: potentially longer log of the sync; may be None
        @type  details: any serializable
        """
        r = SyncReport(False, self._added_count, self._updated_count,
                       self._removed_count, summary, details)
        r.canceled_flag = True
        return r
| gpl-2.0 |
40223208/CDB-Final- | static/Brython3.1.1-20150328-091302/Lib/test/regrtest.py | 718 | 65317 | #! /usr/bin/python3.3
"""
Usage:
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
Options:
-h/--help -- print this text and exit
--timeout TIMEOUT
-- dump the traceback and exit if a test takes more
than TIMEOUT seconds; disabled if TIMEOUT is negative
or equals to zero
--wait -- wait for user input, e.g., allow a debugger to be attached
Verbosity
-v/--verbose -- run tests in verbose mode with output to stdout
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- display test output on failure
-d/--debug -- print traceback for failed tests
-q/--quiet -- no output unless one or more tests fail
-o/--slow -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
-m/--match PAT -- match test cases and methods with glob pattern PAT
-G/--failfast -- fail as soon as a test fails (only with -v or -W)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
-- run very large memory-consuming tests
--testdir DIR
-- execute test files in the specified directory (instead
of the Python stdlib test suite)
Special runs
-l/--findleaks -- if GC is available detect tests that leak memory
-L/--runleaks -- run the leaks(1) command just before exit
-R/--huntrleaks RUNCOUNTS
-- search for reference leaks (needs debug build, v. slow)
-j/--multiprocess PROCESSES
-- run PROCESSES processes at once
-T/--coverage -- turn on code coverage tracing using the trace module
-D/--coverdir DIRECTORY
-- Directory where coverage files are put
-N/--nocoverdir -- Put coverage files alongside modules
-t/--threshold THRESHOLD
-- call gc.set_threshold(THRESHOLD)
-n/--nowindows -- suppress error message boxes on Windows
-F/--forever -- run the specified tests in a loop, until an error happens
Additional Option Details:
-r randomizes test execution order. You can use --randseed=int to provide a
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order a standard run (ie, this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
# We import importlib *ASAP* in order to test #15386
import importlib
import builtins
import faulthandler
import getopt
import io
import json
import logging
import os
import platform
import random
import re
import shutil
import signal
import sys
import sysconfig
import tempfile
import time
import traceback
import unittest
import warnings
from inspect import isabstract
try:
import threading
except ImportError:
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to absolutize manually the __file__ and __path__ of
# the packages to prevent later imports to fail when the CWD is different.
for module in sys.modules.values():
    if hasattr(module, '__path__'):
        module.__path__ = [os.path.abspath(path) for path in module.__path__]
    if hasattr(module, '__file__'):
        module.__file__ = os.path.abspath(module.__file__)

# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
    try:
        import resource
    except ImportError:
        pass
    else:
        # Raise the soft stack limit to at least 2 MiB, never above the hard limit.
        soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
        newsoft = min(hard, max(soft, 1024*2048))
        resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))

# Test result constants: PASSED/FAILED are the verdicts; the negative values
# mark the various "did not run to a verdict" outcomes.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5   # error in a child process

from test import support

# Resources selectable with -u/--use (see the module docstring).
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
                  'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')

# Temporary directory used by the tests, as an absolute path.
TEMPDIR = os.path.abspath(tempfile.gettempdir())
def usage(msg):
    """Print *msg* and a pointer to --help on stderr, then exit with status 2."""
    for line in (msg, "Use --help for usage"):
        print(line, file=sys.stderr)
    sys.exit(2)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=0, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True)
replace_stdout()
support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsoS:rf:lu:t:TD:NLR:FdwWM:nj:Gm:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
'use=', 'threshold=', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'coverage', 'slaveargs=', 'forever', 'debug',
'start=', 'nowindows', 'header', 'testdir=', 'timeout=', 'wait',
'failfast', 'match=', 'next='])
except getopt.error as msg:
usage(msg)
# Defaults
if random_seed is None:
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
debug = False
start = None
timeout = None
for o, a in opts:
if o in ('-h', '--help'):
print(__doc__)
return
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-d', '--debug'):
debug = True
elif o in ('-W', '--verbose3'):
verbose3 = True
elif o in ('-G', '--failfast'):
failfast = True
elif o in ('-q', '--quiet'):
quiet = True;
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-S', '--start'):
start = a
elif o in ('-s', '--single'):
single = 1
elif o == '--next':
single = int(a)
elif o in ('-o', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o == '--randseed':
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-m', '--match'):
match_tests = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
# CWD is replaced with a temporary dir before calling main(), so we
# need join it with the saved CWD so it goes where the user expects.
coverdir = os.path.join(support.SAVEDCWD, a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) not in (2, 3):
print(a, huntrleaks)
usage('-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
elif o in ('-M', '--memlimit'):
support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
if r == 'none':
del use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage('Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-n', '--nowindows'):
import msvcrt
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
elif o in ('-F', '--forever'):
forever = True
elif o in ('-j', '--multiprocess'):
use_mp = int(a)
if use_mp <= 0:
try:
import multiprocessing
# Use all cores + extras for tests that like to sleep
use_mp = 2 + multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
use_mp = 3
if use_mp == 1:
use_mp = None
elif o == '--header':
header = True
elif o == '--slaveargs':
args, kwargs = json.loads(a)
try:
result = runtest(*args, **kwargs)
except KeyboardInterrupt:
result = INTERRUPTED, ''
except BaseException as e:
traceback.print_exc()
result = CHILD_ERROR, str(e)
sys.stdout.flush()
print() # Force a newline (just in case)
print(json.dumps(result))
sys.exit(0)
elif o == '--testdir':
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
testdir = os.path.join(support.SAVEDCWD, a)
elif o == '--timeout':
if hasattr(faulthandler, 'dump_tracebacks_later'):
timeout = float(a)
if timeout <= 0:
timeout = None
else:
print("Warning: The timeout option requires "
"faulthandler.dump_tracebacks_later")
timeout = None
elif o == '--wait':
input("Press any key to continue...")
else:
print(("No handler for option {}. Please report this as a bug "
"at http://bugs.python.org.").format(o), file=sys.stderr)
sys.exit(1)
if single and fromfile:
usage("-s and -f don't go together!")
if use_mp and trace:
usage("-T and -j don't go together!")
if use_mp and findleaks:
usage("-l and -j don't go together!")
if use_mp and support.max_memuse:
usage("-M and -j don't go together!")
if failfast and not (verbose or verbose3):
usage("-G/--failfast needs either -v or -W")
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if findleaks:
try:
import gc
except ImportError:
print('No GC available, disabling findleaks.')
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
fp = open(filename, 'r')
next_test = fp.read().strip()
tests = [next_test]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(os.path.join(support.SAVEDCWD, fromfile))
count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
for line in fp:
line = count_pat.sub('', line)
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
removepy(args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
# For a partial run, we do not need to clutter the output.
if verbose or header or not (quiet or single != 1 or tests or args):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
print("== ", platform.platform(aliased=True),
"%s-endian" % sys.byteorder)
print("== ", os.getcwd())
print("Testing with flags:", sys.flags)
# if testdir is set, then we are not running the python tests suite, so
# don't add default tests to be executed or skipped (pass empty values)
if testdir:
alltests = findtests(testdir, list(), set())
else:
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
first_selected = selected[0]
index_selected = alltests.index(first_selected)
if index_selected + single > len(alltests):
single = len(alltests) - index_selected
selected = alltests[index_selected:index_selected+single]
try:
next_single_test = alltests[index_selected+single]
except IndexError:
next_single_test = None
# Remove all the selected tests that precede start if it's set.
if start:
try:
del selected[:selected.index(start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests" % start)
if randomize:
random.seed(random_seed)
print("Using random seed", random_seed)
random.shuffle(selected)
if trace:
import trace, tempfile
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
tempfile.gettempdir()],
trace=False, count=True)
test_times = []
support.verbose = verbose # Tell tests to be moderately quiet
support.use_resources = use_resources
save_modules = sys.modules.keys()
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if use_mp:
try:
from threading import Thread
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from queue import Queue
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
output = Queue()
pending = MultiprocessTests(tests)
opt_args = support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
def work():
# A worker thread.
try:
while True:
try:
test = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources,
debug=debug, output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
)
# -E is needed by some tests, e.g. test_import
# Running the child from the same working directory ensures
# that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
stdout, stderr = popen.communicate()
retcode = popen.wait()
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
return
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
accumulate_result(test, result)
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
if stdout:
print(stdout)
if stderr:
print(stderr, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
if result[0] == CHILD_ERROR:
raise Exception("Child error on {}: {}".format(test, result[1]))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.interrupted = True
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
tracer.runctx('runtest(test, verbose, quiet, timeout=timeout)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, verbose, quiet, huntrleaks, debug,
output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
accumulate_result(test, result)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
if findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
support.unload(module)
if interrupted:
# print a newline after ^C
print()
print("Test suite interrupted by signal SIGINT.")
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if good and not quiet:
if not bad and not skipped and not interrupted and len(good) > 1:
print("All", end=' ')
print(count(len(good), "test"), "OK.")
if print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
for time, test in test_times[:10]:
print("%s: %.1fs" % (test, time))
if bad:
bad = sorted(set(bad) - set(environment_changed))
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
if environment_changed:
print("{} altered the execution environment:".format(
count(len(environment_changed), "test")))
printlist(environment_changed)
if skipped and not quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print(count(len(surprise), "skip"), \
"unexpected on", plat + ":")
printlist(surprise)
else:
print("Those skips are all expected on", plat + ".")
else:
print("Ask someone to teach regrtest.py about which tests are")
print("expected to get skipped on", plat + ".")
if verbose2 and bad:
print("Re-running failed tests in verbose mode")
for test in bad:
print("Re-running test %r in verbose mode" % test)
sys.stdout.flush()
try:
verbose = True
ok = runtest(test, True, quiet, huntrleaks, debug, timeout=timeout)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
except:
raise
if single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
# Small set of tests used to determine whether we have a basically
# functioning interpreter (i.e. if any of these fail, then anything
# else is likely to follow).  findtests() puts these first, in this
# order, ahead of the alphabetically sorted remainder.
STDTESTS = [
    'test_grammar',
    'test_opcodes',
    'test_dict',
    'test_builtin',
    'test_exceptions',
    'test_types',
    'test_unittest',
    'test_doctest',
    'test_doctest2',
    'test_support'
]

# Set of tests that we don't want to be executed when using regrtest;
# findtests() filters these out of the discovered test modules.
NOTTESTS = set()
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
    """Return a list of all applicable test modules.

    The standard tests come first, in their given order, followed by the
    remaining discovered ``test_*`` modules sorted alphabetically.
    Modules listed in *stdtests* or *nottests* are not discovered twice.
    """
    excluded = set(stdtests) | nottests
    discovered = []
    for entry in os.listdir(findtestdir(testdir)):
        modname, extension = os.path.splitext(entry)
        # Accept plain modules (.py) and extensionless entries (packages).
        if (modname.startswith("test_") and extension in (".py", "")
                and modname not in excluded):
            discovered.append(modname)
    return stdtests + sorted(discovered)
# We do not use a generator so multiple threads can call next().
class MultiprocessTests(object):
    """A thread-safe iterator over tests for multiprocess mode."""

    def __init__(self, tests):
        # Set to True by the owner to make all workers stop pulling tests.
        self.interrupted = False
        self.lock = threading.Lock()
        self.tests = tests

    def __iter__(self):
        return self

    def __next__(self):
        # Hold the lock for the whole check-then-advance sequence so two
        # workers can never race on the underlying iterator or observe a
        # stale 'interrupted' flag mid-step.
        self.lock.acquire()
        try:
            if self.interrupted:
                raise StopIteration('tests interrupted')
            return next(self.tests)
        finally:
            self.lock.release()
def replace_stdout():
    """Rebind sys.stdout to a wrapper whose encoder uses backslashreplace
    (like stderr's) so that printing a traceback cannot raise
    UnicodeEncodeError.  The original stream is restored at exit."""
    import atexit

    original = sys.stdout
    # Wrap the same file descriptor; closefd=False keeps the real stdout
    # fd alive when the wrapper is closed by the atexit hook below.
    sys.stdout = open(original.fileno(), 'w',
                      encoding=original.encoding,
                      errors="backslashreplace",
                      closefd=False,
                      newline='\n')

    def _restore():
        sys.stdout.close()
        sys.stdout = original

    atexit.register(_restore)
def runtest(test, verbose, quiet,
            huntrleaks=False, debug=False, use_resources=None,
            output_on_failure=False, failfast=False, match_tests=None,
            timeout=None):
    """Run a single test.

    test -- the name of the test
    verbose -- if true, print more messages
    quiet -- if true, don't print 'skipped' messages (probably redundant)
    huntrleaks -- run multiple times to test for leaks; requires a debug
                  build; a triple corresponding to -R's three arguments
    use_resources -- if not None, installed as support.use_resources
    output_on_failure -- if true, capture test output and display it only
                         when the test fails
    failfast -- if true, tell support to stop at the first failure
    match_tests -- pattern installed as support.match_tests
    timeout -- dump the traceback and exit if a test takes more than
               timeout seconds (requires faulthandler.dump_tracebacks_later)

    Returns one of the test result constants:
        INTERRUPTED      KeyboardInterrupt when run under -j
        RESOURCE_DENIED  test skipped because resource denied
        SKIPPED          test skipped for some other reason
        ENV_CHANGED      test failed because it changed the execution environment
        FAILED           test failed
        PASSED           test passed
    """
    if use_resources is not None:
        support.use_resources = use_resources
    use_timeout = (timeout is not None)
    if use_timeout:
        # Watchdog: dump all tracebacks and kill the process if the test
        # runs longer than `timeout`; cancelled in the finally clause.
        faulthandler.dump_tracebacks_later(timeout, exit=True)
    try:
        support.match_tests = match_tests
        if failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            # Reuse the same StringIO instance across all calls to
            # runtest().  Some tests keep a reference to sys.stdout or
            # sys.stderr (eg. test_argparse), so a fresh instance per run
            # would leave those references dangling.
            if runtest.stringio is None:
                stream = io.StringIO()
                runtest.stringio = stream
            else:
                stream = runtest.stringio
                stream.seek(0)
                stream.truncate()

            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(test, verbose, quiet, huntrleaks,
                                       debug, display_failure=False)
                if result[0] == FAILED:
                    # Replay the captured output only when the test failed.
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = verbose  # Tell tests to be moderately quiet
            result = runtest_inner(test, verbose, quiet, huntrleaks, debug,
                                   display_failure=not verbose)
        return result
    finally:
        if use_timeout:
            faulthandler.cancel_dump_tracebacks_later()
        cleanup_test_droppings(test, verbose)
# Lazily-created, shared StringIO used by the output_on_failure path above.
runtest.stringio = None
# Unit tests are supposed to leave the execution environment unchanged
# once they complete.  But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests.  This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.

class saved_test_environment:
    """Save bits of the test environment and restore them at block exit.

        with saved_test_environment(testname, verbose, quiet):
            #stuff

    Unless quiet is True, a warning is printed to stderr if any of
    the saved items was changed by the test.  The attribute 'changed'
    is initially False, but is set to True if a change is detected.

    If verbose is more than 1, the before and after state of changed
    items is also printed.
    """

    # Class-level default; flipped to True on the instance by __exit__
    # when any saved resource differs after the block.
    changed = False

    def __init__(self, testname, verbose=0, quiet=False):
        self.testname = testname
        self.verbose = verbose
        self.quiet = quiet

    # To add things to save and restore, add a name XXX to the resources list
    # and add corresponding get_XXX/restore_XXX functions.  get_XXX should
    # return the value to be saved and compared against a second call to the
    # get function when test execution completes.  restore_XXX should accept
    # the saved value and restore the resource using it.  It will be called
    # if and only if a change in the value is detected.
    #
    # Note: XXX will have any '.' replaced with '_' characters when
    # determining the corresponding method names.

    resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
                 'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
                 'warnings.filters', 'asyncore.socket_map',
                 'logging._handlers', 'logging._handlerList', 'sys.gettrace',
                 'sys.warnoptions', 'threading._dangling',
                 'multiprocessing.process._dangling',
                 'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
                 'support.TESTFN',
                )

    # Mutable resources are saved as (id(obj), obj, shallow-copy) triples
    # so that both rebinding the name and in-place mutation are detected.

    def get_sys_argv(self):
        return id(sys.argv), sys.argv, sys.argv[:]
    def restore_sys_argv(self, saved_argv):
        sys.argv = saved_argv[1]
        sys.argv[:] = saved_argv[2]

    def get_cwd(self):
        return os.getcwd()
    def restore_cwd(self, saved_cwd):
        os.chdir(saved_cwd)

    def get_sys_stdout(self):
        return sys.stdout
    def restore_sys_stdout(self, saved_stdout):
        sys.stdout = saved_stdout

    def get_sys_stderr(self):
        return sys.stderr
    def restore_sys_stderr(self, saved_stderr):
        sys.stderr = saved_stderr

    def get_sys_stdin(self):
        return sys.stdin
    def restore_sys_stdin(self, saved_stdin):
        sys.stdin = saved_stdin

    def get_os_environ(self):
        return id(os.environ), os.environ, dict(os.environ)
    def restore_os_environ(self, saved_environ):
        os.environ = saved_environ[1]
        os.environ.clear()
        os.environ.update(saved_environ[2])

    def get_sys_path(self):
        return id(sys.path), sys.path, sys.path[:]
    def restore_sys_path(self, saved_path):
        sys.path = saved_path[1]
        sys.path[:] = saved_path[2]

    def get_sys_path_hooks(self):
        return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
    def restore_sys_path_hooks(self, saved_hooks):
        sys.path_hooks = saved_hooks[1]
        sys.path_hooks[:] = saved_hooks[2]

    def get_sys_gettrace(self):
        return sys.gettrace()
    def restore_sys_gettrace(self, trace_fxn):
        sys.settrace(trace_fxn)

    def get___import__(self):
        return builtins.__import__
    def restore___import__(self, import_):
        builtins.__import__ = import_

    def get_warnings_filters(self):
        return id(warnings.filters), warnings.filters, warnings.filters[:]
    def restore_warnings_filters(self, saved_filters):
        warnings.filters = saved_filters[1]
        warnings.filters[:] = saved_filters[2]

    def get_asyncore_socket_map(self):
        asyncore = sys.modules.get('asyncore')
        # XXX Making a copy keeps objects alive until __exit__ gets called.
        return asyncore and asyncore.socket_map.copy() or {}
    def restore_asyncore_socket_map(self, saved_map):
        asyncore = sys.modules.get('asyncore')
        if asyncore is not None:
            asyncore.close_all(ignore_all=True)
            asyncore.socket_map.update(saved_map)

    # NOTE(review): the shutil archive/unpack format pairs below are not
    # listed in 'resources' above, so resource_info() never invokes them --
    # confirm whether they should be registered.
    def get_shutil_archive_formats(self):
        # we could call get_archives_formats() but that only returns the
        # registry keys; we want to check the values too (the functions that
        # are registered)
        return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
    def restore_shutil_archive_formats(self, saved):
        shutil._ARCHIVE_FORMATS = saved[0]
        shutil._ARCHIVE_FORMATS.clear()
        shutil._ARCHIVE_FORMATS.update(saved[1])

    def get_shutil_unpack_formats(self):
        return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
    def restore_shutil_unpack_formats(self, saved):
        shutil._UNPACK_FORMATS = saved[0]
        shutil._UNPACK_FORMATS.clear()
        shutil._UNPACK_FORMATS.update(saved[1])

    def get_logging__handlers(self):
        # _handlers is a WeakValueDictionary
        return id(logging._handlers), logging._handlers, logging._handlers.copy()
    def restore_logging__handlers(self, saved_handlers):
        # Can't easily revert the logging state
        pass

    def get_logging__handlerList(self):
        # _handlerList is a list of weakrefs to handlers
        return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
    def restore_logging__handlerList(self, saved_handlerList):
        # Can't easily revert the logging state
        pass

    def get_sys_warnoptions(self):
        return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
    def restore_sys_warnoptions(self, saved_options):
        sys.warnoptions = saved_options[1]
        sys.warnoptions[:] = saved_options[2]

    # Controlling dangling references to Thread objects can make it easier
    # to track reference leaks.
    def get_threading__dangling(self):
        if not threading:
            return None
        # This copies the weakrefs without making any strong reference
        return threading._dangling.copy()
    def restore_threading__dangling(self, saved):
        if not threading:
            return
        threading._dangling.clear()
        threading._dangling.update(saved)

    # Same for Process objects
    def get_multiprocessing_process__dangling(self):
        if not multiprocessing:
            return None
        # This copies the weakrefs without making any strong reference
        return multiprocessing.process._dangling.copy()
    def restore_multiprocessing_process__dangling(self, saved):
        if not multiprocessing:
            return
        multiprocessing.process._dangling.clear()
        multiprocessing.process._dangling.update(saved)

    def get_sysconfig__CONFIG_VARS(self):
        # make sure the dict is initialized
        sysconfig.get_config_var('prefix')
        return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
                dict(sysconfig._CONFIG_VARS))
    def restore_sysconfig__CONFIG_VARS(self, saved):
        sysconfig._CONFIG_VARS = saved[1]
        sysconfig._CONFIG_VARS.clear()
        sysconfig._CONFIG_VARS.update(saved[2])

    def get_sysconfig__INSTALL_SCHEMES(self):
        return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
                sysconfig._INSTALL_SCHEMES.copy())
    def restore_sysconfig__INSTALL_SCHEMES(self, saved):
        sysconfig._INSTALL_SCHEMES = saved[1]
        sysconfig._INSTALL_SCHEMES.clear()
        sysconfig._INSTALL_SCHEMES.update(saved[2])

    # TESTFN is tracked only by kind ('f' file, 'd' directory, None absent);
    # restore means "remove whatever the test left behind".
    def get_support_TESTFN(self):
        if os.path.isfile(support.TESTFN):
            result = 'f'
        elif os.path.isdir(support.TESTFN):
            result = 'd'
        else:
            result = None
        return result
    def restore_support_TESTFN(self, saved_value):
        if saved_value is None:
            if os.path.isfile(support.TESTFN):
                os.unlink(support.TESTFN)
            elif os.path.isdir(support.TESTFN):
                shutil.rmtree(support.TESTFN)

    def resource_info(self):
        # Yield (name, getter, restorer) for every registered resource,
        # mapping 'a.b' names to get_a_b/restore_a_b methods.
        for name in self.resources:
            method_suffix = name.replace('.', '_')
            get_name = 'get_' + method_suffix
            restore_name = 'restore_' + method_suffix
            yield name, getattr(self, get_name), getattr(self, restore_name)

    def __enter__(self):
        self.saved_values = dict((name, get()) for name, get, restore
                                 in self.resource_info())
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        saved_values = self.saved_values
        del self.saved_values
        for name, get, restore in self.resource_info():
            current = get()
            original = saved_values.pop(name)
            # Check for changes to the resource's value
            if current != original:
                self.changed = True
                restore(original)
                if not self.quiet:
                    print("Warning -- {} was modified by {}".format(
                                                 name, self.testname),
                                                 file=sys.stderr)
                    if self.verbose > 1:
                        print("  Before: {}\n  After:  {} ".format(
                                                  original, current),
                                                  file=sys.stderr)
        # Never suppress an exception raised inside the with-block.
        return False
def runtest_inner(test, verbose, quiet,
                  huntrleaks=False, debug=False, display_failure=True):
    """Import and execute a single test module, classifying the outcome.

    Returns a (result-constant, test_time) tuple; see runtest() for the
    meaning of the result constants.  The test runs inside a
    saved_test_environment block so environment changes are detected.
    """
    support.unload(test)

    test_time = 0.0
    refleak = False  # True if the test leaked references.
    try:
        if test.startswith('test.'):
            abstest = test
        else:
            # Always import it from the test package
            abstest = 'test.' + test
        with saved_test_environment(test, verbose, quiet) as environment:
            start_time = time.time()
            the_package = __import__(abstest, globals(), locals(), [])
            the_module = getattr(the_package, test)
            # If the test has a test_main, that will run the appropriate
            # tests.  If not, use normal unittest test loading.
            test_runner = getattr(the_module, "test_main", None)
            if test_runner is None:
                tests = unittest.TestLoader().loadTestsFromModule(the_module)
                test_runner = lambda: support.run_unittest(tests)
            test_runner()
            if huntrleaks:
                refleak = dash_R(the_module, test, test_runner,
                                 huntrleaks)
            test_time = time.time() - start_time
    except support.ResourceDenied as msg:
        if not quiet:
            print(test, "skipped --", msg)
            sys.stdout.flush()
        return RESOURCE_DENIED, test_time
    except unittest.SkipTest as msg:
        if not quiet:
            print(test, "skipped --", msg)
            sys.stdout.flush()
        return SKIPPED, test_time
    except KeyboardInterrupt:
        # Propagate so the caller can mark the whole run as interrupted.
        raise
    except support.TestFailed as msg:
        if display_failure:
            print("test", test, "failed --", msg, file=sys.stderr)
        else:
            print("test", test, "failed", file=sys.stderr)
        sys.stderr.flush()
        return FAILED, test_time
    except:
        # Anything else is an unexpected crash of the test itself.
        msg = traceback.format_exc()
        print("test", test, "crashed --", msg, file=sys.stderr)
        sys.stderr.flush()
        return FAILED, test_time
    else:
        # A clean run can still fail via refleaks or environment changes.
        if refleak:
            return FAILED, test_time
        if environment.changed:
            return ENV_CHANGED, test_time
        return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
    """Best-effort removal of files and directories a test left behind."""
    import shutil
    import stat
    import gc

    # Kill dangling references to open files etc. first; this also flushes
    # out ResourceWarnings that would otherwise fire during the following
    # test run and possibly produce failures.
    gc.collect()

    # Known junk locations.  Tests shouldn't leave anything behind, but when
    # a test fails that can be tedious for it to arrange.  On Windows the
    # consequences can be especially nasty: a file a test left open cannot
    # be deleted by name at all, so naming the offending test is the best
    # help we can give there.
    for leftover in (support.TESTFN, "db_home"):
        if not os.path.exists(leftover):
            continue
        if os.path.isdir(leftover):
            kind = "directory"
            nuker = shutil.rmtree
        elif os.path.isfile(leftover):
            kind = "file"
            nuker = os.unlink
        else:
            raise SystemError("os.path says %r exists but is neither "
                              "directory nor file" % leftover)
        if verbose:
            print("%r left behind %s %r" % (testname, kind, leftover))
        try:
            # If chmod is available, clear permission problems that might
            # otherwise block the cleanup.
            if hasattr(os, 'chmod'):
                os.chmod(leftover, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            nuker(leftover)
        except Exception as msg:
            print(("%r left behind %s %r and it couldn't be "
                   "removed: %s" % (testname, kind, leftover, msg)),
                  file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
    """Run a test multiple times, looking for reference leaks.

    huntrleaks is the (nwarmup, ntracked, fname) triple from -R.

    Returns:
        False if the test didn't leak references; True if we detected refleaks.
    """
    # This code is hackish and inelegant, but it seems to do the job.
    import copyreg
    import collections.abc

    # sys.gettotalrefcount only exists in debug builds of Python.
    if not hasattr(sys, 'gettotalrefcount'):
        raise Exception("Tracking reference leaks requires a debug build "
                        "of Python")

    # Save current values for dash_R_cleanup() to restore.
    fs = warnings.filters[:]
    ps = copyreg.dispatch_table.copy()
    pic = sys.path_importer_cache.copy()
    try:
        import zipimport
    except ImportError:
        zdc = None  # Run unmodified on platforms without zipimport support
    else:
        zdc = zipimport._zip_directory_cache.copy()
    # Snapshot every ABC's registry: tests register classes on ABCs and
    # dash_R_cleanup() restores these copies between runs.
    abcs = {}
    for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            abcs[obj] = obj._abc_registry.copy()

    if indirect_test:
        def run_the_test():
            indirect_test()
    else:
        # No test_main: re-import the module from scratch on each run.
        def run_the_test():
            del sys.modules[the_module.__name__]
            exec('import ' + the_module.__name__)

    deltas = []
    nwarmup, ntracked, fname = huntrleaks
    fname = os.path.join(support.SAVEDCWD, fname)
    repcount = nwarmup + ntracked
    print("beginning", repcount, "repetitions", file=sys.stderr)
    print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
    sys.stderr.flush()
    dash_R_cleanup(fs, ps, pic, zdc, abcs)
    for i in range(repcount):
        rc_before = sys.gettotalrefcount()
        run_the_test()
        sys.stderr.write('.')
        sys.stderr.flush()
        dash_R_cleanup(fs, ps, pic, zdc, abcs)
        rc_after = sys.gettotalrefcount()
        # The first nwarmup runs only prime caches; only the remaining
        # ntracked deltas are counted as potential leaks.
        if i >= nwarmup:
            deltas.append(rc_after - rc_before)
    print(file=sys.stderr)
    if any(deltas):
        msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
        print(msg, file=sys.stderr)
        sys.stderr.flush()
        with open(fname, "a") as refrep:
            print(msg, file=refrep)
            refrep.flush()
        return True
    return False
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
    """Restore interpreter-global state and purge caches between -R runs.

    fs/ps/pic/zdc/abcs are the snapshots taken in dash_R() (warnings
    filters, copyreg dispatch table, path importer cache, zipimport
    directory cache, ABC registries).  Clearing the assorted module caches
    keeps their lazily-filled entries from showing up as spurious
    reference-count deltas.
    """
    import gc, copyreg
    import _strptime, linecache
    import urllib.parse, urllib.request, mimetypes, doctest
    import struct, filecmp, collections.abc
    from distutils.dir_util import _path_created
    from weakref import WeakSet

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Restore some original values.
    warnings.filters[:] = fs
    copyreg.dispatch_table.clear()
    copyreg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)
    try:
        import zipimport
    except ImportError:
        pass  # Run unmodified on platforms without zipimport support
    else:
        zipimport._zip_directory_cache.clear()
        zipimport._zip_directory_cache.update(zdc)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            obj._abc_registry = abcs.get(obj, WeakSet()).copy()
            obj._abc_cache.clear()
            obj._abc_negative_cache.clear()

    # Flush standard output, so that buffered data is sent to the OS and
    # associated Python objects are reclaimed.
    for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
        if stream is not None:
            stream.flush()

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urllib.parse.clear_cache()
    urllib.request.urlcleanup()
    linecache.clearcache()
    mimetypes._default_mime_types()
    filecmp._cache.clear()
    struct._clearcache()
    doctest.master = None
    try:
        import ctypes
    except ImportError:
        # Don't worry about resetting the cache if ctypes is not supported
        pass
    else:
        ctypes._reset_cache()

    # Collect cyclic trash.
    gc.collect()
def warm_caches():
    """Pre-populate interpreter-internal object caches.

    Run before -R reference tracking so cache fills don't register as
    spurious reference-count deltas.
    """
    # Single-byte bytes cache.
    all_bytes = bytes(range(256))
    for position in range(256):
        all_bytes[position:position + 1]
    # Latin-1 single-character string cache.
    [chr(code) for code in range(256)]
    # Small-int cache.
    list(range(-5, 257))
def findtestdir(path=None):
    """Return *path* if truthy, else this module's directory, else os.curdir."""
    if path:
        return path
    return os.path.dirname(__file__) or os.curdir
def removepy(names):
    """Strip a trailing '.py' extension from each entry of *names*, in place.

    A false value for *names* (None or empty) is accepted and ignored.
    """
    for position, name in enumerate(names or ()):
        root, extension = os.path.splitext(name)
        if extension == '.py':
            names[position] = root
def count(n, word):
    """Return '<n> <word>', appending a plural 's' unless n == 1."""
    suffix = "" if n == 1 else "s"
    return "%d %s%s" % (n, word, suffix)
def printlist(x, width=70, indent=4):
    """Print the elements of iterable x to stdout.

    Optional arg width (default 70) is the maximum line length.
    Optional arg indent (default 4) is the number of blanks with which to
    begin each line.
    """
    from textwrap import fill
    margin = ' ' * indent
    # Sort first: 'x' may be a '--random'-shuffled list or a set().
    joined = ' '.join(str(item) for item in sorted(x))
    print(fill(joined, width,
               initial_indent=margin, subsequent_indent=margin))
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = (
('win32',
"""
test__locale
test_crypt
test_curses
test_dbm
test_devpoll
test_fcntl
test_fork1
test_epoll
test_dbm_gnu
test_dbm_ndbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_syslog
test_threadsignals
test_wait3
test_wait4
"""),
('linux',
"""
test_curses
test_devpoll
test_largefile
test_kqueue
test_ossaudiodev
"""),
('unixware',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('openunix',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('sco_sv',
"""
test_asynchat
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
"""),
('darwin',
"""
test__locale
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_gdb
test_largefile
test_locale
test_minidom
test_ossaudiodev
test_poll
"""),
('sunos',
"""
test_curses
test_dbm
test_epoll
test_kqueue
test_dbm_gnu
test_gzip
test_openpty
test_zipfile
test_zlib
"""),
('hp-ux',
"""
test_curses
test_epoll
test_dbm_gnu
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
"""),
('cygwin',
"""
test_curses
test_dbm
test_devpoll
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
"""),
('os2emx',
"""
test_audioop
test_curses
test_epoll
test_kqueue
test_largefile
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
"""),
('freebsd',
"""
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
"""),
('aix',
"""
test_bz2
test_epoll
test_dbm_gnu
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
"""),
('openbsd',
"""
test_ctypes
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
('netbsd',
"""
test_ctypes
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
)
class _ExpectedSkips:
    """Compute the set of test names expected to be skipped on the current
    platform, starting from the matching _expectations entry."""

    def __init__(self):
        import os.path
        from test import test_timeout
        self.valid = False
        # Pick the baseline skip list for the first matching platform prefix.
        baseline = next((tests for prefix, tests in _expectations
                         if sys.platform.startswith(prefix)), None)
        if baseline is None:
            # Unknown platform: leave self.valid False, no self.expected.
            return
        self.expected = set(baseline.split())
        # These are broken tests, for now skipped on every platform.
        # XXX Fix these!
        # expected to be skipped on every platform, even Linux
        self.expected.add('test_nis')
        if not os.path.supports_unicode_filenames:
            self.expected.add('test_pep277')
        # doctest, profile and cProfile tests fail when the codec for the
        # fs encoding isn't built in because PyUnicode_Decode() adds two
        # calls into Python.
        builtin_encs = ("utf-8", "latin-1", "ascii", "mbcs",
                        "utf-16", "utf-32")
        if sys.getfilesystemencoding().lower() not in builtin_encs:
            self.expected.update(('test_profile', 'test_cProfile',
                                  'test_doctest'))
        if test_timeout.skip_expected:
            self.expected.add('test_timeout')
        if sys.platform != "win32":
            # test_sqlite is only reliable on Windows where the library
            # is distributed with Python
            self.expected |= {"test_unicode_file", "test_winreg",
                              "test_winsound", "test_startfile",
                              "test_sqlite", "test_msilib"}
        if sys.platform != 'sunos5':
            self.expected.add('test_nis')
        if support.python_is_optimized():
            self.expected.add("test_gdb")
        self.valid = True

    def isvalid(self):
        "Return true iff _ExpectedSkips knows about the current platform."
        return self.valid

    def getexpected(self):
        """Return set of test names we expect to skip on current platform.
        self.isvalid() must be true.
        """
        assert self.isvalid()
        return self.expected
def _make_temp_dir_for_build(TEMPDIR):
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
# files using command "make distclean".
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
TEMPDIR = os.path.abspath(TEMPDIR)
try:
os.mkdir(TEMPDIR)
except FileExistsError:
pass
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
TESTCWD = 'test_python_{}'.format(os.getpid())
TESTCWD = os.path.join(TEMPDIR, TESTCWD)
return TEMPDIR, TESTCWD
if __name__ == '__main__':
    # Remove regrtest.py's own directory from the module search path. Despite
    # the elimination of implicit relative imports, this is still needed to
    # ensure that submodules of the test package do not inappropriately appear
    # as top-level modules even when people (or buildbots!) invoke regrtest.py
    # directly instead of using the -m switch
    mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
    # Walk sys.path backwards so deleting an entry does not shift the index
    # of entries that have not been examined yet.
    i = len(sys.path)
    while i >= 0:
        i -= 1
        if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
            del sys.path[i]
    # findtestdir() gets the dirname out of __file__, so we have to make it
    # absolute before changing the working directory.
    # For example __file__ may be relative when running trace or profile.
    # See issue #9323.
    __file__ = os.path.abspath(__file__)
    # sanity check
    assert __file__ == os.path.abspath(sys.argv[0])
    TEMPDIR, TESTCWD = _make_temp_dir_for_build(TEMPDIR)
    # Run the tests in a context manager that temporary changes the CWD to a
    # temporary and writable directory. If it's not possible to create or
    # change the CWD, the original CWD will be used. The original CWD is
    # available from support.SAVEDCWD.
    with support.temp_cwd(TESTCWD, quiet=True):
        main()
| gpl-3.0 |
qwefi/nova | nova/api/openstack/compute/contrib/__init__.py | 25 | 1608 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contrib contains extensions that are shipped with nova.
It can't be called 'extensions' because that causes namespacing problems.
"""
from oslo.config import cfg
from nova.api.openstack import extensions
from nova.openstack.common import log as logging
# Config option naming which API extensions select_extensions() should load;
# registered on the global oslo.config object below.
ext_opts = [
    cfg.ListOpt('osapi_compute_ext_list',
                default=[],
                help='Specify list of extensions to load when using osapi_'
                     'compute_extension option with nova.api.openstack.'
                     'compute.contrib.select_extensions'),
]
CONF = cfg.CONF
CONF.register_opts(ext_opts)
# Module-level logger passed to the extension loader helpers below.
LOG = logging.getLogger(__name__)
def standard_extensions(ext_mgr):
    """Load every standard (shipped) extension from this package into `ext_mgr`."""
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__)
def select_extensions(ext_mgr):
    """Load only the extensions listed in the osapi_compute_ext_list option."""
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,
                                         CONF.osapi_compute_ext_list)
| apache-2.0 |
cloud9UG/odoo | openerp/osv/fields.py | 20 | 74783 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Fields:
- simple
- relations (one2many, many2one, many2many)
- function
Fields Attributes:
* _classic_read: is a classic sql fields
* _type : field type
* _auto_join: for one2many and many2one fields, tells whether select
queries will join the relational table instead of replacing the
field condition by an equivalent-one based on a search.
* readonly
* required
* size
"""
import base64
import datetime as DT
import functools
import logging
import pytz
import re
import xmlrpclib
from operator import itemgetter
from contextlib import contextmanager
from psycopg2 import Binary
import openerp
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import float_repr, float_round, frozendict, html_sanitize
import simplejson
from openerp import SUPERUSER_ID, registry
@contextmanager
def _get_cursor():
    """Yield a usable database cursor: reuse the cursor of any live
    environment, or open (and properly close) a fresh registry cursor."""
    from openerp.api import Environment
    from openerp.http import request
    try:
        request.env  # force request's env to be computed
    except RuntimeError:
        pass  # ignore if not in a request
    live_env = next((env for env in Environment.envs if not env.cr.closed),
                    None)
    if live_env is not None:
        yield live_env.cr
    else:
        with registry().cursor() as cr:
            yield cr
# Shared immutable empty mapping, reused as the default for _column._args
# to avoid allocating one dict per column instance.
EMPTY_DICT = frozendict()
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
return str(symb)
class _column(object):
    """ Base of all fields, a database column
    An instance of this object is a *description* of a database column. It will
    not hold any data, but only provide the methods to manipulate data of an
    ORM record or even prepare/update the database to hold such a field of data.
    """
    _classic_read = True
    _classic_write = True
    _auto_join = False
    _properties = False
    _type = 'unknown'
    _obj = None
    _multi = False
    _symbol_c = '%s'
    _symbol_f = _symbol_set
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None
    _deprecated = False
    # Slots hold the common column parameters; anything else set on a column
    # instance spills into the _args dict (see __setattr__).
    __slots__ = [
        'copy',                 # whether value is copied by BaseModel.copy()
        'string',
        'help',
        'required',
        'readonly',
        '_domain',
        '_context',
        'states',
        'priority',
        'change_default',
        'size',
        'ondelete',
        'translate',
        'select',
        'manual',
        'write',
        'read',
        'selectable',
        'group_operator',
        'groups',               # CSV list of ext IDs of groups
        'deprecated',           # Optional deprecation warning
        '_args',
        '_prefetch',
    ]
    def __init__(self, string='unknown', required=False, readonly=False, domain=[], context={}, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
        """
        The 'manual' keyword argument specifies if the field is a custom one.
        It corresponds to the 'state' column in ir_model_fields.
        """
        # NOTE(review): `domain=[]` / `context={}` are shared mutable defaults;
        # they are only stored (never mutated here) but callers must not
        # modify field._domain/_context in place.
        # add parameters and default values
        args['copy'] = args.get('copy', True)
        args['string'] = string
        args['help'] = args.get('help', '')
        args['required'] = required
        args['readonly'] = readonly
        args['_domain'] = domain
        args['_context'] = context
        args['states'] = states
        args['priority'] = priority
        args['change_default'] = change_default
        args['size'] = size
        args['ondelete'] = ondelete.lower() if ondelete else None
        args['translate'] = translate
        args['select'] = select
        args['manual'] = manual
        args['write'] = args.get('write', False)
        args['read'] = args.get('read', False)
        args['selectable'] = args.get('selectable', True)
        args['group_operator'] = args.get('group_operator', None)
        args['groups'] = args.get('groups', None)
        args['deprecated'] = args.get('deprecated', None)
        args['_prefetch'] = args.get('_prefetch', True)
        self._args = EMPTY_DICT
        for key, val in args.iteritems():
            setattr(self, key, val)
        # prefetch only if _classic_write, not deprecated and not manual
        if not self._classic_write or self.deprecated or self.manual:
            self._prefetch = False
    def __getattr__(self, name):
        """ Access a non-slot attribute. """
        if name == '_args':
            raise AttributeError(name)
        try:
            return self._args[name]
        except KeyError:
            raise AttributeError(name)
    def __setattr__(self, name, value):
        """ Set a slot or non-slot attribute; non-slot values go in _args. """
        try:
            object.__setattr__(self, name, value)
        except AttributeError:
            if self._args:
                self._args[name] = value
            else:
                self._args = {name: value}     # replace EMPTY_DICT
    def __delattr__(self, name):
        """ Remove a non-slot attribute. """
        try:
            del self._args[name]
        except KeyError:
            raise AttributeError(name)
    def new(self, _computed_field=False, **args):
        """ Return a column like `self` with the given parameters; the parameter
        `_computed_field` tells whether the corresponding field is computed.
        """
        # memory optimization: reuse self whenever possible; you can reduce the
        # average memory usage per registry by 10 megabytes!
        column = type(self)(**args)
        return self if self.to_field_args() == column.to_field_args() else column
    def to_field(self):
        """ convert column `self` to a new-style field """
        from openerp.fields import Field
        return Field.by_type[self._type](column=self, **self.to_field_args())
    def to_field_args(self):
        """ return a dictionary with all the arguments to pass to the field """
        base_items = [
            ('copy', self.copy),
            ('index', self.select),
            ('manual', self.manual),
            ('string', self.string),
            ('help', self.help),
            ('readonly', self.readonly),
            ('required', self.required),
            ('states', self.states),
            ('groups', self.groups),
            ('change_default', self.change_default),
            ('deprecated', self.deprecated),
        ]
        # only forward truthy optional attributes
        truthy_items = filter(itemgetter(1), [
            ('group_operator', self.group_operator),
            ('size', self.size),
            ('ondelete', self.ondelete),
            ('translate', self.translate),
            ('domain', self._domain),
            ('context', self._context),
        ])
        return dict(base_items + truthy_items + self._args.items())
    def restart(self):
        """ Reset any cached state; no-op for plain columns. """
        pass
    def set(self, cr, obj, id, name, value, user=None, context=None):
        """ Write `value` into column `name` of record `id` of model `obj`. """
        cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%s', (self._symbol_set[1](value), id))
    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        """ Read the column for `ids`; must be overridden by non-classic columns. """
        raise Exception(_('undefined get method !'))
    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
        """ Default search: 'ilike' on the column, honoring the field domain. """
        ids = obj.search(cr, uid, args+self._domain+[(name, 'ilike', value)], offset, limit, context=context)
        res = obj.read(cr, uid, ids, [name], context=context)
        return [x[name] for x in res]
    def as_display_name(self, cr, uid, obj, value, context=None):
        """Converts a field value to a suitable string representation for a record,
        e.g. when this field is used as ``rec_name``.
        :param obj: the ``BaseModel`` instance this column belongs to
        :param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
                      for this column
        """
        # delegated to class method, so a column type A can delegate
        # to a column type B.
        # Bugfix: the caller's context was previously discarded here by
        # passing context=None (cf. datetime._as_display_name, which relies
        # on the context for timezone conversion).
        return self._as_display_name(self, cr, uid, obj, value, context=context)
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # This needs to be a class method, in case a column type A as to delegate
        # to a column type B.
        return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
# ---------------------------------------------------------
class boolean(_column):
    """Column stored as a SQL boolean; NULL values read back as False."""
    _type = 'boolean'
    _symbol_c = '%s'
    _symbol_f = bool
    _symbol_set = (_symbol_c, _symbol_f)
    __slots__ = []
    def __init__(self, string='unknown', required=False, **args):
        super(boolean, self).__init__(string=string, required=required, **args)
        # required=True is meaningless for booleans: NULL already reads as False
        if required:
            _logger.debug(
                "required=True is deprecated: making a boolean field"
                " `required` has no effect, as NULL values are "
                "automatically turned into False. args: %r",args)
class integer(_column):
    """Column stored as a SQL integer; missing values read back as 0."""
    _type = 'integer'
    _symbol_c = '%s'
    # coerce falsy values (None, False, '') to 0 before writing
    _symbol_f = lambda x: int(x or 0)
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self,x: x or 0
    __slots__ = []
    def __init__(self, string='unknown', required=False, **args):
        super(integer, self).__init__(string=string, required=required, **args)
class reference(_column):
    """Column holding a 'model,id' string pointing to an arbitrary record."""
    _type = 'reference'
    _classic_read = False # post-process to handle missing target
    __slots__ = ['selection']
    def __init__(self, string, selection, size=None, **args):
        """`selection` lists the (model, label) targets; it may be a callable
        evaluated lazily with (model, cr, uid, context)."""
        if callable(selection):
            from openerp import api
            selection = api.expected(api.cr_uid_context, selection)
        _column.__init__(self, string=string, size=size, selection=selection, **args)
    def to_field_args(self):
        # forward the selection to the new-API Reference field
        args = super(reference, self).to_field_args()
        args['selection'] = self.selection
        return args
    def get(self, cr, obj, ids, name, uid=None, context=None, values=None):
        """Return the stored values, resetting to False any reference whose
        target record no longer exists."""
        result = {}
        # copy initial values fetched previously.
        for value in values:
            result[value['id']] = value[name]
            if value[name]:
                model, res_id = value[name].split(',')
                if not obj.pool[model].exists(cr, uid, [int(res_id)], context=context):
                    result[value['id']] = False
        return result
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        if value:
            # reference fields have a 'model,id'-like value, that we need to convert
            # to a real name
            model_name, res_id = value.split(',')
            if model_name in obj.pool and res_id:
                model = obj.pool[model_name]
                names = model.name_get(cr, uid, [int(res_id)], context=context)
                return names[0][1] if names else False
        return tools.ustr(value)
# takes a string (encoded in utf8) and returns a string (encoded in utf8)
def _symbol_set_char(self, symb):
    """Adapt a char value for SQL storage: None/False-equal values map to
    NULL, everything else is truncated to self.size and UTF-8 encoded."""
    #TODO:
    # * we need to remove the "symb==False" from the next line BUT
    # for now too many things rely on this broken behavior
    # * the symb==None test should be common to all data types
    if symb is None or symb == False:
        return None
    # we need to convert the string to a unicode object to be able
    # to evaluate its length (and possibly truncate it) reliably
    u_symb = tools.ustr(symb)
    return u_symb[:self.size].encode('utf8')
class char(_column):
    """Column stored as a (possibly size-limited) SQL varchar."""
    _type = 'char'
    __slots__ = ['_symbol_f', '_symbol_set', '_symbol_set_char']
    def __init__(self, string="unknown", size=None, **args):
        _column.__init__(self, string=string, size=size or None, **args)
        # self._symbol_set_char defined to keep the backward compatibility
        self._symbol_f = self._symbol_set_char = lambda x: _symbol_set_char(self, x)
        self._symbol_set = (self._symbol_c, self._symbol_f)
class text(_column):
    """Column stored as unlimited SQL text."""
    _type = 'text'
    __slots__ = []
class html(text):
    """Text column holding HTML, sanitized on write by default."""
    _type = 'html'
    _symbol_c = '%s'
    __slots__ = ['_sanitize', '_strip_style', '_symbol_f', '_symbol_set']
    def _symbol_set_html(self, value):
        # NULL for missing values; sanitize the markup unless disabled
        if value is None or value is False:
            return None
        if not self._sanitize:
            return value
        return html_sanitize(value, strip_style=self._strip_style)
    def __init__(self, string='unknown', sanitize=True, strip_style=False, **args):
        super(html, self).__init__(string=string, **args)
        self._sanitize = sanitize
        self._strip_style = strip_style
        # symbol_set redefinition because of sanitize specific behavior
        self._symbol_f = self._symbol_set_html
        self._symbol_set = (self._symbol_c, self._symbol_f)
    def to_field_args(self):
        args = super(html, self).to_field_args()
        args['sanitize'] = self._sanitize
        return args
import __builtin__
def _symbol_set_float(self, x):
    """Adapt a float value for SQL storage, rounding/formatting it to the
    column's (precision, scale) digits when they are defined."""
    result = __builtin__.float(x or 0.0)
    digits = self.digits
    if digits:
        precision, scale = digits
        result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale)
    return result
class float(_column):
    """Column stored as a SQL float/numeric, with optional fixed digits."""
    _type = 'float'
    _symbol_c = '%s'
    _symbol_get = lambda self,x: x or 0.0
    __slots__ = ['_digits', '_digits_compute', '_symbol_f', '_symbol_set']
    @property
    def digits(self):
        # digits may be computed lazily against a cursor (e.g. from decimal
        # precision settings) or given statically as (precision, scale)
        if self._digits_compute:
            with _get_cursor() as cr:
                return self._digits_compute(cr)
        else:
            return self._digits
    def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
        _column.__init__(self, string=string, required=required, **args)
        # synopsis: digits_compute(cr) -> (precision, scale)
        self._digits = digits
        self._digits_compute = digits_compute
        self._symbol_f = lambda x: _symbol_set_float(self, x)
        self._symbol_set = (self._symbol_c, self._symbol_f)
    def to_field_args(self):
        args = super(float, self).to_field_args()
        args['digits'] = self._digits_compute or self._digits
        return args
    def digits_change(self, cr):
        # kept for backward compatibility; digits are now computed lazily
        pass
class date(_column):
    """Column storing a date as a string in DEFAULT_SERVER_DATE_FORMAT."""
    _type = 'date'
    __slots__ = []
    # (number, English name) pairs, e.g. for building month selections
    MONTHS = [
        ('01', 'January'),
        ('02', 'February'),
        ('03', 'March'),
        ('04', 'April'),
        ('05', 'May'),
        ('06', 'June'),
        ('07', 'July'),
        ('08', 'August'),
        ('09', 'September'),
        ('10', 'October'),
        ('11', 'November'),
        ('12', 'December')
    ]
    @staticmethod
    def today(*args):
        """ Returns the current date in a format fit for being a
        default value to a ``date`` field.
        This method should be provided as is to the _defaults dict, it
        should not be called.
        """
        return DT.date.today().strftime(
            tools.DEFAULT_SERVER_DATE_FORMAT)
    @staticmethod
    def context_today(model, cr, uid, context=None, timestamp=None):
        """Returns the current date as seen in the client's timezone
        in a format fit for date fields.
        This method may be passed as value to initialize _defaults.
        :param Model model: model (osv) for which the date value is being
                            computed - automatically passed when used in
                            _defaults.
        :param datetime timestamp: optional datetime value to use instead of
                                   the current date and time (must be a
                                   datetime, regular dates can't be converted
                                   between timezones.)
        :param dict context: the 'tz' key in the context should give the
                             name of the User/Client timezone (otherwise
                             UTC is used)
        :rtype: str
        """
        today = timestamp or DT.datetime.now()
        context_today = None
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            # fall back to the user's configured timezone
            user = model.pool['res.users'].browse(cr, SUPERUSER_ID, uid)
            tz_name = user.tz
        if tz_name:
            try:
                utc = pytz.timezone('UTC')
                context_tz = pytz.timezone(tz_name)
                utc_today = utc.localize(today, is_dst=False) # UTC = no DST
                context_today = utc_today.astimezone(context_tz)
            except Exception:
                _logger.debug("failed to compute context/client-specific today date, "
                              "using the UTC value for `today`",
                              exc_info=True)
        return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
    @staticmethod
    def date_to_datetime(model, cr, uid, userdate, context=None):
        """ Convert date values expressed in user's timezone to
        server-side UTC timestamp, assuming a default arbitrary
        time of 12:00 PM (noon) - because a time is needed.
        :param str userdate: date string in user time zone
        :return: UTC datetime string for server-side use
        """
        user_date = DT.datetime.strptime(userdate, tools.DEFAULT_SERVER_DATE_FORMAT)
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            tz_name = model.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
        if tz_name:
            utc = pytz.timezone('UTC')
            context_tz = pytz.timezone(tz_name)
            # anchor the date at noon local time before converting to UTC
            user_datetime = user_date + DT.timedelta(hours=12.0)
            local_timestamp = context_tz.localize(user_datetime, is_dst=False)
            user_datetime = local_timestamp.astimezone(utc)
            return user_datetime.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
        return user_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
class datetime(_column):
    """Column storing a UTC timestamp string in DEFAULT_SERVER_DATETIME_FORMAT."""
    _type = 'datetime'
    __slots__ = []
    # (number, English name) pairs, e.g. for building month selections
    MONTHS = [
        ('01', 'January'),
        ('02', 'February'),
        ('03', 'March'),
        ('04', 'April'),
        ('05', 'May'),
        ('06', 'June'),
        ('07', 'July'),
        ('08', 'August'),
        ('09', 'September'),
        ('10', 'October'),
        ('11', 'November'),
        ('12', 'December')
    ]
    @staticmethod
    def now(*args):
        """ Returns the current datetime in a format fit for being a
        default value to a ``datetime`` field.
        This method should be provided as is to the _defaults dict, it
        should not be called.
        """
        return DT.datetime.now().strftime(
            tools.DEFAULT_SERVER_DATETIME_FORMAT)
    @staticmethod
    def context_timestamp(cr, uid, timestamp, context=None):
        """Returns the given timestamp converted to the client's timezone.
        This method is *not* meant for use as a _defaults initializer,
        because datetime fields are automatically converted upon
        display on client side. For _defaults, :meth:`fields.datetime.now`
        should be used instead.
        :param datetime timestamp: naive datetime value (expressed in UTC)
                                   to be converted to the client timezone
        :param dict context: the 'tz' key in the context should give the
                             name of the User/Client timezone (otherwise
                             UTC is used)
        :rtype: datetime
        :return: timestamp converted to timezone-aware datetime in context
                 timezone
        """
        assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            # fall back to the user's configured timezone
            registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
            user = registry['res.users'].browse(cr, SUPERUSER_ID, uid)
            tz_name = user.tz
        utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
        if tz_name:
            try:
                context_tz = pytz.timezone(tz_name)
                return utc_timestamp.astimezone(context_tz)
            except Exception:
                _logger.debug("failed to compute context/client-specific timestamp, "
                              "using the UTC value",
                              exc_info=True)
        return utc_timestamp
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # render the stored UTC string in the client's timezone
        value = datetime.context_timestamp(cr, uid, DT.datetime.strptime(value, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
        return tools.ustr(value.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT))
class binary(_column):
    """Column storing base64-encoded binary payloads."""
    _type = 'binary'
    _classic_read = False
    # Binary values may be byte strings (python 2.6 byte array), but
    # the legacy OpenERP convention is to transfer and store binaries
    # as base64-encoded strings. The base64 string may be provided as a
    # unicode in some circumstances, hence the str() cast in symbol_f.
    # This str coercion will only work for pure ASCII unicode strings,
    # on purpose - non base64 data must be passed as a 8bit byte strings.
    _symbol_c = '%s'
    _symbol_f = lambda symb: symb and Binary(str(symb)) or None
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self, x: x and str(x)
    __slots__ = ['filters']
    def __init__(self, string='unknown', filters=None, **args):
        # binary payloads can be huge, so never prefetch them by default
        args['_prefetch'] = args.get('_prefetch', False)
        _column.__init__(self, string=string, filters=filters, **args)
    def get(self, cr, obj, ids, name, user=None, context=None, values=None):
        """Return the payload for each id, or only its human-readable size
        when the context requests 'bin_size' / 'bin_size_<name>'."""
        if not context:
            context = {}
        if not values:
            values = []
        res = {}
        for i in ids:
            val = None
            for v in values:
                if v['id'] == i:
                    val = v[name]
                    break
            # If client is requesting only the size of the field, we return it instead
            # of the content. Presumably a separate request will be done to read the actual
            # content if it's needed at some point.
            # TODO: after 6.0 we should consider returning a dict with size and content instead of
            # having an implicit convention for the value
            if val and context.get('bin_size_%s' % name, context.get('bin_size')):
                res[i] = tools.human_size(long(val))
            else:
                res[i] = val
        return res
class selection(_column):
    """Column restricted to a fixed (or lazily computed) list of values."""
    _type = 'selection'
    __slots__ = ['selection']
    def __init__(self, selection, string='unknown', **args):
        """`selection` is a list of (value, label) pairs, or a callable
        evaluated lazily with (model, cr, uid, context)."""
        if callable(selection):
            from openerp import api
            selection = api.expected(api.cr_uid_context, selection)
        _column.__init__(self, string=string, selection=selection, **args)
    def to_field_args(self):
        # forward the selection to the new-API Selection field
        args = super(selection, self).to_field_args()
        args['selection'] = self.selection
        return args
    @classmethod
    def reify(cls, cr, uid, model, field, context=None):
        """ Munges the field's ``selection`` attribute as necessary to get
        something useable out of it: calls it if it's a function, applies
        translations to labels if it's not.
        A callable ``selection`` is considered translated on its own.
        :param orm.Model model:
        :param _column field:
        """
        if callable(field.selection):
            return field.selection(model, cr, uid, context)
        if not (context and 'lang' in context):
            return field.selection
        # field_to_dict isn't given a field name, only a field object, we
        # need to get the name back in order to perform the translation lookup
        field_name = next(
            name for name, column in model._columns.iteritems()
            if column == field)
        translation_filter = "%s,%s" % (model._name, field_name)
        translate = functools.partial(
            model.pool['ir.translation']._get_source,
            cr, uid, translation_filter, 'selection', context['lang'])
        return [
            (value, translate(label))
            for value, label in field.selection
        ]
# ---------------------------------------------------------
# Relationals fields
# ---------------------------------------------------------
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update
# (2, ID) remove (delete)
# (3, ID) unlink one (target id or target of relation)
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
class many2one(_column):
    """Column holding a foreign key to a record of model `obj`."""
    _classic_read = False
    _classic_write = True
    _type = 'many2one'
    _symbol_c = '%s'
    _symbol_f = lambda x: x or None
    _symbol_set = (_symbol_c, _symbol_f)
    __slots__ = ['_obj', '_auto_join']
    def __init__(self, obj, string='unknown', auto_join=False, **args):
        # deleting the target record clears this column unless told otherwise
        args['ondelete'] = args.get('ondelete', 'set null')
        _column.__init__(self, string=string, **args)
        self._obj = obj
        self._auto_join = auto_join
    def to_field_args(self):
        """Return the keyword arguments for the new-API Many2one field."""
        args = super(many2one, self).to_field_args()
        args['comodel_name'] = self._obj
        args['auto_join'] = self._auto_join
        return args
    def set(self, cr, obj_src, id, field, values, user=None, context=None):
        """Store a many2one value for record `id` of `obj_src`.
        `values` is either a target id (falsy clears the column) or a list of
        (code, ...) commands as documented in the header of this section.
        """
        if not context:
            context = {}
        obj = obj_src.pool[self._obj]
        self._table = obj._table
        if type(values) == type([]):
            for act in values:
                if act[0] == 0:
                    # Bugfix: `user` and `context` were not forwarded, so
                    # create() received the values dict as the uid argument
                    # (cf. the correct pattern in one2many.set).
                    id_new = obj.create(cr, user, act[2], context=context)
                    cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
                elif act[0] == 1:
                    # Bugfix: `user` was not forwarded to write().
                    obj.write(cr, user, [act[1]], act[2], context=context)
                elif act[0] == 2:
                    cr.execute('delete from '+self._table+' where id=%s', (act[1],))
                elif act[0] == 3 or act[0] == 5:
                    cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
                elif act[0] == 4:
                    cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (act[1], id))
        else:
            if values:
                cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (values, id))
            else:
                cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
        """Search target records whose 'name' matches `value` (used when
        searching on the many2one column by display name)."""
        return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # read() returns many2one values as (id, display_name) tuples
        return value[1] if isinstance(value, tuple) else tools.ustr(value)
class one2many(_column):
    """Old-API column type for one2many relations.

    The value is never stored on this model's table: it is materialized from
    the inverse many2one column (``fields_id``) living on the comodel ``obj``.
    """
    _classic_read = False
    _classic_write = False
    _type = 'one2many'
    __slots__ = ['_obj', '_fields_id', '_limit', '_auto_join']
    def __init__(self, obj, fields_id, string='unknown', limit=None, auto_join=False, **args):
        # one2many columns are not copied by default
        args['copy'] = args.get('copy', False)
        args['_prefetch'] = args.get('_prefetch', False)
        _column.__init__(self, string=string, **args)
        self._obj = obj
        self._fields_id = fields_id
        self._limit = limit
        self._auto_join = auto_join
        #one2many can't be used as condition for defaults
        assert(self.change_default != True)
    def to_field_args(self):
        # Map old-style column attributes onto new-API field keyword arguments.
        args = super(one2many, self).to_field_args()
        args['comodel_name'] = self._obj
        args['inverse_name'] = self._fields_id
        args['auto_join'] = self._auto_join
        args['limit'] = self._limit
        return args
    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        """Return ``{record_id: [comodel_ids]}`` for every id in ``ids``,
        found by searching the comodel on the inverse many2one column."""
        if self._context:
            context = dict(context or {})
            context.update(self._context)
        # retrieve the records in the comodel
        comodel = obj.pool[self._obj].browse(cr, user, [], context)
        inverse = self._fields_id
        domain = self._domain(obj) if callable(self._domain) else self._domain
        domain = domain + [(inverse, 'in', ids)]
        records = comodel.search(domain, limit=self._limit)
        result = {id: [] for id in ids}
        # read the inverse of records without prefetching other fields on them
        for record in records.with_context(prefetch_fields=False):
            # record[inverse] may be a record or an integer
            result[int(record[inverse])].append(record.id)
        return result
    def set(self, cr, obj, id, field, values, user=None, context=None):
        """Apply a list of one2many command tuples (codes 0-6, see the
        comment block below this class) on record ``id``.

        Returns the accumulated ``_store_get_values`` triggers produced by
        newly created comodel records.
        """
        result = []
        context = dict(context or {})
        context.update(self._context)
        if not values:
            return
        obj = obj.pool[self._obj]
        rec = obj.browse(cr, user, [], context=context)
        # defer function-field recomputation until all commands are applied
        with rec.env.norecompute():
            _table = obj._table
            for act in values:
                if act[0] == 0:
                    # create: link the new record via the inverse m2o column
                    act[2][self._fields_id] = id
                    id_new = obj.create(cr, user, act[2], context=context)
                    result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
                elif act[0] == 1:
                    obj.write(cr, user, [act[1]], act[2], context=context)
                elif act[0] == 2:
                    obj.unlink(cr, user, [act[1]], context=context)
                elif act[0] == 3:
                    inverse_field = obj._fields.get(self._fields_id)
                    assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
                    # if the model has on delete cascade, just delete the row
                    if inverse_field.ondelete == "cascade":
                        obj.unlink(cr, user, [act[1]], context=context)
                    else:
                        cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
                elif act[0] == 4:
                    # check whether the given record is already linked
                    rec = obj.browse(cr, SUPERUSER_ID, act[1], {'prefetch_fields': False})
                    if int(rec[self._fields_id]) != id:
                        # Must use write() to recompute parent_store structure if needed and check access rules
                        obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
                elif act[0] == 5:
                    inverse_field = obj._fields.get(self._fields_id)
                    assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
                    # if the o2m has a static domain we must respect it when unlinking
                    domain = self._domain(obj) if callable(self._domain) else self._domain
                    extra_domain = domain or []
                    ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
                    # If the model has cascade deletion, we delete the rows because it is the intended behavior,
                    # otherwise we only nullify the reverse foreign key column.
                    if inverse_field.ondelete == "cascade":
                        obj.unlink(cr, user, ids_to_unlink, context=context)
                    else:
                        obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
                elif act[0] == 6:
                    # replace the whole set of linked records
                    # Must use write() to recompute parent_store structure if needed
                    obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
                    ids2 = act[2] or [0]
                    cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
                    ids3 = map(lambda x:x[0], cr.fetchall())
                    obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
        return result
    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
        # delegate to the comodel's name_search, honouring the field's domain
        domain = self._domain(obj) if callable(self._domain) else self._domain
        return obj.pool[self._obj].name_search(cr, uid, value, domain, operator, context=context,limit=limit)
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        raise NotImplementedError('One2Many columns should not be used as record name (_rec_name)')
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update (write fields to ID)
# (2, ID) remove (calls unlink on ID, that will also delete the relationship because of the ondelete)
# (3, ID) unlink (delete the relationship between the two objects but does not delete ID)
# (4, ID) link (add a relationship)
# (5, ID) unlink all
# (6, ?, ids) set a list of links
#
class many2many(_column):
    """Encapsulates the logic of a many-to-many bidirectional relationship, handling the
    low-level details of the intermediary relationship table transparently.

    A many-to-many relationship is always symmetrical, and can be declared and accessed
    from either endpoint model.

    If ``rel`` (relationship table name), ``id1`` (source foreign key column name)
    or id2 (destination foreign key column name) are not specified, the system will
    provide default values. This will by default only allow one single symmetrical
    many-to-many relationship between the source and destination model.
    For multiple many-to-many relationship between the same models and for
    relationships where source and destination models are the same, ``rel``, ``id1``
    and ``id2`` should be specified explicitly.

    :param str obj: destination model
    :param str rel: optional name of the intermediary relationship table. If not specified,
                    a canonical name will be derived based on the alphabetically-ordered
                    model names of the source and destination (in the form: ``amodel_bmodel_rel``).
                    Automatic naming is not possible when the source and destination are
                    the same, for obvious ambiguity reasons.
    :param str id1: optional name for the column holding the foreign key to the current
                    model in the relationship table. If not specified, a canonical name
                    will be derived based on the model name (in the form: `src_model_id`).
    :param str id2: optional name for the column holding the foreign key to the destination
                    model in the relationship table. If not specified, a canonical name
                    will be derived based on the model name (in the form: `dest_model_id`)
    :param str string: field label
    """
    _classic_read = False
    _classic_write = False
    _type = 'many2many'
    __slots__ = ['_obj', '_rel', '_id1', '_id2', '_limit', '_auto_join']
    def __init__(self, obj, rel=None, id1=None, id2=None, string='unknown', limit=None, **args):
        """See the class docstring for the meaning of the parameters."""
        args['_prefetch'] = args.get('_prefetch', False)
        _column.__init__(self, string=string, **args)
        self._obj = obj
        # 'rel' must be a bare table name, not a dotted model name
        if rel and '.' in rel:
            raise Exception(_('The second argument of the many2many field %s must be a SQL table !'\
                'You used %s, which is not a valid SQL table name.')% (string,rel))
        self._rel = rel
        self._id1 = id1
        self._id2 = id2
        self._limit = limit
        self._auto_join = False
    def to_field_args(self):
        # Map old-style column attributes onto new-API field keyword arguments.
        args = super(many2many, self).to_field_args()
        args['comodel_name'] = self._obj
        args['relation'] = self._rel
        args['column1'] = self._id1
        args['column2'] = self._id2
        args['limit'] = self._limit
        return args
    def _sql_names(self, source_model):
        """Return the SQL names defining the structure of the m2m relationship table

            :return: (m2m_table, local_col, dest_col) where m2m_table is the table name,
                     local_col is the name of the column holding the current model's FK, and
                     dest_col is the name of the column holding the destination model's FK, and
        """
        tbl, col1, col2 = self._rel, self._id1, self._id2
        if not all((tbl, col1, col2)):
            # the default table name is based on the stable alphabetical order of tables
            dest_model = source_model.pool[self._obj]
            tables = tuple(sorted([source_model._table, dest_model._table]))
            if not tbl:
                assert tables[0] != tables[1], 'Implicit/Canonical naming of m2m relationship table '\
                                               'is not possible when source and destination models are '\
                                               'the same'
                tbl = '%s_%s_rel' % tables
            if not col1:
                col1 = '%s_id' % source_model._table
            if not col2:
                col2 = '%s_id' % dest_model._table
        return tbl, col1, col2
    def _get_query_and_where_params(self, cr, model, ids, values, where_params):
        """ Extracted from ``get`` to facilitate fine-tuning of the generated
            query. """
        query = 'SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s \
                   FROM %(rel)s, %(from_c)s \
                  WHERE %(rel)s.%(id1)s IN %%s \
                    AND %(rel)s.%(id2)s = %(tbl)s.id \
                 %(where_c)s \
                 %(order_by)s \
                 %(limit)s \
                 OFFSET %(offset)d' \
                % values
        return query, where_params
    def get(self, cr, model, ids, name, user=None, offset=0, context=None, values=None):
        """Return ``{record_id: [comodel_ids]}`` for every id in ``ids`` by
        joining through the relationship table, applying the field's static
        domain and the comodel's ir.rules."""
        if not context:
            context = {}
        if not values:
            values = {}
        res = {}
        if not ids:
            return res
        for id in ids:
            res[id] = []
        if offset:
            _logger.warning(
                "Specifying offset at a many2many.get() is deprecated and may"
                " produce unpredictable results.")
        obj = model.pool[self._obj]
        rel, id1, id2 = self._sql_names(model)
        # static domains are lists, and are evaluated both here and on client-side, while string
        # domains are supposed to be dynamic and evaluated on client-side only (thus ignored here)
        # FIXME: make this distinction explicit in API!
        domain = isinstance(self._domain, list) and self._domain or []
        wquery = obj._where_calc(cr, user, domain, context=context)
        obj._apply_ir_rules(cr, user, wquery, 'read', context=context)
        order_by = obj._generate_order_by(None, wquery)
        from_c, where_c, where_params = wquery.get_sql()
        if where_c:
            where_c = ' AND ' + where_c
        limit_str = ''
        if self._limit is not None:
            limit_str = ' LIMIT %d' % self._limit
        query, where_params = self._get_query_and_where_params(cr, model, ids, {'rel': rel,
               'from_c': from_c,
               'tbl': obj._table,
               'id1': id1,
               'id2': id2,
               'where_c': where_c,
               'limit': limit_str,
               'order_by': order_by,
               'offset': offset,
                }, where_params)
        cr.execute(query, [tuple(ids),] + where_params)
        # each row is (comodel_id, source_id)
        for r in cr.fetchall():
            res[r[1]].append(r[0])
        return res
    def set(self, cr, model, id, name, values, user=None, context=None):
        """Apply a list of many2many command tuples (codes 0-6, see the
        comment block above this class) on record ``id``, maintaining the
        relationship table with raw SQL."""
        if not context:
            context = {}
        if not values:
            return
        rel, id1, id2 = self._sql_names(model)
        obj = model.pool[self._obj]
        for act in values:
            # skip anything that is not a non-empty command tuple/list
            if not (isinstance(act, list) or isinstance(act, tuple)) or not act:
                continue
            if act[0] == 0:
                idnew = obj.create(cr, user, act[2], context=context)
                cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, idnew))
            elif act[0] == 1:
                obj.write(cr, user, [act[1]], act[2], context=context)
            elif act[0] == 2:
                obj.unlink(cr, user, [act[1]], context=context)
            elif act[0] == 3:
                cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s', (id, act[1]))
            elif act[0] == 4:
                # following queries are in the same transaction - so should be relatively safe
                cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s', (id, act[1]))
                if not cr.fetchone():
                    cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, act[1]))
            elif act[0] == 5:
                cr.execute('delete from '+rel+' where ' + id1 + ' = %s', (id,))
            elif act[0] == 6:
                # replace all links: delete only rows the user may see (per
                # ir.rule domain), then re-insert the requested ids
                d1, d2,tables = obj.pool.get('ir.rule').domain_get(cr, user, obj._name, context=context)
                if d1:
                    d1 = ' and ' + ' and '.join(d1)
                else:
                    d1 = ''
                cr.execute('delete from '+rel+' where '+id1+'=%s AND '+id2+' IN (SELECT '+rel+'.'+id2+' FROM '+rel+', '+','.join(tables)+' WHERE '+rel+'.'+id1+'=%s AND '+rel+'.'+id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)
                for act_nbr in act[2]:
                    cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s, %s)', (id, act_nbr))
    #
    # TODO: use a name_search
    #
    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
        # search comodel records whose 'name' matches value, within the
        # caller's domain plus the field's static domain
        return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', operator, value)], offset, limit, context=context)
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        raise NotImplementedError('Many2Many columns should not be used as record name (_rec_name)')
def get_nice_size(value):
    """Return *value* unchanged when it already looks short/human-readable,
    otherwise return its byte size formatted by ``tools.human_size``.

    An integer value is taken directly as a byte count; any other truthy
    value is assumed to be a string whose length is the size.
    """
    if isinstance(value, (int, long)):
        size = value
    else:
        # this is supposed to be a string
        size = len(value) if value else 0
    # short values are assumed to be pre-formatted human sizes already
    if size < 12:
        return value
    return tools.human_size(size)
# Control characters that are forbidden in XML 1.0 documents (everything
# below 0x20 except tab, newline and carriage return).
# See http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# and http://bugs.python.org/issue10066
invalid_xml_low_bytes = re.compile(r'[\x00-\x08\x0b-\x0c\x0e-\x1f]')
def sanitize_binary_value(value):
    """Return a version of ``value`` that is safe to send over XML-RPC.

    Container types and pre-wrapped binaries pass through untouched; string
    values are coerced to unicode, and values containing low control bytes
    (invalid in XML 1.0) are base64-encoded as a last resort.
    """
    # binary fields should be 7-bit ASCII base64-encoded data,
    # but we do additional sanity checks to make sure the values
    # are not something else that won't pass via XML-RPC
    if isinstance(value, (xmlrpclib.Binary, tuple, list, dict)):
        # these builtin types are meant to pass untouched
        return value
    # Handle invalid bytes values that will cause problems
    # for XML-RPC. See for more info:
    #  - http://bugs.python.org/issue10066
    #  - http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
    # Coercing to unicode would normally allow it to properly pass via
    # XML-RPC, transparently encoded as UTF-8 by xmlrpclib.
    # (this works for _any_ byte values, thanks to the fallback
    #  to latin-1 passthrough encoding when decoding to unicode)
    value = tools.ustr(value)
    # Due to Python bug #10066 this could still yield invalid XML
    # bytes, specifically in the low byte range, that will crash
    # the decoding side: [\x00-\x08\x0b-\x0c\x0e-\x1f]
    # So check for low bytes values, and if any, perform
    # base64 encoding - not very smart or useful, but this is
    # our last resort to avoid crashing the request.
    if invalid_xml_low_bytes.search(value):
        # b64-encode after restoring the pure bytes with latin-1
        # passthrough encoding
        value = base64.b64encode(value.encode('latin-1'))
    return value
# ---------------------------------------------------------
# Function fields
# ---------------------------------------------------------
class function(_column):
    """
    A field whose value is computed by a function (rather
    than being read from the database).

    :param fnct: the callable that will compute the field value.
    :param arg: arbitrary value to be passed to ``fnct`` when computing the value.
    :param fnct_inv: the callable that will allow writing values in that field
                     (if not provided, the field is read-only).
    :param fnct_inv_arg: arbitrary value to be passed to ``fnct_inv`` when
                         writing a value.
    :param str type: type of the field simulated by the function field
    :param fnct_search: the callable that allows searching on the field
                        (if not provided, search will not return any result).
    :param store: store computed value in database
                  (see :ref:`The *store* parameter <field-function-store>`).
    :type store: True or dict specifying triggers for field computation
    :param multi: name of batch for batch computation of function fields.
                  All fields with the same batch name will be computed by
                  a single function call. This changes the signature of the
                  ``fnct`` callable.

    .. _field-function-fnct:

    .. rubric:: The ``fnct`` parameter

    The callable implementing the function field must have the following signature:

    .. function:: fnct(model, cr, uid, ids, field_name(s), arg, context)

        Implements the function field.

        :param orm model: model to which the field belongs (should be ``self`` for
                          a model method)
        :param field_name(s): name of the field to compute, or if ``multi`` is provided,
                              list of field names to compute.
        :type field_name(s): str | [str]
        :param arg: arbitrary value passed when declaring the function field
        :rtype: dict
        :return: mapping of ``ids`` to computed values, or if multi is provided,
                 to a map of field_names to computed values

    The values in the returned dictionary must be of the type specified by the type
    argument in the field declaration.

    Here is an example with a simple function ``char`` function field::

        # declarations
        def compute(self, cr, uid, ids, field_name, arg, context):
            result = {}
            # ...
            return result
        _columns['my_char'] = fields.function(compute, type='char', size=50)

        # when called with ``ids=[1,2,3]``, ``compute`` could return:
        {
            1: 'foo',
            2: 'bar',
            3: False # null values should be returned explicitly too
        }

    If ``multi`` is set, then ``field_name`` is replaced by ``field_names``: a list
    of the field names that should be computed. Each value in the returned
    dictionary must then be a dictionary mapping field names to values.

    Here is an example where two function fields (``name`` and ``age``)
    are both computed by a single function field::

        # declarations
        def compute_person_data(self, cr, uid, ids, field_names, arg, context):
            result = {}
            # ...
            return result
        _columns['name'] = fields.function(compute_person_data, type='char',\
                                           size=50, multi='person_data')
        _columns['age'] = fields.function(compute_person_data, type='integer',\
                                          multi='person_data')

        # when called with ``ids=[1,2,3]``, ``compute_person_data`` could return:
        {
            1: {'name': 'Bob', 'age': 23},
            2: {'name': 'Sally', 'age': 19},
            3: {'name': 'unknown', 'age': False}
        }

    .. _field-function-fnct-inv:

    .. rubric:: The ``fnct_inv`` parameter

    This callable implements the write operation for the function field
    and must have the following signature:

    .. function:: fnct_inv(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context)

        Callable that implements the ``write`` operation for the function field.

        :param orm model: model to which the field belongs (should be ``self`` for
                          a model method)
        :param int id: the identifier of the object to write on
        :param str field_name: name of the field to set
        :param fnct_inv_arg: arbitrary value passed when declaring the function field
        :return: True

    When writing values for a function field, the ``multi`` parameter is ignored.

    .. _field-function-fnct-search:

    .. rubric:: The ``fnct_search`` parameter

    This callable implements the search operation for the function field
    and must have the following signature:

    .. function:: fnct_search(model, cr, uid, model_again, field_name, criterion, context)

        Callable that implements the ``search`` operation for the function field by expanding
        a search criterion based on the function field into a new domain based only on
        columns that are stored in the database.

        :param orm model: model to which the field belongs (should be ``self`` for
                          a model method)
        :param orm model_again: same value as ``model`` (seriously! this is for backwards
                                compatibility)
        :param str field_name: name of the field to search on
        :param list criterion: domain component specifying the search criterion on the field.
        :rtype: list
        :return: domain to use instead of ``criterion`` when performing the search.
                 This new domain must be based only on columns stored in the database, as it
                 will be used directly without any translation.

    The returned value must be a domain, that is, a list of the form [(field_name, operator, operand)].
    The most generic way to implement ``fnct_search`` is to directly search for the records that
    match the given ``criterion``, and return their ``ids`` wrapped in a domain, such as
    ``[('id','in',[1,3,5])]``.

    .. _field-function-store:

    .. rubric:: The ``store`` parameter

    The ``store`` parameter allows caching the result of the field computation in the
    database, and defining the triggers that will invalidate that cache and force a
    recomputation of the function field.
    When not provided, the field is computed every time its value is read.
    The value of ``store`` may be either ``True`` (to recompute the field value whenever
    any field in the same record is modified), or a dictionary specifying a more
    flexible set of recomputation triggers.

    A trigger specification is a dictionary that maps the names of the models that
    will trigger the computation, to a tuple describing the trigger rule, in the
    following form::

        store = {
            'trigger_model': (mapping_function,
                              ['trigger_field1', 'trigger_field2'],
                              priority),
        }

    A trigger rule is defined by a 3-item tuple where:

        * The ``mapping_function`` is defined as follows:

            .. function:: mapping_function(trigger_model, cr, uid, trigger_ids, context)

                Callable that maps record ids of a trigger model to ids of the
                corresponding records in the source model (whose field values
                need to be recomputed).

                :param orm model: trigger_model
                :param list trigger_ids: ids of the records of trigger_model that were
                                         modified
                :rtype: list
                :return: list of ids of the source model whose function field values
                         need to be recomputed

        * The second item is a list of the fields who should act as triggers for
          the computation. If an empty list is given, all fields will act as triggers.
        * The last item is the priority, used to order the triggers when processing them
          after any write operation on a model that has function field triggers. The
          default priority is 10.

    In fact, setting store = True is the same as using the following trigger dict::

        store = {
            'model_itself': (lambda self, cr, uid, ids, context: ids,
                             [],
                             10)
        }
    """
    _properties = True
    __slots__ = [
        '_type',
        '_classic_read',
        '_classic_write',
        '_symbol_c',
        '_symbol_f',
        '_symbol_set',
        '_symbol_get',
        '_fnct',
        '_arg',
        '_fnct_inv',
        '_fnct_inv_arg',
        '_fnct_search',
        '_multi',
        'store',
        '_digits',
        '_digits_compute',
        'selection',
        '_obj',
    ]
    @property
    def digits(self):
        # Precision may itself be computed at runtime; when a digits_compute
        # callable was given, evaluate it with a fresh cursor.
        if self._digits_compute:
            with _get_cursor() as cr:
                return self._digits_compute(cr)
        else:
            return self._digits
    #
    # multi: compute several fields in one call
    #
    def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, store=False, multi=False, **args):
        self._classic_read = False
        self._classic_write = False
        self._prefetch = False
        self._symbol_c = '%s'
        self._symbol_f = _symbol_set
        self._symbol_set = (self._symbol_c, self._symbol_f)
        self._symbol_get = None
        # pop attributes that should not be assigned to self
        self._digits = args.pop('digits', (16,2))
        self._digits_compute = args.pop('digits_compute', None)
        self._obj = args.pop('relation', obj)
        # function fields are not copied by default
        args['copy'] = args.get('copy', False)
        _column.__init__(self, **args)
        self._type = type
        self._fnct = fnct
        self._arg = arg
        self._fnct_inv = fnct_inv
        self._fnct_inv_arg = fnct_inv_arg
        self._fnct_search = fnct_search
        self.store = store
        self._multi = multi
        if not fnct_inv:
            self.readonly = 1
        if not fnct_search and not store:
            self.selectable = False
        if callable(args.get('selection')):
            from openerp import api
            self.selection = api.expected(api.cr_uid_context, args['selection'])
        if store:
            # stored function fields behave like regular columns for read/write
            if self._type != 'many2one':
                # m2o fields need to return tuples with name_get, not just foreign keys
                self._classic_read = True
            self._classic_write = True
            if type=='binary':
                self._symbol_get=lambda x:x and str(x)
            else:
                self._prefetch = True
        # borrow the SQL conversion helpers of the simulated basic type
        if type == 'char':
            self._symbol_c = char._symbol_c
            self._symbol_f = lambda x: _symbol_set_char(self, x)
            self._symbol_set = (self._symbol_c, self._symbol_f)
        elif type == 'float':
            self._symbol_c = float._symbol_c
            self._symbol_f = lambda x: _symbol_set_float(self, x)
            self._symbol_set = (self._symbol_c, self._symbol_f)
        else:
            type_class = globals().get(type)
            if type_class is not None:
                self._symbol_c = type_class._symbol_c
                self._symbol_f = type_class._symbol_f
                self._symbol_set = type_class._symbol_set
    def new(self, _computed_field=False, **args):
        if _computed_field:
            # field is computed, we need an instance of a non-function column
            type_class = globals()[self._type]
            return type_class(**args)
        else:
            # HACK: function fields are tricky to recreate, simply return a copy
            import copy
            return copy.copy(self)
    def to_field_args(self):
        # Map old-style column attributes onto new-API field keyword arguments,
        # delegating type-specific extras to the simulated type.
        args = super(function, self).to_field_args()
        args['store'] = bool(self.store)
        if self._type in ('float',):
            args['digits'] = self._digits_compute or self._digits
        elif self._type in ('selection', 'reference'):
            args['selection'] = self.selection
        elif self._type in ('many2one', 'one2many', 'many2many'):
            args['comodel_name'] = self._obj
        return args
    def digits_change(self, cr):
        pass
    def search(self, cr, uid, obj, name, args, context=None):
        if not self._fnct_search:
            #CHECKME: should raise an exception
            return []
        return self._fnct_search(obj, cr, uid, obj, name, args, context=context)
    def postprocess(self, cr, uid, obj, field, value=None, context=None):
        # single-value convenience wrapper around _postprocess_batch
        return self._postprocess_batch(cr, uid, obj, field, {0: value}, context=context)[0]
    def _postprocess_batch(self, cr, uid, obj, field, values, context=None):
        """Post-process computed binary values according to the context
        (size-only reads vs. XML-RPC-safe sanitization)."""
        if not values:
            return values
        if context is None:
            context = {}
        field_type = obj._columns[field]._type
        new_values = dict(values)
        if field_type == 'binary':
            if context.get('bin_size'):
                # client requests only the size of binary fields
                for rid, value in values.iteritems():
                    if value:
                        new_values[rid] = get_nice_size(value)
            elif not context.get('bin_raw'):
                for rid, value in values.iteritems():
                    if value:
                        new_values[rid] = sanitize_binary_value(value)
        return new_values
    def get(self, cr, obj, ids, name, uid=False, context=None, values=None):
        """Compute (or reuse pre-read) values for ``ids`` and post-process
        them; with ``multi``, ``name`` is a list of field names."""
        multi = self._multi
        # if we already have a value, don't recompute it.
        # This happens in the case of stored many2one fields
        if values and not multi and name in values[0]:
            result = dict((v['id'], v[name]) for v in values)
        elif values and multi and all(n in values[0] for n in name):
            result = dict((v['id'], dict((n, v[n]) for n in name)) for v in values)
        else:
            result = self._fnct(obj, cr, uid, ids, name, self._arg, context)
        if multi:
            # pivot {id: {field: value}} into {field: {id: value}} so each
            # field can be post-processed as a batch
            swap = {}
            for rid, values in result.iteritems():
                for f, v in values.iteritems():
                    if f not in name:
                        continue
                    swap.setdefault(f, {})[rid] = v
            for field, values in swap.iteritems():
                new_values = self._postprocess_batch(cr, uid, obj, field, values, context)
                for rid, value in new_values.iteritems():
                    result[rid][field] = value
        else:
            result = self._postprocess_batch(cr, uid, obj, name, result, context)
        return result
    def set(self, cr, obj, id, name, value, user=None, context=None):
        # writable only when an inverse function was provided
        if not context:
            context = {}
        if self._fnct_inv:
            self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)
    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # Function fields are supposed to emulate a basic field type,
        # so they can delegate to the basic type for record name rendering
        return globals()[field._type]._as_display_name(field, cr, uid, obj, value, context=context)
# ---------------------------------------------------------
# Related fields
# ---------------------------------------------------------
class related(function):
    """Field that points to some data inside another field of the current record.

    Example::

       _columns = {
           'foo_id': fields.many2one('my.foo', 'Foo'),
           'bar': fields.related('foo_id', 'frol', type='char', string='Frol of Foo'),
        }
    """
    __slots__ = ['arg', '_relations']
    def _related_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
        # assume self._arg = ('foo', 'bar', 'baz')
        # domain = [(name, op, val)] => search [('foo.bar.baz', op, val)]
        field = '.'.join(self._arg)
        return map(lambda x: (field, x[1], x[2]), domain)
    def _related_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
        """Inverse function: write ``values`` on the last field of the chain."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        for instance in obj.browse(cr, uid, ids, context=context):
            # traverse all fields except the last one
            for field in self.arg[:-1]:
                instance = instance[field][:1]
            if instance:
                # write on the last field of the target record
                instance.write({self.arg[-1]: values})
    def _related_read(self, obj, cr, uid, ids, field_name, args, context=None):
        """Compute function: follow the field chain and read the last field.
        Traversal is done as superuser (access is checked on the source
        document, not on every intermediate record)."""
        res = {}
        for record in obj.browse(cr, SUPERUSER_ID, ids, context=context):
            value = record
            # traverse all fields except the last one
            for field in self.arg[:-1]:
                value = value[field][:1]
            # read the last field on the target record
            res[record.id] = value[self.arg[-1]]
        if self._type == 'many2one':
            # res[id] is a recordset; convert it to (id, name) or False.
            # Perform name_get as root, as seeing the name of a related object depends on
            # access right of source document, not target, so user may not have access.
            value_ids = list(set(value.id for value in res.itervalues() if value))
            value_name = dict(obj.pool[self._obj].name_get(cr, SUPERUSER_ID, value_ids, context=context))
            res = dict((id, bool(value) and (value.id, value_name[value.id])) for id, value in res.iteritems())
        elif self._type in ('one2many', 'many2many'):
            # res[id] is a recordset; convert it to a list of ids
            res = dict((id, value.ids) for id, value in res.iteritems())
        return res
    def __init__(self, *arg, **args):
        self.arg = arg
        self._relations = []
        super(related, self).__init__(self._related_read, arg, self._related_write, fnct_inv_arg=arg, fnct_search=self._related_search, **args)
        if self.store is True:
            # TODO: improve here to change self.store = {...} according to related objects
            pass
class sparse(function):
    """Function field whose values live inside a sibling 'serialized' column
    (``serialization_field``) instead of a dedicated database column."""
    __slots__ = ['serialization_field']
    def convert_value(self, obj, cr, uid, record, value, read_value, context=None):
        """
            + For a many2many field, a list of tuples is expected.
              Here is the list of tuple that are accepted, with the corresponding semantics ::

                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will also delete the object completely, and the link to it as well)
                 (3, ID)                cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
                 (4, ID)                link to existing record with id = ID (adds a relationship)
                 (5)                    unlink all (like using (3,ID) for all linked records)
                 (6, 0, [IDs])          replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)

                 Example:
                    [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]

            + For a one2many field, a list of tuples is expected.
              Here is the list of tuple that are accepted, with the corresponding semantics ::

                 (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
                 (1, ID, { values })    update the linked record with id = ID (write *values* on it)
                 (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will also delete the object completely, and the link to it as well)

                 Example:
                    [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
        """
        if self._type == 'many2many':
            if not value:
                return []
            # only full replacement is supported for sparse m2m values
            assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
            return value[0][2]
        elif self._type == 'one2many':
            if not read_value:
                read_value = []
            relation_obj = obj.pool[self.relation]
            for vals in value:
                assert vals[0] in (0,1,2), 'Unsupported o2m value for sparse field: %s' % vals
                if vals[0] == 0:
                    read_value.append(relation_obj.create(cr, uid, vals[2], context=context))
                elif vals[0] == 1:
                    relation_obj.write(cr, uid, vals[1], vals[2], context=context)
                elif vals[0] == 2:
                    relation_obj.unlink(cr, uid, vals[1], context=context)
                    read_value.remove(vals[1])
            return read_value
        return value
    def _sparse_write(self,obj,cr, uid, ids, field_name, value, args, context=None):
        """Inverse function: store ``value`` under ``field_name`` inside the
        serialization field of every record in ``ids``."""
        if not type(ids) == list:
            ids = [ids]
        records = obj.browse(cr, uid, ids, context=context)
        for record in records:
            # grab serialized value as object - already deserialized
            serialized = getattr(record, self.serialization_field)
            if value is None:
                # simply delete the key to unset it.
                serialized.pop(field_name, None)
            else:
                serialized[field_name] = self.convert_value(obj, cr, uid, record, value, serialized.get(field_name), context=context)
            obj.write(cr, uid, ids, {self.serialization_field: serialized}, context=context)
        return True
    def _sparse_read(self, obj, cr, uid, ids, field_names, args, context=None):
        """Compute function: extract ``field_names`` from the serialization
        field, filtering out references to deleted records."""
        results = {}
        records = obj.browse(cr, uid, ids, context=context)
        for record in records:
            # grab serialized value as object - already deserialized
            serialized = getattr(record, self.serialization_field)
            results[record.id] = {}
            for field_name in field_names:
                field_type = obj._columns[field_name]._type
                value = serialized.get(field_name, False)
                if field_type in ('one2many','many2many'):
                    value = value or []
                    if value:
                        # filter out deleted records as superuser
                        relation_obj = obj.pool[obj._columns[field_name].relation]
                        value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
                if type(value) in (int,long) and field_type == 'many2one':
                    relation_obj = obj.pool[obj._columns[field_name].relation]
                    # check for deleted record as superuser
                    if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
                        value = False
                results[record.id][field_name] = value
        return results
    def __init__(self, serialization_field, **kwargs):
        self.serialization_field = serialization_field
        super(sparse, self).__init__(self._sparse_read, fnct_inv=self._sparse_write, multi='__sparse_multi', **kwargs)
# ---------------------------------------------------------
# Dummy fields
# ---------------------------------------------------------
class dummy(function):
    """Function field that stores nothing and computes nothing.

    Useful as a pure placeholder: reads yield no values, writes are silently
    discarded, and searching on the field never matches any record.
    """
    __slots__ = ['arg', '_relations']
    def __init__(self, *arg, **args):
        self.arg = arg
        self._relations = []
        super(dummy, self).__init__(self._dummy_read, arg, self._dummy_write, fnct_inv_arg=arg, fnct_search=self._dummy_search, **args)
    def _dummy_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
        """Search expansion: match nothing."""
        return []
    def _dummy_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
        """Inverse function: discard the written value."""
        return False
    def _dummy_read(self, obj, cr, uid, ids, field_name, args, context=None):
        """Compute function: no values for any record."""
        return {}
# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
    """ A field able to store an arbitrary python data structure.
    Note: only plain components allowed.
    """
    _type = 'serialized'
    __slots__ = []
    # NOTE: deliberately has no ``self`` -- it is stored in _symbol_f /
    # _symbol_set and invoked as a plain conversion function by the ORM's
    # SQL parameter machinery, never as a bound method.
    def _symbol_set_struct(val):
        return simplejson.dumps(val)
    def _symbol_get_struct(self, val):
        # Treat SQL NULL / empty string as an empty dict.
        return simplejson.loads(val or '{}')
    # (placeholder, converter) pair used when writing the column to SQL.
    _symbol_c = '%s'
    _symbol_f = _symbol_set_struct
    _symbol_set = (_symbol_c, _symbol_f)
    # Converter applied when reading the raw column value back.
    _symbol_get = _symbol_get_struct
    def __init__(self, *args, **kwargs):
        # Serialized blobs can be large, so they are not prefetched by default.
        kwargs['_prefetch'] = kwargs.get('_prefetch', False)
        super(serialized, self).__init__(*args, **kwargs)
# TODO: review completly this class for speed improvement
class property(function):
    """Company-dependent field backed by the ``ir.property`` table.

    Values are not stored on the record itself; reads, writes and searches
    are delegated to the ``ir.property`` model so the effective value can
    differ per company.
    """
    __slots__ = []
    def to_field_args(self):
        # Mark the new-API field as company-dependent.
        args = super(property, self).to_field_args()
        args['company_dependent'] = True
        return args
    def _property_search(self, tobj, cr, uid, obj, name, domain, context=None):
        """Delegate domain evaluation to ir.property; returns matching ids."""
        ir_property = obj.pool['ir.property']
        result = []
        for field, operator, value in domain:
            result += ir_property.search_multi(cr, uid, name, tobj._name, operator, value, context=context)
        return result
    def _property_write(self, obj, cr, uid, id, prop_name, value, obj_dest, context=None):
        """Store the per-record value through ir.property."""
        ir_property = obj.pool['ir.property']
        ir_property.set_multi(cr, uid, prop_name, obj._name, {id: value}, context=context)
        return True
    def _property_read(self, obj, cr, uid, ids, prop_names, obj_dest, context=None):
        """Read property values; many2one results are (id, display_name) pairs."""
        ir_property = obj.pool['ir.property']
        res = {id: {} for id in ids}
        for prop_name in prop_names:
            field = obj._fields[prop_name]
            values = ir_property.get_multi(cr, uid, prop_name, obj._name, ids, context=context)
            if field.type == 'many2one':
                # name_get the non-null values as SUPERUSER_ID
                # sum() seeded with an empty browse() concatenates the distinct
                # records into a single recordset (Python 2 idiom).
                vals = sum(set(filter(None, values.itervalues())),
                           obj.pool[field.comodel_name].browse(cr, uid, [], context=context))
                vals_name = dict(vals.sudo().name_get()) if vals else {}
                for id, value in values.iteritems():
                    # ng is False when unset or the target record vanished.
                    ng = False
                    if value and value.id in vals_name:
                        ng = value.id, vals_name[value.id]
                    res[id][prop_name] = ng
            else:
                for id, value in values.iteritems():
                    res[id][prop_name] = value
        return res
    def __init__(self, **args):
        if 'view_load' in args:
            _logger.warning("view_load attribute is deprecated on ir.fields. Args: %r", args)
        # Copy before mutating so the caller's dict is left untouched.
        args = dict(args)
        # 'relation' is the legacy spelling of the comodel argument.
        args['obj'] = args.pop('relation', '') or args.get('obj', '')
        super(property, self).__init__(
            fnct=self._property_read,
            fnct_inv=self._property_write,
            fnct_search=self._property_search,
            multi='properties',
            **args
        )
class column_info(object):
    """ Struct containing details about an osv column, either one local to
    its model, or one inherited via _inherits.
    .. attribute:: name
    name of the column
    .. attribute:: column
    column instance, subclass of :class:`_column`
    .. attribute:: parent_model
    if the column is inherited, name of the model that contains it,
    ``None`` for local columns.
    .. attribute:: parent_column
    the name of the column containing the m2o relationship to the
    parent model that contains this column, ``None`` for local columns.
    .. attribute:: original_parent
    if the column is inherited, name of the original parent model that
    contains it i.e in case of multilevel inheritance, ``None`` for
    local columns.
    """
    __slots__ = ['name', 'column', 'parent_model', 'parent_column', 'original_parent']

    def __init__(self, name, column, parent_model=None, parent_column=None, original_parent=None):
        # Assign each constructor argument to its matching slot.
        values = (name, column, parent_model, parent_column, original_parent)
        for slot, value in zip(self.__slots__, values):
            setattr(self, slot, value)

    def __str__(self):
        # Render as "column_info(name, column, parent_model, parent_column, original_parent)".
        fields = (self.__class__.__name__, self.name, self.column,
                  self.parent_model, self.parent_column, self.original_parent)
        return '%s(%s, %s, %s, %s, %s)' % fields
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EmanueleCannizzaro/scons | src/engine/SCons/Tool/JavaCommonTests.py | 1 | 14898 | #
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/JavaCommonTests.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import sys
import unittest
import TestUnit
import SCons.Scanner.IDL
import SCons.Tool.JavaCommon
# Adding trace=trace to any of the parse_jave() calls below will cause
# the parser to spit out trace messages of the tokens it sees and the
# attendant transitions.
def trace(token, newstate):
    """Debug hook: log each token the Java parser sees and the state it enters.

    Pass ``trace=trace`` to parse_java() to enable it.
    """
    from SCons.Debug import Trace
    state_name = newstate.__class__.__name__
    Trace('token = %s, state = %s\n' % (repr(token), state_name))
class parse_javaTestCase(unittest.TestCase):
    """Unit tests for SCons.Tool.JavaCommon.parse_java().

    Each test feeds a snippet of Java source to parse_java() and checks the
    derived package directory and the ordered list of .class names the
    parser predicts, across several Java language levels where relevant.
    """

    def test_bare_bones(self):
        """Test a bare-bones class"""
        input = """\
package com.sub.bar;
public class Foo
{
public static void main(String[] args)
{
/* This tests a former bug where strings would eat later code. */
String hello1 = new String("Hello, world!");
}
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert pkg_dir == os.path.join('com', 'sub', 'bar'), pkg_dir
        assert classes == ['Foo'], classes

    def test_dollar_sign(self):
        """Test class names with $ in them"""
        input = """\
public class BadDep {
public void new$rand () {}
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert pkg_dir is None, pkg_dir
        assert classes == ['BadDep'], classes

    def test_inner_classes(self):
        """Test parsing various forms of inner classes"""
        input = """\
class Empty {
}
interface Listener {
public void execute();
}
public
class
Test implements Listener {
class Inner {
void go() {
use(new Listener() {
public void execute() {
System.out.println("In Inner");
}
});
}
String s1 = "class A";
String s2 = "new Listener() { }";
/* class B */
/* new Listener() { } */
}
class Inner2 {
Inner2() { Listener l = new Listener(); }
}
/* Make sure this class doesn't get interpreted as an inner class of the previous one, when "new" is used in the previous class. */
class Inner3 {
}
public static void main(String[] args) {
new Test().run();
}
void run() {
use(new Listener() {
public void execute() {
use(new Listener( ) {
public void execute() {
System.out.println("Inside execute()");
}
});
}
});
new Inner().go();
}
void use(Listener l) {
l.execute();
}
}
class Private {
void run() {
new Listener() {
public void execute() {
}
};
}
}
"""
        # Java 1.4 numbers anonymous classes per outer class.
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
        assert pkg_dir is None, pkg_dir
        expect = [
            'Empty',
            'Listener',
            'Test$1',
            'Test$Inner',
            'Test$Inner2',
            'Test$Inner3',
            'Test$2',
            'Test$3',
            'Test',
            'Private$1',
            'Private',
        ]
        assert classes == expect, classes
        # Java 1.5+ numbers anonymous classes per enclosing scope.
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
        assert pkg_dir is None, pkg_dir
        expect = [
            'Empty',
            'Listener',
            'Test$Inner$1',
            'Test$Inner',
            'Test$Inner2',
            'Test$Inner3',
            'Test$1',
            'Test$1$1',
            'Test',
            'Private$1',
            'Private',
        ]
        assert classes == expect, (expect, classes)
        # Single-digit version strings must behave like their 1.x form.
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '5')
        assert pkg_dir is None, pkg_dir
        expect = [
            'Empty',
            'Listener',
            'Test$Inner$1',
            'Test$Inner',
            'Test$Inner2',
            'Test$Inner3',
            'Test$1',
            'Test$1$1',
            'Test',
            'Private$1',
            'Private',
        ]
        assert classes == expect, (expect, classes)

    def test_comments(self):
        """Test a class with comments"""
        input = """\
package com.sub.foo;
import java.rmi.Naming;
import java.rmi.RemoteException;
import java.rmi.RMISecurityManager;
import java.rmi.server.UnicastRemoteObject;
public class Example1 extends UnicastRemoteObject implements Hello {
public Example1() throws RemoteException {
super();
}
public String sayHello() {
return "Hello World!";
}
public static void main(String args[]) {
if (System.getSecurityManager() == null) {
System.setSecurityManager(new RMISecurityManager());
}
// a comment
try {
Example1 obj = new Example1();
Naming.rebind("//myhost/HelloServer", obj);
System.out.println("HelloServer bound in registry");
} catch (Exception e) {
System.out.println("Example1 err: " + e.getMessage());
e.printStackTrace();
}
}
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert pkg_dir == os.path.join('com', 'sub', 'foo'), pkg_dir
        assert classes == ['Example1'], classes

    def test_arrays(self):
        """Test arrays of class instances"""
        input = """\
public class Test {
MyClass abc = new MyClass();
MyClass xyz = new MyClass();
MyClass _array[] = new MyClass[] {
abc,
xyz
}
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert pkg_dir is None, pkg_dir
        assert classes == ['Test'], classes

    def test_backslash(self):
        """Test backslash handling"""
        input = """\
public class MyTabs
{
private class MyInternal
{
}
private final static String PATH = "images\\\\";
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert pkg_dir is None, pkg_dir
        assert classes == ['MyTabs$MyInternal', 'MyTabs'], classes

    def test_enum(self):
        """Test the Java 1.5 enum keyword"""
        input = """\
package p;
public enum a {}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert pkg_dir == 'p', pkg_dir
        assert classes == ['a'], classes

    def test_anon_classes(self):
        """Test anonymous classes"""
        input = """\
public abstract class TestClass
{
public void completed()
{
new Thread()
{
}.start();
new Thread()
{
}.start();
}
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert pkg_dir is None, pkg_dir
        assert classes == ['TestClass$1', 'TestClass$2', 'TestClass'], classes

    def test_closing_bracket(self):
        """Test finding a closing bracket instead of an anonymous class"""
        input = """\
class TestSCons {
public static void main(String[] args) {
Foo[] fooArray = new Foo[] { new Foo() };
}
}
class Foo { }
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert pkg_dir is None, pkg_dir
        assert classes == ['TestSCons', 'Foo'], classes

    def test_dot_class_attributes(self):
        """Test handling ".class" attributes"""
        input = """\
public class Test extends Object
{
static {
Class c = Object[].class;
Object[] s = new Object[] {};
}
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert classes == ['Test'], classes
        input = """\
public class A {
public class B {
public void F(Object[] o) {
F(new Object[] {Object[].class});
}
public void G(Object[] o) {
F(new Object[] {});
}
}
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
        assert pkg_dir is None, pkg_dir
        assert classes == ['A$B', 'A'], classes

    def test_anonymous_classes_with_parentheses(self):
        """Test finding anonymous classes marked by parentheses"""
        input = """\
import java.io.File;
public class Foo {
public static void main(String[] args) {
File f = new File(
new File("a") {
public String toString() {
return "b";
}
} to String()
) {
public String toString() {
return "c";
}
};
}
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
        assert classes == ['Foo$1', 'Foo$2', 'Foo'], classes
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
        assert classes == ['Foo$1', 'Foo$1$1', 'Foo'], classes
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '6')
        assert classes == ['Foo$1', 'Foo$1$1', 'Foo'], classes

    def test_nested_anonymous_inner_classes(self):
        """Test finding nested anonymous inner classes"""
        input = """\
// import java.util.*;
public class NestedExample
{
public NestedExample()
{
Thread t = new Thread() {
public void start()
{
Thread t = new Thread() {
public void start()
{
try {Thread.sleep(200);}
catch (Exception e) {}
}
};
while (true)
{
try {Thread.sleep(200);}
catch (Exception e) {}
}
}
};
}
public static void main(String argv[])
{
NestedExample e = new NestedExample();
}
}
"""
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
        expect = [ 'NestedExample$1', 'NestedExample$2', 'NestedExample' ]
        assert expect == classes, (expect, classes)
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
        expect = [ 'NestedExample$1', 'NestedExample$1$1', 'NestedExample' ]
        assert expect == classes, (expect, classes)
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '6')
        expect = [ 'NestedExample$1', 'NestedExample$1$1', 'NestedExample' ]
        assert expect == classes, (expect, classes)

    def test_private_inner_class_instantiation(self):
        """Test anonymous inner class generated by private instantiation"""
        input = """\
class test
{
test()
{
super();
new inner();
}
static class inner
{
private inner() {}
}
}
"""
        # This is what we *should* generate, apparently due to the
        # private instantiation of the inner class, but don't today.
        #expect = [ 'test$1', 'test$inner', 'test' ]
        # What our parser currently generates, which doesn't match
        # what the Java compiler actually generates.
        expect = [ 'test$inner', 'test' ]
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
        assert expect == classes, (expect, classes)
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
        assert expect == classes, (expect, classes)

    def test_floating_point_numbers(self):
        """Test floating-point numbers in the input stream"""
        input = """
// Broken.java
class Broken
{
/**
* Detected.
*/
Object anonymousInnerOK = new Runnable() { public void run () {} };
/**
* Detected.
*/
class InnerOK { InnerOK () { } }
{
System.out.println("a number: " + 1000.0 + "");
}
/**
* Not detected.
*/
Object anonymousInnerBAD = new Runnable() { public void run () {} };
/**
* Not detected.
*/
class InnerBAD { InnerBAD () { } }
}
"""
        expect = ['Broken$1', 'Broken$InnerOK', 'Broken$2', 'Broken$InnerBAD', 'Broken']
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
        assert expect == classes, (expect, classes)
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
        assert expect == classes, (expect, classes)

    # Renamed from the misspelled "test_genercis"; still matches the
    # 'test_' discovery prefix used by unittest and the __main__ block.
    def test_generics(self):
        """Test that generics don't interfere with detecting anonymous classes"""
        input = """\
import java.util.Date;
import java.util.Comparator;
public class Foo
{
public void foo()
{
Comparator<Date> comp = new Comparator<Date>()
{
static final long serialVersionUID = 1L;
public int compare(Date lhs, Date rhs)
{
return 0;
}
};
}
}
"""
        expect = [ 'Foo$1', 'Foo' ]
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.6')
        assert expect == classes, (expect, classes)
        pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '6')
        assert expect == classes, (expect, classes)
if __name__ == "__main__":
    # Collect every 'test_' method of each test case class into one suite
    # and hand it to the project's test runner.
    suite = unittest.TestSuite()
    for test_class in [parse_javaTestCase]:
        case_names = unittest.getTestCaseNames(test_class, 'test_')
        suite.addTests([test_class(case_name) for case_name in case_names])
    TestUnit.run(suite)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
procangroup/edx-platform | lms/djangoapps/shoppingcart/tests/payment_fake.py | 24 | 9650 | # -*- coding: utf-8 -*-
"""
Fake payment page for use in acceptance tests.
This view is enabled in the URLs by the feature flag `ENABLE_PAYMENT_FAKE`.
Note that you will still need to configure this view as the payment
processor endpoint in order for the shopping cart to use it:
settings.CC_PROCESSOR['CyberSource']['PURCHASE_ENDPOINT'] = "/shoppingcart/payment_fake"
You can configure the payment to indicate success or failure by sending a PUT
request to the view with param "success"
set to "success" or "failure". The view defaults to payment success.
"""
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View
from edxmako.shortcuts import render_to_response
# We use the same hashing function as the software under test,
# because it mainly uses standard libraries, and I want
# to avoid duplicating that code.
from shoppingcart.processors.CyberSource2 import processor_hash
class PaymentFakeView(View):
    """
    Fake payment page for use in acceptance tests.
    """

    # We store the payment status to respond with in a class
    # variable. In a multi-process Django app, this wouldn't work,
    # since processes don't share memory. Since Lettuce
    # runs one Django server process, this works for acceptance testing.
    PAYMENT_STATUS_RESPONSE = "success"

    @csrf_exempt
    def dispatch(self, *args, **kwargs):
        """
        Disable CSRF for these methods.
        """
        return super(PaymentFakeView, self).dispatch(*args, **kwargs)

    def post(self, request):
        """
        Render a fake payment page.
        This is an HTML form that:
        * Triggers a POST to `postpay_callback()` on submit.
        * Has hidden fields for all the data CyberSource sends to the callback.
        - Most of this data is duplicated from the request POST params (e.g. `amount`)
        - Other params contain fake data (always the same user name and address.
        - Still other params are calculated (signatures)
        * Serves an error page (HTML) with a 200 status code
        if the signatures are invalid. This is what CyberSource does.
        Since all the POST requests are triggered by HTML forms, this is
        equivalent to the CyberSource payment page, even though it's
        served by the shopping cart app.
        """
        # Mimic CyberSource: valid signature -> payment page, invalid -> error page.
        if self._is_signature_valid(request.POST):
            return self._payment_page_response(request.POST)
        else:
            return render_to_response('shoppingcart/test/fake_payment_error.html')

    def put(self, request):
        """
        Set the status of payment requests to success or failure.
        Accepts one POST param "status" that can be either "success"
        or "failure".
        """
        # The raw request body carries the status string directly
        # (not form-encoded); "decline" is also accepted.
        new_status = request.body
        if new_status not in ["success", "failure", "decline"]:
            return HttpResponseBadRequest()
        else:
            # Configure all views to respond with the new status
            PaymentFakeView.PAYMENT_STATUS_RESPONSE = new_status
            return HttpResponse()

    @staticmethod
    def _is_signature_valid(post_params):
        """
        Return a bool indicating whether the client sent
        us a valid signature in the payment page request.
        """
        # Retrieve the list of signed fields
        signed_fields = post_params.get('signed_field_names').split(',')
        # Calculate the public signature
        # Field order matters: the hash input is "k=v" pairs in the exact
        # order listed in signed_field_names.
        hash_val = ",".join([
            "{0}={1}".format(key, post_params[key])
            for key in signed_fields
        ])
        public_sig = processor_hash(hash_val)
        return public_sig == post_params.get('signature')

    @classmethod
    def response_post_params(cls, post_params):
        """
        Calculate the POST params we want to send back to the client.
        """
        # Map the configured test status to a CyberSource decision code.
        if cls.PAYMENT_STATUS_RESPONSE == "success":
            decision = "ACCEPT"
        elif cls.PAYMENT_STATUS_RESPONSE == "decline":
            decision = "DECLINE"
        else:
            decision = "REJECT"
        resp_params = {
            # Indicate whether the payment was successful
            "decision": decision,
            # Reflect back parameters we were sent by the client
            "req_amount": post_params.get('amount'),
            "auth_amount": post_params.get('amount'),
            "req_reference_number": post_params.get('reference_number'),
            "req_transaction_uuid": post_params.get('transaction_uuid'),
            "req_access_key": post_params.get('access_key'),
            "req_transaction_type": post_params.get('transaction_type'),
            "req_override_custom_receipt_page": post_params.get('override_custom_receipt_page'),
            "req_payment_method": post_params.get('payment_method'),
            "req_currency": post_params.get('currency'),
            "req_locale": post_params.get('locale'),
            "signed_date_time": post_params.get('signed_date_time'),
            # Fake data
            "req_bill_to_address_city": "Boston",
            "req_card_number": "xxxxxxxxxxxx1111",
            "req_bill_to_address_state": "MA",
            "req_bill_to_address_line1": "123 Fake Street",
            "utf8": u"✓",
            "reason_code": "100",
            "req_card_expiry_date": "01-2018",
            "req_bill_to_forename": "John",
            "req_bill_to_surname": "Doe",
            "auth_code": "888888",
            "req_bill_to_address_postal_code": "02139",
            "message": "Request was processed successfully.",
            "auth_response": "100",
            "auth_trans_ref_no": "84997128QYI23CJT",
            "auth_time": "2014-08-18T110622Z",
            "bill_trans_ref_no": "84997128QYI23CJT",
            "auth_avs_code": "X",
            "req_bill_to_email": "john@example.com",
            "auth_avs_code_raw": "I1",
            "req_profile_id": "0000001",
            "req_card_type": "001",
            "req_bill_to_address_country": "US",
            "transaction_id": "4083599817820176195662",
        }
        # Indicate which fields we are including in the signature
        # Order is important
        signed_fields = [
            'transaction_id', 'decision', 'req_access_key', 'req_profile_id',
            'req_transaction_uuid', 'req_transaction_type', 'req_reference_number',
            'req_amount', 'req_currency', 'req_locale',
            'req_payment_method', 'req_override_custom_receipt_page',
            'req_bill_to_forename', 'req_bill_to_surname',
            'req_bill_to_email', 'req_bill_to_address_line1',
            'req_bill_to_address_city', 'req_bill_to_address_state',
            'req_bill_to_address_country', 'req_bill_to_address_postal_code',
            'req_card_number', 'req_card_type', 'req_card_expiry_date',
            'message', 'reason_code', 'auth_avs_code',
            'auth_avs_code_raw', 'auth_response', 'auth_amount',
            'auth_code', 'auth_trans_ref_no', 'auth_time',
            'bill_trans_ref_no', 'signed_field_names', 'signed_date_time'
        ]
        # if decision is decline , cancel or error then remove auth_amount from signed_field.
        # list and also delete from resp_params dict
        if decision in ["DECLINE", "CANCEL", "ERROR"]:
            signed_fields.remove('auth_amount')
            del resp_params["auth_amount"]
        # Add the list of signed fields
        resp_params['signed_field_names'] = ",".join(signed_fields)
        # Calculate the public signature
        hash_val = ",".join([
            "{0}={1}".format(key, resp_params[key])
            for key in signed_fields
        ])
        resp_params['signature'] = processor_hash(hash_val)
        return resp_params

    def _payment_page_response(self, post_params):
        """
        Render the payment page to a response. This is an HTML form
        that triggers a POST request to `callback_url`.
        The POST params are described in the CyberSource documentation:
        http://apps.cybersource.com/library/documentation/dev_guides/Secure_Acceptance_WM/Secure_Acceptance_WM.pdf
        To figure out the POST params to send to the callback,
        we either:
        1) Use fake static data (e.g. always send user name "John Doe")
        2) Use the same info we received (e.g. send the same `amount`)
        3) Dynamically calculate signatures using a shared secret
        """
        callback_url = post_params.get('override_custom_receipt_page', '/shoppingcart/postpay_callback/')
        # Build the context dict used to render the HTML form,
        # filling in values for the hidden input fields.
        # These will be sent in the POST request to the callback URL.
        post_params_success = self.response_post_params(post_params)
        # Build the context dict for decline form,
        # remove the auth_amount value from here to
        # reproduce exact response coming from actual postback call
        post_params_decline = self.response_post_params(post_params)
        del post_params_decline["auth_amount"]
        post_params_decline["decision"] = 'DECLINE'
        context_dict = {
            # URL to send the POST request to
            "callback_url": callback_url,
            # POST params embedded in the HTML success form
            'post_params_success': post_params_success,
            # POST params embedded in the HTML decline form
            'post_params_decline': post_params_decline
        }
        return render_to_response('shoppingcart/test/fake_payment_page.html', context_dict)
| agpl-3.0 |
wshallum/ansible | lib/ansible/modules/packaging/os/yum_repository.py | 19 | 25228 | #!/usr/bin/python
# encoding: utf-8
# (c) 2015-2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves import configparser
# Module release metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {
    'status': ['stableinterface'],
    'supported_by': 'core',
    'version': '1.0',
}
DOCUMENTATION = '''
---
module: yum_repository
author: Jiri Tyr (@jtyr)
version_added: '2.1'
short_description: Add and remove YUM repositories
description:
- Add or remove YUM repositories in RPM-based Linux distributions.
options:
async:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- If set to C(yes) Yum will download packages and metadata from this
repo in parallel, if possible.
bandwidth:
required: false
default: 0
description:
- Maximum available network bandwidth in bytes/second. Used with the
I(throttle) option.
- If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
throttling will be disabled. If I(throttle) is expressed as a data rate
(bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
throttling).
baseurl:
required: false
default: null
description:
- URL to the directory where the yum repository's 'repodata' directory
lives.
- This or the I(mirrorlist) parameter is required if I(state) is set to
C(present).
cost:
required: false
default: 1000
description:
- Relative cost of accessing this repository. Useful for weighing one
repo's packages as greater/less than any other.
deltarpm_metadata_percentage:
required: false
default: 100
description:
- When the relative size of deltarpm metadata vs pkgs is larger than
this, deltarpm metadata is not downloaded from the repo. Note that you
can give values over C(100), so C(200) means that the metadata is
required to be half the size of the packages. Use C(0) to turn off
this check, and always download metadata.
deltarpm_percentage:
required: false
default: 75
description:
- When the relative size of delta vs pkg is larger than this, delta is
not used. Use C(0) to turn off delta rpm processing. Local repositories
(with file:// I(baseurl)) have delta rpms turned off by default.
description:
required: false
default: null
description:
- A human readable string describing the repository.
- This parameter is only required if I(state) is set to C(present).
enabled:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- This tells yum whether or not use this repository.
enablegroups:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Determines whether yum will allow the use of package groups for this
repository.
exclude:
required: false
default: null
description:
- List of packages to exclude from updates or installs. This should be a
space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed.
- The list can also be a regular YAML array.
failovermethod:
required: false
choices: [roundrobin, priority]
default: roundrobin
description:
- C(roundrobin) randomly selects a URL out of the list of URLs to start
with and proceeds through each of them as it encounters a failure
contacting the host.
- C(priority) starts from the first I(baseurl) listed and reads through
them sequentially.
file:
required: false
default: null
description:
- File to use to save the repo in. Defaults to the value of I(name).
gpgcakey:
required: false
default: null
description:
- A URL pointing to the ASCII-armored CA key file for the repository.
gpgcheck:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Tells yum whether or not it should perform a GPG signature check on
packages.
gpgkey:
required: false
default: null
description:
- A URL pointing to the ASCII-armored GPG key file for the repository.
http_caching:
required: false
choices: [all, packages, none]
default: all
description:
- Determines how upstream HTTP caches are instructed to handle any HTTP
downloads that Yum does.
- C(all) means that all HTTP downloads should be cached.
- C(packages) means that only RPM package downloads should be cached (but
not repository metadata downloads).
- C(none) means that no HTTP downloads should be cached.
include:
required: false
default: null
description:
- Include external configuration file. Both, local path and URL is
supported. Configuration file will be inserted at the position of the
I(include=) line. Included files may contain further include lines.
Yum will abort with an error if an inclusion loop is detected.
includepkgs:
required: false
default: null
description:
- List of packages you want to only use from a repository. This should be
a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed. Substitution variables (e.g. C($releasever)) are honored
here.
- The list can also be a regular YAML array.
ip_resolve:
required: false
choices: [4, 6, IPv4, IPv6, whatever]
default: whatever
description:
- Determines how yum resolves host names.
- C(4) or C(IPv4) - resolve to IPv4 addresses only.
- C(6) or C(IPv6) - resolve to IPv6 addresses only.
keepalive:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- This tells yum whether or not HTTP/1.1 keepalive should be used with
this repository. This can improve transfer speeds by using one
connection when downloading multiple files from a repository.
keepcache:
required: false
choices: ['0', '1']
default: '1'
description:
- Either C(1) or C(0). Determines whether or not yum keeps the cache of
headers and packages after successful installation.
metadata_expire:
required: false
default: 21600
description:
- Time (in seconds) after which the metadata will expire.
- Default value is 6 hours.
metadata_expire_filter:
required: false
choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
default: 'read-only:present'
description:
- Filter the I(metadata_expire) time, allowing a trade of speed for
accuracy if a command doesn't require it. Each yum command can specify
that it requires a certain level of timeliness quality from the remote
repos. from "I'm about to install/upgrade, so this better be current"
to "Anything that's available is good enough".
- C(never) - Nothing is filtered, always obey I(metadata_expire).
- C(read-only:past) - Commands that only care about past information are
filtered from metadata expiring. Eg. I(yum history) info (if history
needs to lookup anything about a previous transaction, then by
definition the remote package was available in the past).
- C(read-only:present) - Commands that are balanced between past and
future. Eg. I(yum list yum).
- C(read-only:future) - Commands that are likely to result in running
other commands which will require the latest metadata. Eg.
I(yum check-update).
- Note that this option does not override "yum clean expire-cache".
metalink:
required: false
default: null
description:
- Specifies a URL to a metalink file for the repomd.xml, a list of
mirrors for the entire repository are generated by converting the
mirrors for the repomd.xml file to a I(baseurl).
mirrorlist:
required: false
default: null
description:
- Specifies a URL to a file containing a list of baseurls.
- This or the I(baseurl) parameter is required if I(state) is set to
C(present).
mirrorlist_expire:
required: false
default: 21600
description:
- Time (in seconds) after which the mirrorlist locally cached will
expire.
- Default value is 6 hours.
name:
required: true
description:
- Unique repository ID.
- This parameter is only required if I(state) is set to C(present) or
C(absent).
params:
required: false
default: null
description:
- Option used to allow the user to overwrite any of the other options.
To remove an option, set the value of the option to C(null).
password:
required: false
default: null
description:
- Password to use with the username for basic authentication.
priority:
required: false
default: 99
description:
- Enforce ordered protection of repositories. The value is an integer
from 1 to 99.
- This option only works if the YUM Priorities plugin is installed.
protect:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Protect packages from updates from other repositories.
proxy:
required: false
default: null
description:
- URL to the proxy server that yum should use. Set to C(_none_) to
disable the global proxy setting.
  proxy_password:
    required: false
    default: null
    description:
      - Password to use for the proxy.
  proxy_username:
    required: false
    default: null
    description:
      - Username to use for the proxy.
repo_gpgcheck:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- This tells yum whether or not it should perform a GPG signature check
on the repodata from this repository.
reposdir:
required: false
default: /etc/yum.repos.d
description:
- Directory where the C(.repo) files will be stored.
retries:
required: false
default: 10
description:
- Set the number of times any attempt to retrieve a file should retry
before returning an error. Setting this to C(0) makes yum try forever.
s3_enabled:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Enables support for S3 repositories.
- This option only works if the YUM S3 plugin is installed.
skip_if_unavailable:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- If set to C(yes) yum will continue running if this repository cannot be
contacted for any reason. This should be set carefully as all repos are
consulted for any given command.
ssl_check_cert_permissions:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Whether yum should check the permissions on the paths for the
certificates on the repository (both remote and local).
- If we can't read any of the files then yum will force
I(skip_if_unavailable) to be C(yes). This is most useful for non-root
processes which use yum on repos that have client cert files which are
readable only by root.
sslcacert:
required: false
default: null
description:
- Path to the directory containing the databases of the certificate
authorities yum should use to verify SSL certificates.
sslclientcert:
required: false
default: null
description:
- Path to the SSL client certificate yum should use to connect to
repos/remote sites.
sslclientkey:
required: false
default: null
description:
- Path to the SSL client key yum should use to connect to repos/remote
sites.
sslverify:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Defines whether yum should verify SSL certificates/hosts at all.
state:
required: false
choices: [absent, present]
default: present
description:
- State of the repo file.
throttle:
required: false
default: null
description:
- Enable bandwidth throttling for downloads.
- This option can be expressed as a absolute data rate in bytes/sec. An
SI prefix (k, M or G) may be appended to the bandwidth value.
timeout:
required: false
default: 30
description:
- Number of seconds to wait for a connection before timing out.
ui_repoid_vars:
required: false
default: releasever basearch
description:
- When a repository id is displayed, append these yum variables to the
string if they are used in the I(baseurl)/etc. Variables are appended
in the order listed (and found).
username:
required: false
default: null
description:
- Username to use for basic authentication to a repo or really any url.
extends_documentation_fragment:
- files
notes:
- All comments will be removed if modifying an existing repo file.
- Section order is preserved in an existing repo file.
- Parameters in a section are ordered alphabetically in an existing repo
file.
- The repo file will be automatically deleted if it contains no repository.
'''
EXAMPLES = '''
- name: Add repository
yum_repository:
name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: Add multiple repositories into the same file (1/2)
yum_repository:
name: epel
description: EPEL YUM repo
file: external_repos
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
gpgcheck: no
- name: Add multiple repositories into the same file (2/2)
yum_repository:
name: rpmforge
description: RPMforge YUM repo
file: external_repos
baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
enabled: no
- name: Remove repository
yum_repository:
name: epel
state: absent
- name: Remove repository from a specific repo file
yum_repository:
name: epel
file: external_repos
state: absent
#
# Allow to overwrite the yum_repository parameters by defining the parameters
# as a variable in the defaults or vars file:
#
# my_role_somerepo_params:
# # Disable GPG checking
# gpgcheck: no
# # Remove the gpgkey option
# gpgkey: null
#
- name: Add Some repo
yum_repository:
name: somerepo
description: Some YUM repo
baseurl: http://server.com/path/to/the/repo
gpgkey: http://server.com/keys/somerepo.pub
gpgcheck: yes
params: "{{ my_role_somerepo_params }}"
'''
RETURN = '''
repo:
description: repository name
returned: success
type: string
sample: "epel"
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
class YumRepo(object):
    """Manage a single repository section inside a yum ``.repo`` file.

    The constructor locates (and, when present, parses) the target repo
    file; ``add``/``remove`` manipulate the section in memory; ``save``
    writes the result back to disk, deleting the file when it would end
    up containing no repository at all.
    """

    # Instance attributes -- all assigned in __init__
    module = None    # AnsibleModule instance, needed for fail_json()
    params = None    # shortcut for self.module.params
    section = None   # repo file section name; always the repoid
    repofile = None  # configparser object holding the parsed repo file

    # List of parameters which will be allowed in the repo file output
    allowed_params = [
        'async',
        'bandwidth',
        'baseurl',
        'cost',
        'deltarpm_metadata_percentage',
        'deltarpm_percentage',
        'enabled',
        'enablegroups',
        'exclude',
        'failovermethod',
        'gpgcakey',
        'gpgcheck',
        'gpgkey',
        'http_caching',
        'include',
        'includepkgs',
        'ip_resolve',
        'keepalive',
        'keepcache',
        'metadata_expire',
        'metadata_expire_filter',
        'metalink',
        'mirrorlist',
        'mirrorlist_expire',
        'name',
        'password',
        'priority',
        'protect',
        'proxy',
        'proxy_password',
        'proxy_username',
        'repo_gpgcheck',
        'retries',
        's3_enabled',
        'skip_if_unavailable',
        'sslcacert',
        'ssl_check_cert_permissions',
        'sslclientcert',
        'sslclientkey',
        'sslverify',
        'throttle',
        'timeout',
        'ui_repoid_vars',
        'username']

    # List of parameters which can be a list
    list_params = ['exclude', 'includepkgs']

    def __init__(self, module):
        # To be able to use fail_json
        self.module = module
        # Shortcut for the params
        self.params = self.module.params
        # Section is always the repoid
        self.section = self.params['repoid']

        # Use a fresh parser per instance. A class-level parser would be
        # shared by every YumRepo instance and leak sections between them.
        self.repofile = configparser.RawConfigParser()

        # Check if repo directory exists
        repos_dir = self.params['reposdir']
        if not os.path.isdir(repos_dir):
            self.module.fail_json(
                msg="Repo directory '%s' does not exist." % repos_dir)

        # Set dest; also used to set dest parameter for the FS attributes
        self.params['dest'] = os.path.join(
            repos_dir, "%s.repo" % self.params['file'])

        # Read the repo file if it exists
        if os.path.isfile(self.params['dest']):
            self.repofile.read(self.params['dest'])

    def add(self):
        """(Re)create the section and fill it with the allowed parameters."""
        # Remove already existing repo and create a new one
        if self.repofile.has_section(self.section):
            self.repofile.remove_section(self.section)

        # Add section
        self.repofile.add_section(self.section)

        # Baseurl/mirrorlist is not required because for removal we need only
        # the repo name. This is why we check if the baseurl/mirrorlist is
        # defined.
        if (self.params['baseurl'], self.params['mirrorlist']) == (None, None):
            self.module.fail_json(
                msg='Parameter "baseurl" or "mirrorlist" is required for '
                'adding a new repo.')

        # Set options
        for key, value in sorted(self.params.items()):
            if key in self.list_params and isinstance(value, list):
                # Join items into one string for specific parameters
                value = ' '.join(value)
            elif isinstance(value, bool):
                # Represent booleans as "0"/"1"; configparser on Python 3
                # only accepts string option values, and the rendered file
                # is identical either way.
                value = str(int(value))

            # Set the value only if it was defined (default is None)
            if value is not None and key in self.allowed_params:
                self.repofile.set(self.section, key, value)

    def save(self):
        """Write the repo file to disk, or delete it when no sections remain."""
        if len(self.repofile.sections()):
            # Write data into the file
            try:
                fd = open(self.params['dest'], 'w')
            except IOError:
                e = get_exception()
                self.module.fail_json(
                    msg="Cannot open repo file %s." % self.params['dest'],
                    details=str(e))

            self.repofile.write(fd)

            try:
                fd.close()
            except IOError:
                e = get_exception()
                self.module.fail_json(
                    msg="Cannot write repo file %s." % self.params['dest'],
                    details=str(e))
        else:
            # Remove the file if there are not repos
            try:
                os.remove(self.params['dest'])
            except OSError:
                e = get_exception()
                self.module.fail_json(
                    msg=(
                        "Cannot remove empty repo file %s." %
                        self.params['dest']),
                    details=str(e))

    def remove(self):
        """Remove the section from the in-memory repo file, if present."""
        # Remove section if exists
        if self.repofile.has_section(self.section):
            self.repofile.remove_section(self.section)

    def dump(self):
        """Render the whole in-memory repo file as a string (sections and
        keys sorted alphabetically)."""
        repo_string = ""

        # Compose the repo file
        for section in sorted(self.repofile.sections()):
            repo_string += "[%s]\n" % section

            for key, value in sorted(self.repofile.items(section)):
                repo_string += "%s = %s\n" % (key, value)

            repo_string += "\n"

        return repo_string
def main():
    """Ansible module entry point: create, update or remove a yum repo file
    entry according to the module parameters."""
    # The argument spec is assembled first so that the 'async' option can be
    # added by subscript: 'async' is a reserved keyword since Python 3.7 and
    # can no longer be passed as a keyword argument to dict().
    argument_spec = dict(
        bandwidth=dict(),
        baseurl=dict(),
        cost=dict(),
        deltarpm_metadata_percentage=dict(),
        deltarpm_percentage=dict(),
        description=dict(),
        enabled=dict(type='bool'),
        enablegroups=dict(type='bool'),
        exclude=dict(),
        failovermethod=dict(choices=['roundrobin', 'priority']),
        file=dict(),
        gpgcakey=dict(),
        gpgcheck=dict(type='bool'),
        gpgkey=dict(),
        http_caching=dict(choices=['all', 'packages', 'none']),
        include=dict(),
        includepkgs=dict(),
        ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
        keepalive=dict(type='bool'),
        keepcache=dict(choices=['0', '1']),
        metadata_expire=dict(),
        metadata_expire_filter=dict(
            choices=[
                'never',
                'read-only:past',
                'read-only:present',
                'read-only:future']),
        metalink=dict(),
        mirrorlist=dict(),
        mirrorlist_expire=dict(),
        name=dict(required=True),
        params=dict(type='dict'),
        password=dict(no_log=True),
        priority=dict(),
        protect=dict(type='bool'),
        proxy=dict(),
        proxy_password=dict(no_log=True),
        proxy_username=dict(),
        repo_gpgcheck=dict(type='bool'),
        reposdir=dict(default='/etc/yum.repos.d', type='path'),
        retries=dict(),
        s3_enabled=dict(type='bool'),
        skip_if_unavailable=dict(type='bool'),
        sslcacert=dict(),
        ssl_check_cert_permissions=dict(type='bool'),
        sslclientcert=dict(),
        sslclientkey=dict(),
        sslverify=dict(type='bool'),
        state=dict(choices=['present', 'absent'], default='present'),
        throttle=dict(),
        timeout=dict(),
        ui_repoid_vars=dict(),
        username=dict(),
    )
    argument_spec['async'] = dict(type='bool')

    # Module settings
    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        supports_check_mode=True,
    )

    # Update module parameters by user's parameters if defined
    if 'params' in module.params and isinstance(module.params['params'], dict):
        module.params.update(module.params['params'])
        # Remove the params
        module.params.pop('params', None)

    name = module.params['name']
    state = module.params['state']

    # Check if required parameters are present
    if state == 'present':
        if (
                module.params['baseurl'] is None and
                module.params['mirrorlist'] is None):
            module.fail_json(
                msg="Parameter 'baseurl' or 'mirrorlist' is required.")
        if module.params['description'] is None:
            module.fail_json(
                msg="Parameter 'description' is required.")

    # Rename "name" and "description" to ensure correct key sorting:
    # the repo section is keyed by the repoid while the INI "name" option
    # carries the human-readable description.
    module.params['repoid'] = module.params['name']
    module.params['name'] = module.params['description']
    del module.params['description']

    # Define repo file name if it doesn't exist
    if module.params['file'] is None:
        module.params['file'] = module.params['repoid']

    # Instantiate the YumRepo object (parses any pre-existing repo file)
    yumrepo = YumRepo(module)

    # Get repo status before change
    diff = {
        'before_header': yumrepo.params['dest'],
        'before': yumrepo.dump(),
        'after_header': yumrepo.params['dest'],
        'after': ''
    }

    # Perform action depending on the state
    if state == 'present':
        yumrepo.add()
    elif state == 'absent':
        yumrepo.remove()

    # Get repo status after change
    diff['after'] = yumrepo.dump()

    # Compare repo states; changed is True when the rendered file differs
    changed = diff['before'] != diff['after']

    # Save the file only if not in check mode and if there was a change
    if not module.check_mode and changed:
        yumrepo.save()

        # Change file attributes if needed (the file may have been deleted
        # by save() when it ended up empty, hence the isfile check)
        if os.path.isfile(module.params['dest']):
            file_args = module.load_file_common_arguments(module.params)
            changed = module.set_fs_attributes_if_different(file_args, changed)

    # Print status of the change
    module.exit_json(changed=changed, repo=name, state=state, diff=diff)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
bastik/youtube-dl | youtube_dl/extractor/__init__.py | 8 | 21846 | from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import (
AdobeTVIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import (
ARDIE,
ARDMediathekIE,
SportschauIE,
)
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbc import (
BBCCoUkIE,
BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
DailymotionCloudIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .dcn import DCNIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dramafever import (
DramaFeverIE,
DramaFeverSeriesIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .esri import EsriVideoIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import ImgurIE
from .ina import InaIE
from .indavideo import (
IndavideoIE,
IndavideoEmbedIE,
)
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .kuwo import (
KuwoIE,
KuwoAlbumIE,
KuwoChartIE,
KuwoSingerIE,
KuwoCategoryIE,
KuwoMvIE,
)
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lecture2go import Lecture2GoIE
from .letv import (
LetvIE,
LetvTvIE,
LetvPlaylistIE
)
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .megavideoz import MegaVideozIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .mwave import MwaveIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import MyviIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
MSNBCIE,
)
from .ndr import (
NDRIE,
NJoyIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .nerdist import NerdistIE
from .neteasemusic import (
NetEaseMusicIE,
NetEaseMusicAlbumIE,
NetEaseMusicSingerIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicDjRadioIE,
)
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
)
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLIE,
NHLNewsIE,
NHLVideocenterIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowtv import NowTVIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
VPROIE,
WNLIE
)
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKTVIE,
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
)
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .openfilm import OpenFilmIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .periscope import (
PeriscopeIE,
QuickscopeIE,
)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .pinkbike import PinkbikeIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .pluralsight import (
PluralsightIE,
PluralsightCourseIE,
)
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .primesharetv import PrimeShareTVIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
QQMusicPlaylistIE,
)
from .quickvid import QuickVidIE
from .r7 import R7IE
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .rds import RDSIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlNlIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .rtvnh import RTVNHIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .sandia import SandiaIE
from .safari import (
SafariIE,
SafariCourseIE,
)
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shahid import ShahidIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snagfilms import (
SnagFilmsIE,
SnagFilmsEmbedIE,
)
from .snotr import SnotrIE
from .sohu import SohuIE
from .soompi import (
SoompiIE,
SoompiShowIE,
)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .space import SpaceIE
from .spankbang import SpankBangIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import (
SportBoxIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
from .srf import SrfIE
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svt import (
SVTIE,
SVTPlayIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import (
ThePlatformIE,
ThePlatformFeedIE,
)
from .thesixtyone import TheSixtyOneIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
)
from .tv4 import TV4IE
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentytwotracks import (
TwentyTwoTracksIE,
TwentyTwoTracksGenreIE
)
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchBookmarksIE,
TwitchStreamIE,
)
from .twitter import TwitterCardIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ultimedia import UltimediaIE
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vessel import VesselIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .yinyuetai import YinYueTaiIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
# Collect every extractor class imported above: any module-level name
# ending in 'IE', excluding GenericIE so that it can be appended last.
_ALL_CLASSES = [
    klass
    for name, klass in globals().items()
    if name.endswith('IE') and name != 'GenericIE'
]
# GenericIE goes last: per gen_extractors(), the first matching extractor
# handles a URL, so the generic fallback must only be tried after all
# specific extractors have had a chance.
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
    """Build and return one instance of every supported extractor.

    Order is significant: the first extractor whose pattern matches a
    URL is the one that handles it.
    """
    instances = []
    for extractor_class in _ALL_CLASSES:
        instances.append(extractor_class())
    return instances
def list_extractors(age_limit):
    """
    Return a list of extractors that are suitable for the given age,
    sorted by extractor ID.
    """
    suitable = [extractor for extractor in gen_extractors()
                if extractor.is_suitable(age_limit)]
    suitable.sort(key=lambda extractor: extractor.IE_NAME.lower())
    return suitable
def get_info_extractor(ie_name):
    """Return the info extractor class registered under ``ie_name``.

    Extractor classes follow the '<Name>IE' naming convention, so the
    lookup appends the 'IE' suffix and resolves the module global.
    Raises KeyError if no such extractor exists.
    """
    class_name = '%sIE' % ie_name
    return globals()[class_name]
| unlicense |
eHealthAfrica/onadata | onadata/libs/utils/backup_tools.py | 13 | 5473 | import codecs
from datetime import datetime
import errno
import os
import shutil
import sys
import tempfile
import zipfile
from time import sleep
from onadata.apps.logger.import_tools import django_file
from onadata.apps.logger.models import Instance
from onadata.libs.utils.logger_tools import create_instance
from onadata.libs.utils.model_tools import queryset_iterator
DATE_FORMAT = "%Y-%m-%d-%H-%M-%S"
def _date_created_from_filename(filename):
base_name, ext = os.path.splitext(filename)
parts = base_name.split("-")
if len(parts) < 6:
raise ValueError(
"Inavlid filename - it must be in the form"
" 'YYYY-MM-DD-HH-MM-SS[-i].xml'")
parts_dict = dict(
zip(["year", "month", "day", "hour", "min", "sec"], parts))
return datetime.strptime(
"%(year)s-%(month)s-%(day)s-%(hour)s-%(min)s-%(sec)s" %
parts_dict, DATE_FORMAT)
def create_zip_backup(zip_output_file, user, xform=None):
    """Export a user's submissions as XML files inside a ZIP archive.

    Layout inside the archive:

        instances/YYYY/MM/DD/YYYY-MM-DD-HH-MM-SS[-i].xml

    :param zip_output_file: path of the ZIP file to write.
    :param user: owner whose submissions are exported.
    :param xform: optional XForm to restrict the export to.
    """
    # Build the directory structure in a temp dir, zip it when done, and
    # always clean the temp tree up, even if something fails part-way.
    tmp_dir_path = tempfile.mkdtemp()
    try:
        instances_path = os.path.join(tmp_dir_path, "instances")
        qs = Instance.objects.filter(xform__user=user)
        if xform:
            qs = qs.filter(xform=xform)
        num_instances = qs.count()
        # Avoid ZeroDivisionError in the progress output when there is
        # nothing to export.
        progress_total = float(num_instances) or 1.0
        done = 0
        sys.stdout.write("Creating XML Instances\n")
        for instance in queryset_iterator(qs, 100):
            # Derive the sub-directory path from the submission time.
            date_time_str = instance.date_created.strftime(DATE_FORMAT)
            date_parts = date_time_str.split("-")
            sub_dirs = os.path.join(*date_parts[:3])
            full_path = os.path.join(instances_path, sub_dirs)
            if not os.path.exists(full_path):
                try:
                    os.makedirs(full_path)
                except OSError as e:
                    # The directory may have been created concurrently.
                    if e.errno != errno.EEXIST:
                        raise
            full_xml_path = os.path.join(full_path, date_time_str + ".xml")
            # Disambiguate submissions that share the same second.
            file_index = 1
            while os.path.exists(full_xml_path):
                full_xml_path = os.path.join(
                    full_path, "%s-%d.xml" % (date_time_str, file_index))
                file_index += 1
            # Write the instance XML.
            with codecs.open(full_xml_path, "wb", "utf-8") as f:
                f.write(instance.xml)
            done += 1
            sys.stdout.write("\r%.2f %% done" % (
                float(done) / progress_total * 100))
            sys.stdout.flush()
            sleep(0)
        # Write the ZIP archive; 'with' guarantees the handle is closed
        # even if zf.write() raises.
        sys.stdout.write("\nWriting to ZIP archive.\n")
        done = 0
        with zipfile.ZipFile(zip_output_file, "w") as zf:
            for dir_path, dir_names, file_names in os.walk(tmp_dir_path):
                for file_name in file_names:
                    # Strip the temp-dir prefix so archive paths are
                    # relative.
                    archive_path = dir_path.replace(
                        tmp_dir_path + os.path.sep, "", 1)
                    zf.write(os.path.join(dir_path, file_name),
                             os.path.join(archive_path, file_name))
                    done += 1
                    sys.stdout.write("\r%.2f %% done" % (
                        float(done) / progress_total * 100))
                    sys.stdout.flush()
                    sleep(0)
    finally:
        # Remove the temporary tree unconditionally (the original leaked
        # it when any step above raised).
        shutil.rmtree(tmp_dir_path)
    sys.stdout.write("\nBackup saved to %s\n" % zip_output_file)
def restore_backup_from_zip(zip_file_path, username):
    """Extract a backup ZIP archive and restore the submissions in it.

    :param zip_file_path: path to a ZIP created by create_zip_backup.
    :param username: user the submissions are restored for.
    :return: (num_instances, num_restored) tuple, or None if the file is
        not a valid ZIP archive.
    """
    # Create the temp dir *before* the try block: if mkdtemp() itself
    # failed, the finally clause would otherwise raise NameError on the
    # unbound name and mask the real error.
    temp_directory = tempfile.mkdtemp()
    try:
        zf = zipfile.ZipFile(zip_file_path)
        try:
            zf.extractall(temp_directory)
        finally:
            # Always release the archive handle.
            zf.close()
    except zipfile.BadZipfile:
        sys.stderr.write("Bad zip archive.\n")
    else:
        return restore_backup_from_path(temp_directory, username, "backup")
    finally:
        shutil.rmtree(temp_directory)
def restore_backup_from_xml_file(xml_instance_path, username):
    """Restore a single XML submission for ``username``.

    Returns 1 if the instance was created, 0 on failure; errors are
    written to stderr rather than raised so a batch restore can continue.
    """
    # check if its a valid xml instance
    file_name = os.path.basename(xml_instance_path)
    xml_file = django_file(
        xml_instance_path,
        field_name="xml_file",
        content_type="text/xml")
    # No media files are restored here; only the XML submission itself.
    media_files = []
    try:
        # The backup filename encodes the original creation timestamp.
        date_created = _date_created_from_filename(file_name)
    except ValueError as e:
        sys.stderr.write(
            "Couldn't determine date created from filename: '%s'\n" %
            file_name)
        # Fall back to "now" so the restore can still proceed.
        date_created = datetime.now()
    sys.stdout.write("Creating instance from '%s'\n" % file_name)
    try:
        create_instance(
            username, xml_file, media_files,
            date_created_override=date_created)
        return 1
    except Exception as e:
        # Report and skip: a single bad submission must not abort the
        # whole restore.
        sys.stderr.write(
            "Could not restore %s, create instance said: %s\n" %
            (file_name, e))
        return 0
def restore_backup_from_path(dir_path, username, status):
    """
    Only restores xml submissions, media files are assumed to still be in
    storage and will be retrieved by the filename stored within the submission
    """
    total = 0
    restored = 0
    # Walk the extracted tree; every file found is treated as a
    # candidate XML submission.
    for current_dir, _sub_dirs, file_names in os.walk(dir_path):
        for file_name in file_names:
            total += 1
            restored += restore_backup_from_xml_file(
                os.path.join(current_dir, file_name), username)
    return total, restored
| bsd-2-clause |
hopeall/odoo | addons/account/report/account_journal.py | 255 | 10035 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class journal_print(report_sxw.rml_parse, common_report_header):
    """RML parser for the account journal report.

    Exposes journal move lines, debit/credit totals and tax-code totals
    to the report template through ``localcontext``.
    """
    def __init__(self, cr, uid, name, context=None):
        if context is None:
            context = {}
        super(journal_print, self).__init__(cr, uid, name, context=context)
        self.context = context
        # Filled in by set_context() from the wizard form, or from the
        # account.journal.period records being printed directly.
        self.period_ids = []
        self.last_move_id = False
        self.journal_ids = []
        self.sort_selection = 'am.name'
        # Helper callables/values made available to the report template.
        self.localcontext.update({
            'time': time,
            'lines': self.lines,
            'sum_debit': self._sum_debit,
            'sum_credit': self._sum_credit,
            'get_start_period': self.get_start_period,
            'get_end_period': self.get_end_period,
            'get_account': self._get_account,
            'get_filter': self._get_filter,
            'get_start_date': self._get_start_date,
            'get_end_date': self._get_end_date,
            'get_fiscalyear': self._get_fiscalyear,
            'display_currency':self._display_currency,
            'get_sortby': self._get_sortby,
            'get_target_move': self._get_target_move,
            'check_last_move_id': self.check_last_move_id,
            'set_last_move_id': self.set_last_move_id,
            'tax_codes': self.tax_codes,
            'sum_vat': self._sum_vat,
        })
    def set_context(self, objects, data, ids, report_type=None):
        """Initialise the period/journal filters before rendering.

        Handles both invocation from the report wizard (model
        'ir.ui.menu') and direct printing of account.journal.period
        records.
        """
        obj_move = self.pool.get('account.move.line')
        new_ids = ids
        self.query_get_clause = ''
        self.target_move = data['form'].get('target_move', 'all')
        if (data['model'] == 'ir.ui.menu'):
            self.period_ids = tuple(data['form']['periods'])
            self.journal_ids = tuple(data['form']['journal_ids'])
            new_ids = data['form'].get('active_ids', [])
            self.query_get_clause = 'AND '
            self.query_get_clause += obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
            self.sort_selection = data['form'].get('sort_selection', 'date')
            objects = self.pool.get('account.journal.period').browse(self.cr, self.uid, new_ids)
        elif new_ids:
            #in case of direct access from account.journal.period object, we need to set the journal_ids and periods_ids
            self.cr.execute('SELECT period_id, journal_id FROM account_journal_period WHERE id IN %s', (tuple(new_ids),))
            res = self.cr.fetchall()
            self.period_ids, self.journal_ids = zip(*res)
        return super(journal_print, self).set_context(objects, data, ids, report_type=report_type)
    def set_last_move_id(self, move_id):
        # Template helper: remember the move currently being rendered.
        self.last_move_id = move_id
    def check_last_move_id(self, move_id):
        '''
        return True if we need to draw a gray line above this line, used to separate moves
        '''
        if self.last_move_id:
            return not(self.last_move_id == move_id)
        return False
    def tax_codes(self, period_id, journal_id):
        """Return the tax codes used by non-draft move lines of the
        given period/journal, ordered by their 'code' field."""
        # NOTE(review): ids_journal_period is computed but never used.
        ids_journal_period = self.pool.get('account.journal.period').search(self.cr, self.uid,
            [('journal_id', '=', journal_id), ('period_id', '=', period_id)])
        self.cr.execute(
            'select distinct tax_code_id from account_move_line ' \
            'where period_id=%s and journal_id=%s and tax_code_id is not null and state<>\'draft\'',
            (period_id, journal_id)
        )
        ids = map(lambda x: x[0], self.cr.fetchall())
        tax_code_ids = []
        if ids:
            # Second query only to order the codes by 'code'.
            self.cr.execute('select id from account_tax_code where id in %s order by code', (tuple(ids),))
            tax_code_ids = map(lambda x: x[0], self.cr.fetchall())
        tax_codes = self.pool.get('account.tax.code').browse(self.cr, self.uid, tax_code_ids)
        return tax_codes
    def _sum_vat(self, period_id, journal_id, tax_code_id):
        """Sum of tax_amount for one tax code in the given period/journal."""
        self.cr.execute('select sum(tax_amount) from account_move_line where ' \
                'period_id=%s and journal_id=%s and tax_code_id=%s and state<>\'draft\'',
                (period_id, journal_id, tax_code_id))
        return self.cr.fetchone()[0] or 0.0
    def _sum_debit(self, period_id=False, journal_id=False):
        """Total debit for the given period(s)/journal(s).

        Both arguments accept a single id or a collection; they default
        to the periods/journals selected in set_context().
        """
        if journal_id and isinstance(journal_id, int):
            journal_id = [journal_id]
        if period_id and isinstance(period_id, int):
            period_id = [period_id]
        if not journal_id:
            journal_id = self.journal_ids
        if not period_id:
            period_id = self.period_ids
        if not (period_id and journal_id):
            return 0.0
        # Respect the wizard's target-move selection.
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        self.cr.execute('SELECT SUM(debit) FROM account_move_line l, account_move am '
                        'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s ' + self.query_get_clause + ' ',
                        (tuple(move_state), tuple(period_id), tuple(journal_id)))
        return self.cr.fetchone()[0] or 0.0
    def _sum_credit(self, period_id=False, journal_id=False):
        """Total credit for the given period(s)/journal(s); mirrors
        _sum_debit."""
        if journal_id and isinstance(journal_id, int):
            journal_id = [journal_id]
        if period_id and isinstance(period_id, int):
            period_id = [period_id]
        if not journal_id:
            journal_id = self.journal_ids
        if not period_id:
            period_id = self.period_ids
        if not (period_id and journal_id):
            return 0.0
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        self.cr.execute('SELECT SUM(l.credit) FROM account_move_line l, account_move am '
                        'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s '+ self.query_get_clause+'',
                        (tuple(move_state), tuple(period_id), tuple(journal_id)))
        return self.cr.fetchone()[0] or 0.0
    def lines(self, period_id, journal_id=False):
        """Return browse records of the move lines to print, in the
        selected sort order.

        Side effect: marks the matching draft account.journal.period
        records as 'printed'.
        """
        if not journal_id:
            journal_id = self.journal_ids
        else:
            journal_id = [journal_id]
        obj_mline = self.pool.get('account.move.line')
        self.cr.execute('update account_journal_period set state=%s where journal_id IN %s and period_id=%s and state=%s', ('printed', self.journal_ids, period_id, 'draft'))
        # The raw UPDATE bypasses the ORM, so invalidate its cache.
        self.pool.get('account.journal.period').invalidate_cache(self.cr, self.uid, ['state'], context=self.context)
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        self.cr.execute('SELECT l.id FROM account_move_line l, account_move am WHERE l.move_id=am.id AND am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' ORDER BY '+ self.sort_selection + ', l.move_id',(tuple(move_state), period_id, tuple(journal_id) ))
        ids = map(lambda x: x[0], self.cr.fetchall())
        return obj_mline.browse(self.cr, self.uid, ids)
    def _set_get_account_currency_code(self, account_id):
        """Cache the currency symbol of ``account_id`` on
        self.account_currency (False when the account has no currency).

        NOTE(review): account_id is interpolated directly into the SQL
        string instead of being passed as a query parameter — should be
        parameterized if account_id can ever come from user input.
        """
        self.cr.execute("SELECT c.symbol AS code "\
                        "FROM res_currency c,account_account AS ac "\
                        "WHERE ac.id = %s AND ac.currency_id = c.id" % (account_id))
        result = self.cr.fetchone()
        if result:
            self.account_currency = result[0]
        else:
            self.account_currency = False
    def _get_fiscalyear(self, data):
        """Fiscal year name; read from the journal period when printed
        directly, otherwise delegated to the common header."""
        if data['model'] == 'account.journal.period':
            return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).fiscalyear_id.name
        return super(journal_print, self)._get_fiscalyear(data)
    def _get_account(self, data):
        """Company name; read from the journal period when printed
        directly, otherwise delegated to the common header."""
        if data['model'] == 'account.journal.period':
            return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).company_id.name
        return super(journal_print, self)._get_account(data)
    def _display_currency(self, data):
        """Whether the amount-currency column should be displayed."""
        if data['model'] == 'account.journal.period':
            return True
        return data['form']['amount_currency']
    def _get_sortby(self, data):
        # TODO: deprecated, to remove in trunk
        if self.sort_selection == 'date':
            return self._translate('Date')
        elif self.sort_selection == 'ref':
            return self._translate('Reference Number')
        # 'date' is also the fallback for any other sort key.
        return self._translate('Date')
class report_journal(osv.AbstractModel):
    """Abstract report model wiring the journal QWeb template to the
    legacy journal_print parser."""
    _name = 'report.account.report_journal'
    _inherit = 'report.abstract_report'
    # QWeb template rendered by this report.
    _template = 'account.report_journal'
    # rml_parse class that builds the rendering context.
    _wrapped_report_class = journal_print
class report_salepurchasejournal(osv.AbstractModel):
    """Abstract report model for the sale/purchase journal variant;
    reuses the same journal_print parser with its own template."""
    _name = 'report.account.report_salepurchasejournal'
    _inherit = 'report.abstract_report'
    # QWeb template rendered by this report.
    _template = 'account.report_salepurchasejournal'
    # rml_parse class that builds the rendering context.
    _wrapped_report_class = journal_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shsingh/ansible | test/lib/ansible_test/_internal/sanity/ignores.py | 55 | 2978 | """Sanity test for the sanity ignore file."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..sanity import (
SanityFailure,
SanityIgnoreParser,
SanityVersionNeutral,
SanitySuccess,
SanityMessage,
)
from ..test import (
calculate_confidence,
calculate_best_confidence,
)
from ..config import (
SanityConfig,
)
class IgnoresTest(SanityVersionNeutral):
    """Sanity test for sanity test ignore entries."""
    @property
    def can_ignore(self): # type: () -> bool
        """True if the test supports ignore entries."""
        # The ignore file cannot be used to suppress errors about itself.
        return False
    @property
    def no_targets(self): # type: () -> bool
        """True if the test does not use test targets. Mutually exclusive with all_targets."""
        return True
    # noinspection PyUnusedLocal
    def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
        """
        Validate the sanity ignore file and report any problems found
        (parse errors, missing paths, redundant ignores) as a failure.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        sanity_ignore = SanityIgnoreParser.load(args)
        messages = []
        # parse errors
        messages.extend(SanityMessage(
            message=message,
            path=sanity_ignore.relative_path,
            line=line,
            column=column,
            # Confidence is only meaningful when change metadata exists.
            confidence=calculate_confidence(sanity_ignore.path, line, args.metadata) if args.metadata.changes else None,
        ) for line, column, message in sanity_ignore.parse_errors)
        # file not found errors
        messages.extend(SanityMessage(
            message="%s '%s' does not exist" % ("Directory" if path.endswith(os.path.sep) else "File", path),
            path=sanity_ignore.relative_path,
            line=line,
            column=1,
            confidence=calculate_best_confidence(((sanity_ignore.path, line), (path, 0)), args.metadata) if args.metadata.changes else None,
        ) for line, path in sanity_ignore.file_not_found_errors)
        # conflicting ignores and skips
        for test_name, ignores in sanity_ignore.ignores.items():
            for ignore_path, ignore_entry in ignores.items():
                skip_line_no = sanity_ignore.skips.get(test_name, {}).get(ignore_path)
                if not skip_line_no:
                    continue
                # A skip entry for a path makes every ignore entry for
                # the same path redundant.
                for ignore_line_no in ignore_entry.values():
                    messages.append(SanityMessage(
                        message="Ignoring '%s' is unnecessary due to skip entry on line %d" % (ignore_path, skip_line_no),
                        path=sanity_ignore.relative_path,
                        line=ignore_line_no,
                        column=1,
                        confidence=calculate_confidence(sanity_ignore.path, ignore_line_no, args.metadata) if args.metadata.changes else None,
                    ))
        if messages:
            return SanityFailure(self.name, messages=messages)
        return SanitySuccess(self.name)
| gpl-3.0 |
lucasdemarchi/ardupilot | mk/VRBRAIN/Tools/genmsg/test/test_genmsg_command_line.py | 51 | 1974 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def test_includepath_to_dict():
    """Unit test for genmsg.command_line.includepath_to_dict."""
    from genmsg.command_line import includepath_to_dict
    # Empty input yields an empty mapping.
    assert {} == includepath_to_dict([])
    # A single 'pkg:path' entry maps the package to a one-element path list.
    assert {'std_msgs': [ 'foo' ]} == includepath_to_dict(['std_msgs:foo'])
    # Only the first ':' separates package from path, so the path itself
    # may contain colons.
    assert {'std_msgs': [ 'foo' ], 'bar_msgs': [ 'baz:colon' ]} == includepath_to_dict(['std_msgs:foo', 'bar_msgs:baz:colon'])
| gpl-3.0 |
jtg-gg/blink | Tools/Scripts/webkitpy/thirdparty/coverage/control.py | 64 | 26459 | """Core control stuff for Coverage."""
import atexit, os, random, socket, sys
from coverage.annotate import AnnotateReporter
from coverage.backward import string_class
from coverage.codeunit import code_unit_factory, CodeUnit
from coverage.collector import Collector
from coverage.config import CoverageConfig
from coverage.data import CoverageData
from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher
from coverage.files import PathAliases, find_python_files
from coverage.html import HtmlReporter
from coverage.misc import CoverageException, bool_or_none, join_regex
from coverage.results import Analysis, Numbers
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
class coverage(object):
"""Programmatic access to Coverage.
To use::
from coverage import coverage
cov = coverage()
cov.start()
#.. blah blah (run your code) blah blah ..
cov.stop()
cov.html_report(directory='covhtml')
"""
def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
source=None, omit=None, include=None):
"""
`data_file` is the base name of the data file to use, defaulting to
".coverage". `data_suffix` is appended (with a dot) to `data_file` to
create the final file name. If `data_suffix` is simply True, then a
suffix is created with the machine and process identity included.
`cover_pylib` is a boolean determining whether Python code installed
with the Python interpreter is measured. This includes the Python
standard library and any packages installed with the interpreter.
If `auto_data` is true, then any existing data file will be read when
coverage measurement starts, and data will be saved automatically when
measurement stops.
If `timid` is true, then a slower and simpler trace function will be
used. This is important for some environments where manipulation of
tracing functions breaks the faster trace function.
If `branch` is true, then branch coverage will be measured in addition
to the usual statement coverage.
`config_file` determines what config file to read. If it is a string,
it is the name of the config file to read. If it is True, then a
standard file is read (".coveragerc"). If it is False, then no file is
read.
`source` is a list of file paths or package names. Only code located
in the trees indicated by the file paths or package names will be
measured.
`include` and `omit` are lists of filename patterns. Files that match
`include` will be measured, files that match `omit` will not. Each
will also accept a single string argument.
"""
from coverage import __version__
# A record of all the warnings that have been issued.
self._warnings = []
# Build our configuration from a number of sources:
# 1: defaults:
self.config = CoverageConfig()
# 2: from the coveragerc file:
if config_file:
if config_file is True:
config_file = ".coveragerc"
try:
self.config.from_file(config_file)
except ValueError:
_, err, _ = sys.exc_info()
raise CoverageException(
"Couldn't read config file %s: %s" % (config_file, err)
)
# 3: from environment variables:
self.config.from_environment('COVERAGE_OPTIONS')
env_data_file = os.environ.get('COVERAGE_FILE')
if env_data_file:
self.config.data_file = env_data_file
# 4: from constructor arguments:
if isinstance(omit, string_class):
omit = [omit]
if isinstance(include, string_class):
include = [include]
self.config.from_args(
data_file=data_file, cover_pylib=cover_pylib, timid=timid,
branch=branch, parallel=bool_or_none(data_suffix),
source=source, omit=omit, include=include
)
self.auto_data = auto_data
self.atexit_registered = False
# _exclude_re is a dict mapping exclusion list names to compiled
# regexes.
self._exclude_re = {}
self._exclude_regex_stale()
self.file_locator = FileLocator()
# The source argument can be directories or package names.
self.source = []
self.source_pkgs = []
for src in self.config.source or []:
if os.path.exists(src):
self.source.append(self.file_locator.canonical_filename(src))
else:
self.source_pkgs.append(src)
self.omit = self._prep_patterns(self.config.omit)
self.include = self._prep_patterns(self.config.include)
self.collector = Collector(
self._should_trace, timid=self.config.timid,
branch=self.config.branch, warn=self._warn
)
# Suffixes are a bit tricky. We want to use the data suffix only when
# collecting data, not when combining data. So we save it as
# `self.run_suffix` now, and promote it to `self.data_suffix` if we
# find that we are collecting data later.
if data_suffix or self.config.parallel:
if not isinstance(data_suffix, string_class):
# if data_suffix=True, use .machinename.pid.random
data_suffix = True
else:
data_suffix = None
self.data_suffix = None
self.run_suffix = data_suffix
# Create the data file. We do this at construction time so that the
# data file will be written into the directory where the process
# started rather than wherever the process eventually chdir'd to.
self.data = CoverageData(
basename=self.config.data_file,
collector="coverage v%s" % __version__
)
# The dirs for files considered "installed with the interpreter".
self.pylib_dirs = []
if not self.config.cover_pylib:
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
for m in (atexit, os, random, socket):
if hasattr(m, "__file__"):
m_dir = self._canonical_dir(m.__file__)
if m_dir not in self.pylib_dirs:
self.pylib_dirs.append(m_dir)
# To avoid tracing the coverage code itself, we skip anything located
# where we are.
self.cover_dir = self._canonical_dir(__file__)
# The matchers for _should_trace, created when tracing starts.
self.source_match = None
self.pylib_match = self.cover_match = None
self.include_match = self.omit_match = None
# Only _harvest_data once per measurement cycle.
self._harvested = False
# Set the reporting precision.
Numbers.set_precision(self.config.precision)
# When tearing down the coverage object, modules can become None.
# Saving the modules as object attributes avoids problems, but it is
# quite ad-hoc which modules need to be saved and which references
# need to use the object attributes.
self.socket = socket
self.os = os
self.random = random
def _canonical_dir(self, f):
"""Return the canonical directory of the file `f`."""
return os.path.split(self.file_locator.canonical_filename(f))[0]
def _source_for_file(self, filename):
"""Return the source file for `filename`."""
if not filename.endswith(".py"):
if filename[-4:-1] == ".py":
filename = filename[:-1]
return filename
def _should_trace(self, filename, frame):
"""Decide whether to trace execution in `filename`
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
Returns a canonicalized filename if it should be traced, False if it
should not.
"""
if os is None:
return False
if filename.startswith('<'):
# Lots of non-file execution is represented with artificial
# filenames like "<string>", "<doctest readme.txt[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
# can't do anything with the data later anyway.
return False
if filename.endswith(".html"):
# Jinja and maybe other templating systems compile templates into
# Python code, but use the template filename as the filename in
# the compiled code. Of course, those filenames are useless later
# so don't bother collecting. TODO: How should we really separate
# out good file extensions from bad?
return False
self._check_for_packages()
# Compiled Python files have two filenames: frame.f_code.co_filename is
# the filename at the time the .pyc was compiled. The second name is
# __file__, which is where the .pyc was actually loaded from. Since
# .pyc files can be moved after compilation (for example, by being
# installed), we look for __file__ in the frame and prefer it to the
# co_filename value.
dunder_file = frame.f_globals.get('__file__')
if dunder_file:
filename = self._source_for_file(dunder_file)
# Jython reports the .class file to the tracer, use the source file.
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
canonical = self.file_locator.canonical_filename(filename)
# If the user specified source or include, then that's authoritative
# about the outer bound of what to measure and we don't have to apply
# any canned exclusions. If they didn't, then we have to exclude the
# stdlib and coverage.py directories.
if self.source_match:
if not self.source_match.match(canonical):
return False
elif self.include_match:
if not self.include_match.match(canonical):
return False
else:
# If we aren't supposed to trace installed code, then check if this
# is near the Python standard library and skip it if so.
if self.pylib_match and self.pylib_match.match(canonical):
return False
# We exclude the coverage code itself, since a little of it will be
# measured otherwise.
if self.cover_match and self.cover_match.match(canonical):
return False
# Check the file against the omit pattern.
if self.omit_match and self.omit_match.match(canonical):
return False
return canonical
# To log what should_trace returns, change this to "if 1:"
if 0:
_real_should_trace = _should_trace
def _should_trace(self, filename, frame): # pylint: disable=E0102
"""A logging decorator around the real _should_trace function."""
ret = self._real_should_trace(filename, frame)
print("should_trace: %r -> %r" % (filename, ret))
return ret
def _warn(self, msg):
"""Use `msg` as a warning."""
self._warnings.append(msg)
sys.stderr.write("Coverage.py warning: %s\n" % msg)
def _prep_patterns(self, patterns):
"""Prepare the file patterns for use in a `FnmatchMatcher`.
If a pattern starts with a wildcard, it is used as a pattern
as-is. If it does not start with a wildcard, then it is made
absolute with the current directory.
If `patterns` is None, an empty list is returned.
"""
patterns = patterns or []
prepped = []
for p in patterns or []:
if p.startswith("*") or p.startswith("?"):
prepped.append(p)
else:
prepped.append(self.file_locator.abs_file(p))
return prepped
def _check_for_packages(self):
"""Update the source_match matcher with latest imported packages."""
# Our self.source_pkgs attribute is a list of package names we want to
# measure. Each time through here, we see if we've imported any of
# them yet. If so, we add its file to source_match, and we don't have
# to look for that package any more.
if self.source_pkgs:
found = []
for pkg in self.source_pkgs:
try:
mod = sys.modules[pkg]
except KeyError:
continue
found.append(pkg)
try:
pkg_file = mod.__file__
except AttributeError:
self._warn("Module %s has no Python source." % pkg)
else:
d, f = os.path.split(pkg_file)
if f.startswith('__init__.'):
# This is actually a package, return the directory.
pkg_file = d
else:
pkg_file = self._source_for_file(pkg_file)
pkg_file = self.file_locator.canonical_filename(pkg_file)
self.source.append(pkg_file)
self.source_match.add(pkg_file)
for pkg in found:
self.source_pkgs.remove(pkg)
def use_cache(self, usecache):
"""Control the use of a data file (incorrectly called a cache).
`usecache` is true or false, whether to read and write data on disk.
"""
self.data.usefile(usecache)
def load(self):
"""Load previously-collected coverage data from the data file."""
self.collector.reset()
self.data.read()
def start(self):
"""Start measuring code coverage."""
if self.run_suffix:
# Calling start() means we're running code, so use the run_suffix
# as the data_suffix when we eventually save the data.
self.data_suffix = self.run_suffix
if self.auto_data:
self.load()
# Save coverage data when Python exits.
if not self.atexit_registered:
atexit.register(self.save)
self.atexit_registered = True
# Create the matchers we need for _should_trace
if self.source or self.source_pkgs:
self.source_match = TreeMatcher(self.source)
else:
if self.cover_dir:
self.cover_match = TreeMatcher([self.cover_dir])
if self.pylib_dirs:
self.pylib_match = TreeMatcher(self.pylib_dirs)
if self.include:
self.include_match = FnmatchMatcher(self.include)
if self.omit:
self.omit_match = FnmatchMatcher(self.omit)
self._harvested = False
self.collector.start()
def stop(self):
"""Stop measuring code coverage."""
self.collector.stop()
self._harvest_data()
def erase(self):
"""Erase previously-collected coverage data.
This removes the in-memory data collected in this session as well as
discarding the data file.
"""
self.collector.reset()
self.data.erase()
def clear_exclude(self, which='exclude'):
"""Clear the exclude list."""
setattr(self.config, which + "_list", [])
self._exclude_regex_stale()
def exclude(self, regex, which='exclude'):
"""Exclude source lines from execution consideration.
A number of lists of regular expressions are maintained. Each list
selects lines that are treated differently during reporting.
`which` determines which list is modified. The "exclude" list selects
lines that are not considered executable at all. The "partial" list
indicates lines with branches that are not taken.
`regex` is a regular expression. The regex is added to the specified
list. If any of the regexes in the list is found in a line, the line
is marked for special treatment during reporting.
"""
excl_list = getattr(self.config, which + "_list")
excl_list.append(regex)
self._exclude_regex_stale()
    def _exclude_regex_stale(self):
        """Drop all the compiled exclusion regexes, a list was modified."""
        # _exclude_regex() lazily re-fills this per-list cache.
        self._exclude_re.clear()
def _exclude_regex(self, which):
"""Return a compiled regex for the given exclusion list."""
if which not in self._exclude_re:
excl_list = getattr(self.config, which + "_list")
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
    def get_exclude_list(self, which='exclude'):
        """Return a list of excluded regex patterns.
        `which` indicates which list is desired. See `exclude` for the lists
        that are available, and their meaning.

        Note: the config's actual list object is returned, not a copy.
        """
        return getattr(self.config, which + "_list")
    def save(self):
        """Save the collected coverage data to the data file."""
        data_suffix = self.data_suffix
        if data_suffix is True:
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information. We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            # NOTE(review): socket/os/random appear to be module references
            # stashed on self (presumably in __init__) -- confirm there.
            data_suffix = "%s.%s.%06d" % (
                self.socket.gethostname(), self.os.getpid(),
                self.random.randint(0, 99999)
                )
        # Fold any still-uncollected trace data in before writing.
        self._harvest_data()
        self.data.write(suffix=data_suffix)
    def combine(self):
        """Combine together a number of similarly-named coverage data files.
        All coverage data files whose name starts with `data_file` (from the
        coverage() constructor) will be read, and combined together into the
        current measurements.
        """
        aliases = None
        if self.config.paths:
            aliases = PathAliases(self.file_locator)
            # Each config entry maps alternate path patterns onto the first
            # (canonical) path in the list.
            for paths in self.config.paths.values():
                result = paths[0]
                for pattern in paths[1:]:
                    aliases.add(pattern, result)
        self.data.combine_parallel_data(aliases=aliases)
    def _harvest_data(self):
        """Get the collected data and reset the collector.
        Also warn about various problems collecting data.

        Idempotent per measurement session: only the first call after
        start() does any work (guarded by self._harvested).
        """
        if not self._harvested:
            self.data.add_line_data(self.collector.get_line_data())
            self.data.add_arc_data(self.collector.get_arc_data())
            self.collector.reset()
            # If there are still entries in the source_pkgs list, then we never
            # encountered those packages.
            for pkg in self.source_pkgs:
                self._warn("Module %s was never imported." % pkg)
            # Find out if we got any data.
            summary = self.data.summary()
            if not summary:
                self._warn("No data was collected.")
            # Find files that were never executed at all.
            for src in self.source:
                for py_file in find_python_files(src):
                    # Record the file with zero data so reports show it.
                    self.data.touch_file(py_file)
            self._harvested = True
# Backward compatibility with version 1.
def analysis(self, morf):
"""Like `analysis2` but doesn't return excluded line numbers."""
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
    def analysis2(self, morf):
        """Analyze a module.
        `morf` is a module or a filename. It will be analyzed to determine
        its coverage statistics. The return value is a 5-tuple:
        * The filename for the module.
        * A list of line numbers of executable statements.
        * A list of line numbers of excluded statements.
        * A list of line numbers of statements not run (missing from
          execution).
        * A readable formatted string of the missing line numbers.
        The analysis uses the source file itself and the current measured
        coverage data.
        """
        # Delegate the heavy lifting to _analyze, then flatten the result.
        analysis = self._analyze(morf)
        return (
            analysis.filename, analysis.statements, analysis.excluded,
            analysis.missing, analysis.missing_formatted()
            )
    def _analyze(self, it):
        """Analyze a single morf or code unit.
        Returns an `Analysis` object.
        """
        # Accept a raw module or filename by converting it to a CodeUnit.
        if not isinstance(it, CodeUnit):
            it = code_unit_factory(it, self.file_locator)[0]
        return Analysis(self, it)
    def report(self, morfs=None, show_missing=True, ignore_errors=None,
                file=None, # pylint: disable=W0622 (intentionally shadows builtin)
                omit=None, include=None
                ):
        """Write a summary report to `file`.
        Each module in `morfs` is listed, with counts of statements, executed
        statements, missing statements, and a list of lines missed.
        `include` is a list of filename patterns. Modules whose filenames
        match those patterns will be included in the report. Modules matching
        `omit` will not be included in the report.
        """
        # Fold the per-call options into the config before reporting.
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include
            )
        reporter = SummaryReporter(
            self, show_missing, self.config.ignore_errors
            )
        reporter.report(morfs, outfile=file, config=self.config)
    def annotate(self, morfs=None, directory=None, ignore_errors=None,
                    omit=None, include=None):
        """Annotate a list of modules.
        Each module in `morfs` is annotated. The source is written to a new
        file, named with a ",cover" suffix, with each line prefixed with a
        marker to indicate the coverage of the line. Covered lines have ">",
        excluded lines have "-", and missing lines have "!".
        See `coverage.report()` for other arguments.
        """
        # Fold the per-call options into the config before reporting.
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include
            )
        reporter = AnnotateReporter(self, self.config.ignore_errors)
        reporter.report(morfs, config=self.config, directory=directory)
    def html_report(self, morfs=None, directory=None, ignore_errors=None,
                    omit=None, include=None):
        """Generate an HTML report.
        See `coverage.report()` for other arguments.

        `directory` is where the HTML files are written.
        """
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            html_dir=directory,
            )
        reporter = HtmlReporter(self, self.config.ignore_errors)
        reporter.report(morfs, config=self.config)
    def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
                    omit=None, include=None):
        """Generate an XML report of coverage results.
        The report is compatible with Cobertura reports.
        Each module in `morfs` is included in the report. `outfile` is the
        path to write the file to, "-" will write to stdout.
        See `coverage.report()` for other arguments.
        """
        self.config.from_args(
            ignore_errors=ignore_errors, omit=omit, include=include,
            xml_output=outfile,
            )
        # Only close a file we opened ourselves -- never sys.stdout.
        file_to_close = None
        if self.config.xml_output:
            if self.config.xml_output == '-':
                outfile = sys.stdout
            else:
                outfile = open(self.config.xml_output, "w")
                file_to_close = outfile
        try:
            reporter = XmlReporter(self, self.config.ignore_errors)
            reporter.report(morfs, outfile=outfile, config=self.config)
        finally:
            if file_to_close:
                file_to_close.close()
    def sysinfo(self):
        """Return a list of (key, value) pairs showing internal information."""
        # Imported here, not at module level, to avoid import cycles and to
        # keep these only needed for diagnostics.
        import coverage as covmod
        import platform, re
        try:
            implementation = platform.python_implementation()
        except AttributeError:
            # python_implementation() is missing on some older Pythons.
            implementation = "unknown"
        info = [
            ('version', covmod.__version__),
            ('coverage', covmod.__file__),
            ('cover_dir', self.cover_dir),
            ('pylib_dirs', self.pylib_dirs),
            ('tracer', self.collector.tracer_name()),
            ('data_path', self.data.filename),
            ('python', sys.version.replace('\n', '')),
            ('platform', platform.platform()),
            ('implementation', implementation),
            ('cwd', os.getcwd()),
            ('path', sys.path),
            # Only environment variables that look coverage/Python related.
            ('environment', [
                ("%s = %s" % (k, v)) for k, v in os.environ.items()
                    if re.search("^COV|^PY", k)
                ]),
            ]
        return info
def process_startup():
    """Call this at Python startup to perhaps measure coverage.

    If the environment variable COVERAGE_PROCESS_START is defined, coverage
    measurement is started.  The value of the variable is the config file
    to use.

    There are two ways to configure your Python installation to invoke this
    function when Python starts:

    #. Create or append to sitecustomize.py to add these lines::

        import coverage
        coverage.process_startup()

    #. Create a .pth file in your Python installation containing::

        import coverage; coverage.process_startup()

    """
    config_file = os.environ.get("COVERAGE_PROCESS_START")
    if not config_file:
        # Measurement was not requested for this process; do nothing.
        return
    cov = coverage(config_file=config_file, auto_data=True)
    if os.environ.get("COVERAGE_COVERAGE"):
        # Measuring coverage within coverage.py takes yet more trickery.
        cov.cover_dir = "Please measure coverage.py!"
    cov.start()
| bsd-3-clause |
drcoms/jlu-drcom-client | jlu-drcom-py3/newclinet-py3.py | 1 | 12355 | #!/usr/bin/env python
# coding: utf-8
# license: AGPL-V3
import re
import socket
import struct
import time
from hashlib import md5
import sys
import os
import random
import platform
# CONFIG
server = '10.100.61.3'
username = b'XXXXX' # 用户名
password = b'XXXXX' # 密码
host_ip = '100.100.100.100' # ip地址
mac = 0x112288776655 # mac地址
host_name = b'YOURPCNAME' # 计算机名
host_os = b'Windows 10' # 操作系统
CONTROLCHECKSTATUS = b'\x20'
ADAPTERNUM = b'\x03'
IPDOG = b'\x01'
PRIMARY_DNS = '10.10.10.10'
dhcp_server = '0.0.0.0'
AUTH_VERSION = b'\x68\x00'
KEEP_ALIVE_VERSION = b'\xdc\x02'
nic_name = '' # Indicate your nic, e.g. 'eth0.2'.nic_name
bind_ip = '0.0.0.0'
# CONFIG_END
keep_alive_times = 0
class ChallengeException (Exception):
    """Raised by challenge() when the server's reply is not a valid
    challenge response (first byte != 0x02).

    The original no-op ``__init__`` took no arguments and so prevented
    attaching a message; plain ``pass`` keeps Exception's normal behavior
    (zero-arg construction still works) while allowing an optional message.
    """
    pass
class LoginException (Exception):
    """Raised when the login exchange with the server fails; caught in
    main() to stop the retry loop.

    The original no-op ``__init__`` took no arguments and so prevented
    attaching a message; plain ``pass`` keeps Exception's normal behavior
    while allowing an optional message.
    """
    pass
def bind_nic():
    """Return the IPv4 address of the interface named by `nic_name`,
    or '0.0.0.0' when that cannot be determined.

    BUG FIX: the original had a ``finally: return '0.0.0.0'`` clause, which
    unconditionally overrode every successful return -- the real NIC address
    was never used. The fallback now happens only in the error branches.
    """
    try:
        import fcntl  # Unix-only; unavailable e.g. on Windows.

        def get_ip_address(ifname):
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            # SIOCGIFADDR ioctl returns the interface address at bytes 20:24.
            return socket.inet_ntoa(fcntl.ioctl(
                s.fileno(),
                0x8915,  # SIOCGIFADDR
                struct.pack('256s', ifname[:15])
            )[20:24])
        return get_ip_address(nic_name)
    except ImportError:
        print('Indicate nic feature need to be run under Unix based system.')
        return '0.0.0.0'
    except IOError:
        print(nic_name + ' is unacceptable !')
        return '0.0.0.0'
# Optionally resolve the local address from the configured NIC.
if nic_name != '':
    bind_ip = bind_nic()
# One UDP socket is shared by the whole client; the drcom protocol uses
# port 61440 on both ends, with a 3-second receive timeout.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((bind_ip, 61440))
s.settimeout(3)
SALT = ''
IS_TEST = True
# specified fields based on version
CONF = "/etc/drcom.conf"
UNLIMITED_RETRY = True
EXCEPTION = False
DEBUG = False # when True, log() also appends to the file at LOG_PATH
LOG_PATH = '/var/log/drcom_client.log'
if IS_TEST:
    # Test mode: always log, and write next to the script instead of /var/log.
    DEBUG = True
    LOG_PATH = 'drcom_client.log'
def log(*args, **kwargs):
    """Print a message; when DEBUG is on (and not on Windows), also append
    the message to the file at LOG_PATH.

    BUG FIX: the original wrote ``s`` to the file -- but ``s`` is the
    module-level UDP socket, not the message -- which raised TypeError on
    the string concatenation. Write the joined arguments instead.
    """
    print(*args, **kwargs)
    if DEBUG and platform.uname().system != 'Windows':
        with open(LOG_PATH, 'a') as f:
            f.write(' '.join(str(arg) for arg in args) + '\n')
def challenge(svr, ran):
    """Perform the drcom challenge handshake against server `svr`.

    `ran` seeds the 16-bit counter embedded in the request. Retries on
    receive timeout; exits the process on a reply from the wrong address.
    Returns the 4-byte salt (bytes 4:8 of the reply) used by login().

    Raises ChallengeException when the reply's first byte is not 0x02.
    """
    while True:
        t = struct.pack("<H", int(ran) % (0xFFFF))
        s.sendto(b"\x01\x02" + t + b"\x09" + b"\x00"*15, (svr, 61440))
        try:
            data, address = s.recvfrom(1024)
            log('[challenge] recv', data.hex())
        except socket.timeout:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # programming errors are no longer silently swallowed.
            log('[challenge] timeout, retrying...')
            continue
        if address == (svr, 61440):
            break
        else:
            log(f"Wrong address: {address}")
            exit()
    log('[DEBUG] challenge:\n' + data.hex())
    if data[0] != 2:
        raise ChallengeException
    log('[challenge] challenge packet sent.')
    return data[4:8]
def md5sum(s):
    """Return the 16-byte MD5 digest of the byte string `s`."""
    return md5(s).digest()
def dump(n):
    """Serialize a non-negative integer as big-endian bytes.

    The hex form is left-padded to an even number of digits, so e.g.
    0xABC becomes b'\\x0a\\xbc' and 0 becomes b'\\x00'.
    """
    digits = '%x' % n
    if len(digits) % 2:
        digits = '0' + digits
    return bytes.fromhex(digits)
def ror(md5 : bytes, pwd : bytes):
    """Mix a password into an MD5 digest byte-by-byte.

    Each output byte is (md5[i] ^ pwd[i]) rotated left by 3 bits; the
    result has len(pwd) bytes (pwd must not be longer than md5).
    """
    rotated = bytearray()
    for i in range(len(pwd)):
        x = md5[i] ^ pwd[i]
        rotated.append(((x << 3) & 0xFF) + (x >> 5))
    return bytes(rotated)
def keep_alive_package_builder(number, random, tail: bytes, type=1, first=False):
    """Build a drcom keep-alive UDP packet.

    `number` is the rolling packet counter (one byte); `tail` is the 4-byte
    token echoed from the previous server reply; `type` selects the packet
    variant (3 embeds the host IP and a zeroed CRC field); `first` marks the
    initial packet of a session (uses b'\\x0f\\x27' instead of the version).

    NOTE(review): the `random` parameter is accepted but never used in the
    body, and it (like `type`) shadows a builtin/module name.
    """
    data = b'\x07' + number.to_bytes(1, 'big') + b'\x28\x00\x0b' + type.to_bytes(1, 'big')
    if first:
        data += b'\x0f\x27'
    else:
        data += KEEP_ALIVE_VERSION
    data += b'\x2f\x12' + b'\x00' * 6
    data += tail
    data += b'\x00' * 4
    #data += struct.pack("!H",0xdc02)z
    if type == 3:
        foo = b''.join([int(i).to_bytes(1, 'big') for i in host_ip.split('.')]) # host_ip
        # CRC
        # edited on 2014/5/12, filled zeros to checksum
        # crc = packet_CRC(data+foo)
        crc = b'\x00' * 4
        #data += struct.pack("!I",crc) + foo + b'\x00' * 8
        data += crc + foo + b'\x00' * 8
    else: # packet type = 1
        data += b'\x00' * 16
    return data
def keep_alive2(*args):
    """Run the drcom keep-alive-2 exchange, then heartbeat forever.

    Performs the initial three-packet handshake (send1/send2/send3), then
    loops every ~20 seconds sending a type-1/type-3 packet pair, carrying
    the 4-byte `tail` token forward from each reply. `args` is forwarded
    verbatim to keep_alive1() on every cycle (salt, tail, password, server).
    Never returns.
    """
    tail = b''
    packet = b''
    svr = server
    ran = random.randint(0, 0xFFFF)
    ran += random.randint(1, 10)
    # 2014/10/15 add by latyas, maybe svr sends back a file packet
    svr_num = 0
    packet = keep_alive_package_builder(svr_num, dump(ran), b'\x00'*4, 1, True)
    # Phase 1: send the first keep-alive until the server acknowledges it.
    while True:
        log('[keep-alive2] send1', packet.hex())
        s.sendto(packet, (svr, 61440))
        data, address = s.recvfrom(1024)
        log('[keep-alive2] recv1', data.hex())
        if data.startswith(b'\x07\x00\x28\x00') or data.startswith(b'\x07' + svr_num.to_bytes(1, 'big') + b'\x28\x00'):
            break
        elif data[0] == 0x07 and data[2] == 0x10:
            # Server pushed a "file" packet; bump the counter and resend.
            log('[keep-alive2] recv file, resending..')
            svr_num = svr_num + 1
            packet = keep_alive_package_builder(
                svr_num, dump(ran), b'\x00'*4, 1, False)
        else:
            log('[keep-alive2] recv1/unexpected', data.hex())
    #log('[keep-alive2] recv1',data.hex())
    # Phase 2: second packet; its reply carries the first tail token.
    ran += random.randint(1, 10)
    packet = keep_alive_package_builder(svr_num, dump(ran), b'\x00' * 4, 1, False)
    log('[keep-alive2] send2', packet.hex())
    s.sendto(packet, (svr, 61440))
    while True:
        data, address = s.recvfrom(1024)
        if data[0] == 7:
            svr_num = svr_num + 1
            break
        else:
            log('[keep-alive2] recv2/unexpected', data.hex())
    log('[keep-alive2] recv2', data.hex())
    tail = data[16:20]
    # Phase 3: type-3 packet echoing the tail; reply refreshes the tail.
    ran += random.randint(1, 10)
    packet = keep_alive_package_builder(svr_num, dump(ran), tail, 3, False)
    log('[keep-alive2] send3', packet.hex())
    s.sendto(packet, (svr, 61440))
    while True:
        data, address = s.recvfrom(1024)
        if data[0] == 7:
            svr_num = svr_num + 1
            break
        else:
            log('[keep-alive2] recv3/unexpected', data.hex())
    log('[keep-alive2] recv3', data.hex())
    tail = data[16:20]
    log("[keep-alive2] keep-alive2 loop was in daemon.")
    # Steady state: a type-1 + type-3 pair every 20s, counter mod 0xFF.
    i = svr_num
    while True:
        try:
            ran += random.randint(1, 10)
            packet = keep_alive_package_builder(i, dump(ran), tail, 1, False)
            #log('DEBUG: keep_alive2,packet 4\n',packet.hex())
            log('[keep_alive2] send', str(i), packet.hex())
            s.sendto(packet, (svr, 61440))
            data, address = s.recvfrom(1024)
            log('[keep_alive2] recv', data.hex())
            tail = data[16:20]
            #log('DEBUG: keep_alive2,packet 4 return\n',data.hex())
            ran += random.randint(1, 10)
            packet = keep_alive_package_builder(i+1, dump(ran), tail, 3, False)
            #log('DEBUG: keep_alive2,packet 5\n',packet.hex())
            s.sendto(packet, (svr, 61440))
            log('[keep_alive2] send', str(i+1), packet.hex())
            data, address = s.recvfrom(1024)
            log('[keep_alive2] recv', data.hex())
            tail = data[16:20]
            #log('DEBUG: keep_alive2,packet 5 return\n',data.hex())
            i = (i+2) % 0xFF
            time.sleep(20)
            keep_alive1(*args)
        except:
            # NOTE(review): bare except retries on *any* error, including
            # KeyboardInterrupt; consider narrowing to socket.timeout.
            continue
def checksum(s):
    """Compute the 4-byte little-endian drcom checksum of `s`.

    XORs successive 4-byte groups (as little-endian ints) into 1234, then
    multiplies by 1968 modulo 2**32. re.findall(b'....') is kept on purpose:
    without DOTALL it skips newline bytes, and the server expects exactly
    this grouping.
    """
    acc = 1234
    for group in re.findall(b'....', s):
        acc ^= int.from_bytes(group, 'little')
    return struct.pack('<I', (1968 * acc) & 0xffffffff)
def mkpkt(salt, usr, pwd, mac):
    """Build the drcom login packet.

    `salt` is the 4-byte challenge salt; `usr`/`pwd` are the account bytes;
    `mac` is the MAC address as an integer (shadows the module global of the
    same name). The layout below follows the drcom protocol field by field;
    the trailing checksum covers the packet plus a fixed suffix and the MAC.
    """
    data = b'\x03\x01\x00'+ (len(usr)+20).to_bytes(1, 'big')
    data += md5sum(b'\x03\x01'+salt+pwd)
    data += usr.ljust(36, b'\x00')
    data += CONTROLCHECKSTATUS
    data += ADAPTERNUM
    data += dump(int(data[4:10].hex(), 16) ^
                 mac).rjust(6, b'\x00') # mac xor md51
    data += md5sum(b"\x01" + pwd + salt + b'\x00'*4) # md52
    data += b'\x01' # number of ip
    data += b''.join([int(x).to_bytes(1,'big') for x in host_ip.split('.')])
    data += b'\x00'*4 # your ipaddress 2
    data += b'\x00'*4 # your ipaddress 3
    data += b'\x00'*4 # your ipaddress 4
    data += md5sum(data + b'\x14\x00\x07\x0b')[:8] # md53
    data += IPDOG
    data += b'\x00'*4 # delimeter
    data += host_name.ljust(32, b'\x00')
    data += b''.join([ int(i).to_bytes(1, 'big') for i in PRIMARY_DNS.split('.')]) # primary dns
    data += b''.join([ int(i).to_bytes(1, 'big') for i in dhcp_server.split('.')]) # DHCP dns
    data += b'\x00\x00\x00\x00' # secondary dns:0.0.0.0
    data += b'\x00' * 8 # delimeter
    data += b'\x94\x00\x00\x00' # unknow
    data += b'\x06\x00\x00\x00' # os major
    data += b'\x02\x00\x00\x00' # os minor
    data += b'\xf0\x23\x00\x00' # OS build
    data += b'\x02\x00\x00\x00' # os unknown
    data += b'\x44\x72\x43\x4f\x4d\x00\xcf\x07\x68'
    data += b'\x00' * 55 # unknown string
    data += b'\x33\x64\x63\x37\x39\x66\x35\x32\x31\x32\x65\x38\x31\x37\x30\x61\x63\x66\x61\x39\x65\x63\x39\x35\x66\x31\x64\x37\x34\x39\x31\x36\x35\x34\x32\x62\x65\x37\x62\x31'
    data += b'\x00' * 24
    data += AUTH_VERSION
    data += b'\x00' + len(pwd).to_bytes(1, 'big')
    data += ror(md5sum(b'\x03\x01'+salt+pwd), pwd)
    data += b'\x02\x0c'
    data += checksum(data+b'\x01\x26\x07\x11\x00\x00'+dump(mac))
    data += b'\x00\x00' # delimeter
    data += dump(mac)
    # NOTE(review): with Python 3 true division this pads for every pwd
    # length except exactly 16; the Python 2 original (integer division)
    # skipped padding for lengths 16-19 -- confirm against the protocol.
    if (len(pwd) / 4) != 4:
        data += b'\x00' * (len(pwd) // 4) # odd, but mirrors the reference client
    data += b'\x60\xa2' # unknown, filled numbers randomly =w=
    data += b'\x00' * 28
    log('[mkpkt]', data.hex())
    return data
def login(usr, pwd, svr):
    """Authenticate against server `svr` and return the 16-byte keep-alive
    token (bytes 23:39 of the accept packet).

    Loops challenge -> login packet -> reply; a reply whose first byte is
    0x04 means success. Failure paths call exit()/sys.exit() rather than
    raising, so callers only see the success return.
    """
    global SALT
    i = 0
    while True:
        # Fresh challenge salt for every attempt; remembered globally for
        # the keep-alive functions.
        salt = challenge(svr, time.time()+random.randint(0xF, 0xFF))
        SALT = salt
        log('[salt] ', SALT)
        packet = mkpkt(salt, usr, pwd, mac) # build the login packet
        log('[login] send', packet.hex())
        s.sendto(packet, (svr, 61440))
        data, address = s.recvfrom(1024)
        log('[login] recv', data.hex())
        log('[login] packet sent.')
        if address == (svr, 61440):
            if data[0] == 4:
                log('[login] loged in')
                break
            else:
                log(f'[login] login failed. data[0] = {data[0]} type={type(data[0])}')
                exit(2)
        else:
            # Reply came from an unexpected address.
            if i >= 5 and UNLIMITED_RETRY == False:
                log('[login] exception occured.')
                sys.exit(1)
            else:
                exit(2)
    log('[login] login sent')
    # 0.8 changed:
    return data[23:39]
    # return data[-22:-6]
def keep_alive1(salt, tail, pwd, svr):
    """Send one type-0xff keep-alive packet and wait for the 0x07 ack.

    The packet carries md5(0x03,0x01,salt,pwd), the login `tail` token and
    a 16-bit timestamp; non-0x07 replies are logged and skipped.
    """
    foo = struct.pack('!H', int(time.time()) % 0xFFFF)
    data = b'\xff' + md5sum(b'\x03\x01'+salt+pwd) + b'\x00\x00\x00'
    data += tail
    data += foo + b'\x00\x00\x00\x00'
    log('[keep_alive1] send', data.hex())
    s.sendto(data, (svr, 61440))
    while True:
        data, address = s.recvfrom(1024)
        if data[0] == 7:
            break
        else:
            log('[keep-alive1]recv/not expected', data.hex())
    log('[keep-alive1] recv', data.hex())
def empty_socket_buffer():
    """Drain any datagrams queued on the shared socket.

    Normal termination is the 3-second socket timeout raising
    socket.timeout once nothing is left to read.

    BUG FIX: the original tested ``if s == '':`` -- comparing the *socket
    object* to a string, which is always False (dead code). An empty
    datagram now ends the drain early, as evidently intended.
    """
    # empty buffer for some fucking schools
    log('starting to empty socket buffer')
    try:
        while True:
            data, address = s.recvfrom(1024)
            log('recived sth unexpected', data.hex())
            if not data:
                break
    except socket.timeout as timeout_err:
        # get exception means it has done.
        log(f'exception in empty_socket_buffer {timeout_err}')
    log('emptyed')
def daemon():
    """Record this process's PID in /var/run/jludrcom.pid.

    Skipped on Windows, which has no /var/run.
    """
    if platform.uname().system == 'Windows':
        return
    with open('/var/run/jludrcom.pid', 'w') as pid_file:
        pid_file.write(str(os.getpid()))
def main():
    """Entry point: optionally daemonize and load CONF overrides, then run
    the login -> keep-alive loop forever."""
    if not IS_TEST:
        daemon()
        # BUG FIX: execfile() was removed in Python 3; execute the config
        # file with exec() so overrides in CONF still take effect.
        with open(CONF) as conf_file:
            exec(compile(conf_file.read(), CONF, 'exec'), globals())
    log("auth svr:", server, "\nusername:", username,
        "\npassword:", password, "\nmac:", str(hex(mac)))
    log(bind_ip)
    # Flow: login -> keep alive
    while True:
        try:
            package_tail = login(username, password, server)
        except LoginException:
            log("登录失败!")
            break
        log('package_tail', package_tail.hex())
        # keep_alive1 is fucking bullshit!
        # ^^^ seconded ^^^
        empty_socket_buffer()
        keep_alive1(SALT, package_tail, password, server)
        keep_alive2(SALT, package_tail, password, server)
# Script entry point.
if __name__ == "__main__":
    main()
| agpl-3.0 |
UniversalMasterEgg8679/ansible | lib/ansible/utils/module_docs_fragments/junos.py | 101 | 2910 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # Standard files documentation fragment
    #
    # Ansible merges this DOCUMENTATION fragment into the docs of junos_*
    # modules via `extends_documentation_fragment`. The string is parsed as
    # YAML at documentation-build time, so its content must stay valid YAML.
    DOCUMENTATION = """
options:
  provider:
    description:
      - A dict object containing connection details.
    default: null
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport.  The value of host is used as
            the destination address for the transport.
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device.  The port value will default to the well known SSH port
            of 22 (for C(transport=cli)) or port 830 (for C(transport=netconf))
            device.
        default: 22
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device.  This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device.   This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
        default: null
      timeout:
        description:
          - Specifies the timeout in seconds for communicating with the network device
            for either connecting or sending commands.  If the timeout is
            exceeded before the operation is completed, the module will error.
        default: 10
      ssh_keyfile:
        description:
          - Specifies the SSH key to use to authenticate the connection to
            the remote device.   This value is the path to the key
            used to authenticate the SSH session. If the value is not specified in
            the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
            will be used instead.
"""
| gpl-3.0 |
whatrye/twister-core | libtorrent/tools/parse_memory_log.py | 59 | 3492 | #! /usr/bin/env python
import os, sys, time
# usage: memory.log memory_index.log
lines = open(sys.argv[1], 'rb').readlines()
index = open(sys.argv[2], 'rb').readlines()
# logfile format:
# #<allocation-point> <time(ms)> <key ('A' | 'F')> <address> <size> <total-size> <total-space-time> <peak-total-size>
# example:
# #12 38 A 0xd902a0 16 16 0 16
allocation_points_to_print = 30
def print_allocation_point(ap):
    """Print one allocation-point record: space-time, allocation count,
    peak usage, and the captured call stack."""
    print 'space_time: %d kBms' % (ap['spacetime'] / 1024)
    print 'allocations: %d' % ap['allocations']
    print 'peak: %d kB' % (ap['peak'] / 1024)
    print 'stack: '
    counter = 0
    for e in ap['stack']:
        print '#%d %s' % (counter, e)
        counter += 1
# Build one record per allocation point from the index file; the '#'-split
# fields after the first are the stack frames.
allocation_points = []
for l in index:
    l = l.split('#')
    l.pop(0)
    ap = { 'allocations': 0, 'peak': 0, 'spacetime': 0, 'allocation_point': len(allocation_points), 'stack': l}
    allocation_points.append(ap);
# Accumulate per-allocation-point stats from the main log.
for l in lines:
    l = l.lstrip('#').rstrip('\n').split(' ')
    if len(l) != 8:
        print l
        continue
    try:
        ap = int(l[0])
        allocation_points[ap]['allocations'] += 1
        allocation_points[ap]['peak'] = int(l[7])
        allocation_points[ap]['spacetime'] = int(l[6])
    except Exception, e:
        print type(e), e, l
# Report the top entries three ways; the space-time pass also records which
# allocation points are "hot" for the graph below.
print '=== space time ==='
hot_ap = []
allocation_points.sort(key = lambda x:x['spacetime'], reverse=True);
counter = 0
for ap in allocation_points[0:allocation_points_to_print]:
    print '== %d ==' % counter
    counter += 1
    print_allocation_point(ap)
    hot_ap.append(ap['allocation_point']);
print '=== allocations ==='
allocation_points.sort(key = lambda x:x['allocations'], reverse=True);
for ap in allocation_points[0:allocation_points_to_print]:
    print_allocation_point(ap)
print '=== peak ==='
allocation_points.sort(key = lambda x:x['peak'], reverse=True);
for ap in allocation_points[0:allocation_points_to_print]:
    print_allocation_point(ap)
# generate graph
# Second pass over the log: write one row per timestamp to memory.dat, one
# column per hot allocation point (-1 means "no sample; carry the previous").
lines = open(sys.argv[1], 'rb').readlines()
out = open('memory.dat', 'wb')
cur_line = [0] * allocation_points_to_print
prev_line = [0] * allocation_points_to_print
last_time = 0
for l in lines:
    l = l.lstrip('#').rstrip('\n').split(' ')
    if len(l) != 8:
        print l
        continue
    try:
        time = int(l[1])
        if time != last_time:
            print >>out, last_time, '\t',
            for i in range(allocation_points_to_print):
                if cur_line[i] == -1:
                    print >>out, prev_line[i], '\t',
                else:
                    print >>out, cur_line[i], '\t',
                prev_line[i] = cur_line[i]
            print >>out
            cur_line = [-1] * allocation_points_to_print
            last_time = time
        size = int(l[5])
        ap = int(l[0])
        if ap in hot_ap:
            # NOTE(review): rebinds the global `index` (the file contents
            # read above) to a list position -- harmless here, but fragile.
            index = hot_ap.index(ap)
            cur_line[index] = max(cur_line[index], size)
    except Exception, e:
        print type(e), e, l
out.close()
# Emit a gnuplot script producing stacked filled curves, then run it.
out = open('memory.gnuplot', 'wb')
print >>out, "set term png size 1200,700"
print >>out, 'set output "memory.png"'
print >>out, 'set xrange [0:*]'
print >>out, 'set xlabel "time (ms)"'
print >>out, 'set ylabel "bytes (B)"'
print >>out, "set style data lines"
print >>out, "set key box"
print >>out, 'plot',
for k in range(allocation_points_to_print):
    print >>out, ' "memory.dat" using 1:(',
    for i in range(k, allocation_points_to_print):
        if i == k: print >>out, '$%d' % (i + 2),
        else: print >>out, '+$%d' % (i + 2),
    print >>out, ') title "%d" with filledcurves x1, \\' % k
print >>out, 'x=0'
out.close()
os.system('gnuplot memory.gnuplot');
| mit |
ryancanhelpyou/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/handshake/_base.py | 652 | 6143 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common functions and exceptions used by WebSocket opening handshake
processors.
"""
from mod_pywebsocket import common
from mod_pywebsocket import http_header_util
class AbortedByUserException(Exception):
    """Exception for aborting a connection intentionally.
    If this exception is raised in do_extra_handshake handler, the connection
    will be abandoned. No other WebSocket or HTTP(S) handler will be invoked.
    If this exception is raised in transfer_data_handler, the connection will
    be closed without closing handshake. No other WebSocket or HTTP(S) handler
    will be invoked.
    """
    # No extra state; the class exists purely as a control-flow signal.
    pass
class HandshakeException(Exception):
    """This exception will be raised when an error occurred while processing
    WebSocket initial handshake.
    """
    def __init__(self, name, status=None):
        super(HandshakeException, self).__init__(name)
        # Optional HTTP status code associated with this failure.
        self.status = status
class VersionException(Exception):
    """This exception will be raised when a version of client request does not
    match with version the server supports.
    """
    def __init__(self, name, supported_versions=''):
        """Construct an instance.
        Args:
            supported_versions: a str object to show supported hybi versions.
            (e.g. '8, 13')
        """
        super(VersionException, self).__init__(name)
        # Stored so the handshake code can report what the server accepts.
        self.supported_versions = supported_versions
def get_default_port(is_secure):
    """Return the default WebSocket port: the secure (wss) port when
    `is_secure` is true, the plain (ws) port otherwise."""
    return (common.DEFAULT_WEB_SOCKET_SECURE_PORT
            if is_secure
            else common.DEFAULT_WEB_SOCKET_PORT)
def validate_subprotocol(subprotocol):
    """Validate a value in the Sec-WebSocket-Protocol field.
    See the Section 4.1., 4.2.2., and 4.3. of RFC 6455.

    Raises HandshakeException when `subprotocol` is empty or is not a
    single HTTP token.
    """
    if not subprotocol:
        raise HandshakeException('Invalid subprotocol name: empty')
    # Parameter should be encoded HTTP token.
    state = http_header_util.ParsingState(subprotocol)
    token = http_header_util.consume_token(state)
    rest = http_header_util.peek(state)
    # If |rest| is not None, |subprotocol| is not one token or invalid. If
    # |rest| is None, |token| must not be None because |subprotocol| is
    # concatenation of |token| and |rest| and is not None.
    if rest is not None:
        raise HandshakeException('Invalid non-token string in subprotocol '
                                 'name: %r' % rest)
def parse_host_header(request):
    """Return (host, port) parsed from the request's Host header.

    Raises HandshakeException when the port part is not a valid integer.
    """
    fields = request.headers_in[common.HOST_HEADER].split(':', 1)
    if len(fields) == 1:
        # No explicit port: fall back to the scheme's default.
        return fields[0], get_default_port(request.is_https())
    try:
        return fields[0], int(fields[1])
    except ValueError, e:
        raise HandshakeException('Invalid port number format: %r' % e)
def format_header(name, value):
    """Render a single HTTP header line, CRLF-terminated."""
    return '{0}: {1}\r\n'.format(name, value)
def get_mandatory_header(request, key):
    """Return the value of header `key`, raising HandshakeException when
    the header is absent from the request."""
    value = request.headers_in.get(key)
    if value is not None:
        return value
    raise HandshakeException('Header %s is not defined' % key)
def validate_mandatory_header(request, key, expected_value, fail_status=None):
    """Raise HandshakeException unless header `key` equals `expected_value`
    (compared case-insensitively); `fail_status` becomes the exception's
    HTTP status."""
    value = get_mandatory_header(request, key)
    if value.lower() != expected_value.lower():
        raise HandshakeException(
            'Expected %r for header %s but found %r (case-insensitive)' %
            (expected_value, key, value), status=fail_status)
def check_request_line(request):
    """Raise HandshakeException unless the request line is a GET using
    HTTP/1.1, as RFC 6455 requires for the opening handshake."""
    # 5.1 1. The three character UTF-8 string "GET".
    # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
    if request.method != 'GET':
        raise HandshakeException('Method is not GET: %r' % request.method)
    if request.protocol != 'HTTP/1.1':
        raise HandshakeException('Version is not HTTP/1.1: %r' %
                                 request.protocol)
def parse_token_list(data):
    """Parses a header value which follows 1#token and returns parsed elements
    as a list of strings.
    Leading LWSes must be trimmed.

    Raises HandshakeException on a malformed list (missing comma separator)
    or when no token at all is found.
    """
    state = http_header_util.ParsingState(data)
    token_list = []
    while True:
        token = http_header_util.consume_token(state)
        if token is not None:
            token_list.append(token)
        http_header_util.consume_lwses(state)
        # End of input terminates the list; otherwise a comma must follow.
        if http_header_util.peek(state) is None:
            break
        if not http_header_util.consume_string(state, ','):
            raise HandshakeException(
                'Expected a comma but found %r' % http_header_util.peek(state))
        http_header_util.consume_lwses(state)
    if len(token_list) == 0:
        raise HandshakeException('No valid token found')
    return token_list
# vi:sts=4 sw=4 et
| mpl-2.0 |
widespace-os/amphtml | validator/webui/build.py | 14 | 5644 | #!/usr/bin/env python2.7
#
# Copyright 2016 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""A build script which (thus far) works on Ubuntu 14."""
# TODO(powdercloud): Make a gulp file or similar for this. For now
# it's simply split off from the main build.py in the parent
# directory, but this is not an idiomatic use to build a Javascript or
# Polymer project, and unlike for the parent directory there's no
# particular benefit to using Python.
import glob
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
def Die(msg):
  """Prints error and exits with status 1.

  Args:
    msg: The error message to emit
  """
  sys.stderr.write('%s\n' % msg)
  sys.exit(1)
def GetNodeJsCmd():
  """Ensure Node.js is installed and return the proper command to run.

  Tries 'node' then 'nodejs' (the Debian/Ubuntu binary name) by evaluating
  a trivial program; Die()s if neither works.
  """
  logging.info('entering ...')
  for cmd in ['node', 'nodejs']:
    try:
      output = subprocess.check_output([cmd, '--eval', 'console.log("42")'])
      # NOTE(review): under Python 3 check_output returns bytes, so this
      # str comparison would need b'42'; fine for the Python 2.7 this
      # script targets.
      if output.strip() == '42':
        logging.info('... done')
        return cmd
    except (subprocess.CalledProcessError, OSError):
      continue
  Die('Node.js not found. Try "apt-get install nodejs".')
def CheckPrereqs():
  """Checks that various prerequisites for this script are satisfied.

  Die()s unless running on Linux/Mac OS X, the webui source files are in
  the current directory, and the yarn package manager is available.
  """
  logging.info('entering ...')
  if platform.system() != 'Linux' and platform.system() != 'Darwin':
    Die('Sorry, this script assumes Linux or Mac OS X thus far. '
        'Please feel free to edit the source and fix it to your needs.')
  # Ensure source files are available.
  for f in ['webui.js', 'index.html',
            'logo-blue.svg', 'package.json']:
    if not os.path.exists(f):
      Die('%s not found. Must run in amp_validator source directory.' % f)
  # Ensure that yarn is installed.
  try:
    subprocess.check_output(['yarn', '--version'])
  except (subprocess.CalledProcessError, OSError):
    Die('Yarn package manager not found. Run '
        '"curl -o- -L https://yarnpkg.com/install.sh | bash" '
        'or see https://yarnpkg.com/docs/install.')
def SetupOutDir(out_dir):
  """Sets up a clean output directory.

  Any existing directory of the same name is removed first, then an empty
  one is created.

  Args:
    out_dir: directory name of the output directory. Must not have slashes,
      dots, etc.
  """
  logging.info('entering ...')
  assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
  if os.path.exists(out_dir):
    # Use shutil.rmtree rather than shelling out to `rm -rf`: portable,
    # and no subprocess is needed for a pure filesystem operation.
    shutil.rmtree(out_dir)
  os.mkdir(out_dir)
  logging.info('... done')
def InstallNodeDependencies():
  """Installs the dependencies using yarn."""
  logging.info('entering ...')
  # Install the project dependencies specified in package.json into
  # node_modules.
  logging.info('installing AMP Validator webui dependencies ...')
  if os.environ.get('TRAVIS'):
    # On Travis, discard yarn's noisy output. Use a context manager so the
    # devnull handle is closed (the original leaked the open file object).
    with open(os.devnull, 'wb') as devnull:
      subprocess.check_call(['yarn', 'install'], stdout=devnull)
  else:
    subprocess.check_call(['yarn', 'install'], stdout=sys.stdout)
  logging.info('... done')
def CreateWebuiAppengineDist(out_dir):
  """Creates the webui vulcanized directory to deploy to Appengine.

  Args:
    out_dir: directory name of the output directory. Must not have slashes,
      dots, etc.
  """
  logging.info('entering ...')
  # Create the temp dir *before* entering the try block: if mkdtemp itself
  # failed, the finally clause would otherwise raise NameError on `tempdir`.
  tempdir = tempfile.mkdtemp()
  try:
    # Merge the contents of webui with the installed node_modules into a
    # common root (a temp directory). This lets us use the vulcanize tool.
    for entry in os.listdir('.'):
      if entry != 'node_modules':
        if os.path.isfile(entry):
          shutil.copyfile(entry, os.path.join(tempdir, entry))
        else:
          shutil.copytree(entry, os.path.join(tempdir, entry))
    for entry in os.listdir('node_modules'):
      if not os.path.isdir('node_modules/' + entry):
        continue
      elif entry == 'web-animations-js':
        # web-animations-js is placed under @polymer so relative imports
        # in the Polymer components resolve.
        shutil.copytree(os.path.join('node_modules', entry),
                        os.path.join(tempdir, '@polymer', entry))
      elif entry != '@polymer':
        shutil.copytree(os.path.join('node_modules', entry),
                        os.path.join(tempdir, entry))
    for entry in os.listdir('node_modules/@polymer'):
      shutil.copytree(os.path.join('node_modules/@polymer', entry),
                      os.path.join(tempdir, '@polymer', entry))
    # Inline all scripts and styles into a single deployable index.html.
    vulcanized_index_html = subprocess.check_output([
        'node_modules/vulcanize/bin/vulcanize',
        '--inline-scripts', '--inline-css',
        '-p', tempdir, 'index.html'])
  finally:
    shutil.rmtree(tempdir)
  webui_out = os.path.join(out_dir, 'webui_appengine')
  shutil.copytree('.', webui_out, ignore=shutil.ignore_patterns('dist'))
  # check_output returns bytes; open in binary mode and use a context
  # manager so the handle is closed even if the write fails (the original
  # used a bare open/close pair in text mode).
  with open(os.path.join(webui_out, 'index.html'), 'wb') as f:
    f.write(vulcanized_index_html)
  logging.info('... success')
def Main():
  """The main method, which executes all build steps and runs the tests."""
  logging.basicConfig(
      format='[[%(filename)s %(funcName)s]] - %(message)s',
      level=(logging.ERROR if os.environ.get('TRAVIS') else logging.INFO))
  # Called for its side effect of verifying that Node.js is present; the
  # resolved command name itself is unused by the later steps (the original
  # bound it to an unused local).
  GetNodeJsCmd()
  CheckPrereqs()
  InstallNodeDependencies()
  SetupOutDir(out_dir='dist')
  CreateWebuiAppengineDist(out_dir='dist')
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
  Main()
| apache-2.0 |
Stratio/stratio-cassandra | pylib/cqlshlib/test/test_cqlsh_commands.py | 147 | 1243 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from .basecase import BaseTestCase, cqlsh
class TestCqlshCommands(BaseTestCase):
    """Placeholder suite for cqlsh's interactive commands.

    All cases are currently stubs awaiting implementation.
    """
    def setUp(self):
        # No per-test fixtures are required yet.
        pass
    def tearDown(self):
        pass
    def test_show(self):
        # TODO: exercise the SHOW command.
        pass
    def test_describe(self):
        # TODO: exercise the DESCRIBE command.
        pass
    def test_exit(self):
        # TODO: exercise the EXIT command.
        pass
    def test_help(self):
        # TODO: exercise the HELP command.
        pass
| apache-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/boto-2.38.0/boto/roboto/awsqueryrequest.py | 153 | 18579 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import os
import boto
import optparse
import copy
import boto.exception
import boto.roboto.awsqueryservice
import bdb
import traceback
try:
import epdb as debugger
except ImportError:
import pdb as debugger
def boto_except_hook(debugger_flag, debug_flag):
    """Build a sys.excepthook that optionally drops into a debugger.

    With debugger_flag set (and an interactive terminal), the hook enters a
    post-mortem debugging session; with debug_flag set it prints a traceback;
    otherwise it just prints the exception value. All paths exit with
    status 1.
    """
    def excepthook(exc_type, exc_value, exc_tb):
        # A debugger quit is not an error worth reporting.
        if exc_type is bdb.BdbQuit:
            sys.exit(1)
        # Restore the default hook so a secondary failure reports normally.
        sys.excepthook = sys.__excepthook__
        if debugger_flag and sys.stdout.isatty() and sys.stdin.isatty():
            # epdb's post_mortem accepts the full exception triple.
            if debugger.__name__ == 'epdb':
                debugger.post_mortem(exc_tb, exc_type, exc_value)
            else:
                debugger.post_mortem(exc_tb)
        elif debug_flag:
            print(traceback.print_tb(exc_tb))
            sys.exit(1)
        else:
            print(exc_value)
            sys.exit(1)
    return excepthook
class Line(object):
    """Accumulates one tab-separated output row, printed at most once."""

    def __init__(self, fmt, data, label):
        self.fmt = fmt
        self.data = data
        self.label = label
        self.printed = False
        self.line = '%s\t' % label

    def append(self, datum):
        """Add one tab-terminated field to the row."""
        self.line = self.line + ('%s\t' % datum)

    def print_it(self):
        """Emit the row, guarding against duplicate printing."""
        if self.printed:
            return
        print(self.line)
        self.printed = True
class RequiredParamError(boto.exception.BotoClientError):
    """Raised when one or more required request parameters are missing."""

    def __init__(self, required):
        self.required = required
        message = 'Required parameters are missing: %s' % required
        super(RequiredParamError, self).__init__(message)
class EncoderError(boto.exception.BotoClientError):
    """Raised when a parameter value cannot be encoded for the request."""

    def __init__(self, error_msg):
        message = 'Error encoding value (%s)' % error_msg
        super(EncoderError, self).__init__(message)
class FilterError(boto.exception.BotoClientError):
    """Raised when unrecognized filter names are supplied."""

    def __init__(self, filters):
        self.filters = filters
        message = 'Unknown filters: %s' % filters
        super(FilterError, self).__init__(message)
class Encoder(object):
    """Encodes typed parameter values into flat AWS query request params.

    Each ``encode_<ptype>`` classmethod writes one or more entries into the
    request-parameter dict ``rp``; dispatch is driven by the parameter
    descriptor's ``ptype`` attribute.
    """

    @classmethod
    def encode(cls, p, rp, v, label=None):
        """Encode value ``v`` for parameter ``p`` into the dict ``rp``.

        Raises:
            EncoderError: if no encoder exists for ``p.ptype``.
        """
        # Parameters whose names start with '_' are internal-only.
        if p.name.startswith('_'):
            return
        # Look the encoder up *outside* the call so an AttributeError raised
        # inside a legitimate encoder is not misreported as an unknown
        # parameter type (the original wrapped the call in the try as well).
        try:
            mthd = getattr(cls, 'encode_' + p.ptype)
        except AttributeError:
            raise EncoderError('Unknown type: %s' % p.ptype)
        mthd(p, rp, v, label)

    @classmethod
    def encode_string(cls, p, rp, v, l):
        if l:
            label = l
        else:
            label = p.name
        rp[label] = v

    # Files and enums are transmitted on the wire as plain strings.
    encode_file = encode_string
    encode_enum = encode_string

    @classmethod
    def encode_integer(cls, p, rp, v, l):
        if l:
            label = l
        else:
            label = p.name
        rp[label] = '%d' % v

    @classmethod
    def encode_boolean(cls, p, rp, v, l):
        if l:
            label = l
        else:
            label = p.name
        # The query protocol expects lowercase 'true'/'false'.
        if v:
            v = 'true'
        else:
            v = 'false'
        rp[label] = v

    @classmethod
    def encode_datetime(cls, p, rp, v, l):
        # Datetimes are assumed to already be ISO-formatted strings.
        if l:
            label = l
        else:
            label = p.name
        rp[label] = v

    @classmethod
    def encode_array(cls, p, rp, v, l):
        """Encode a list as Name.1, Name.2, ... request parameters."""
        v = boto.utils.mklist(v)
        if l:
            label = l
        else:
            label = p.name
        label = label + '.%d'
        for i, value in enumerate(v):
            rp[label % (i + 1)] = value
class AWSQueryRequest(object):
    """Base class describing a single AWS Query-protocol request.

    Subclasses fill in the class attributes below and gain argument
    processing, CLI parsing, request encoding and response handling.
    """
    # Connection class used to talk to the service (set by subclasses).
    ServiceClass = None
    # Human-readable description used in CLI help output.
    Description = ''
    # Parameter, positional-argument, and filter descriptors for the request.
    Params = []
    Args = []
    Filters = []
    # Schema of the expected response, used for markers and CLI formatting.
    Response = {}
    # Maps wire parameter types to optparse option types (None = flag).
    CLITypeMap = {'string' : 'string',
                  'integer' : 'int',
                  'int' : 'int',
                  'enum' : 'choice',
                  'datetime' : 'string',
                  'dateTime' : 'string',
                  'file' : 'string',
                  'boolean' : None}
    @classmethod
    def name(cls):
        """Return the wire name of this request (the class's own name)."""
        return cls.__name__
def __init__(self, **args):
self.args = args
self.parser = None
self.cli_options = None
self.cli_args = None
self.cli_output_format = None
self.connection = None
self.list_markers = []
self.item_markers = []
self.request_params = {}
self.connection_args = None
def __repr__(self):
return self.name()
def get_connection(self, **args):
if self.connection is None:
self.connection = self.ServiceClass(**args)
return self.connection
@property
def status(self):
retval = None
if self.http_response is not None:
retval = self.http_response.status
return retval
@property
def reason(self):
retval = None
if self.http_response is not None:
retval = self.http_response.reason
return retval
@property
def request_id(self):
retval = None
if self.aws_response is not None:
retval = getattr(self.aws_response, 'requestId')
return retval
def process_filters(self):
filters = self.args.get('filters', [])
filter_names = [f['name'] for f in self.Filters]
unknown_filters = [f for f in filters if f not in filter_names]
if unknown_filters:
raise FilterError('Unknown filters: %s' % unknown_filters)
for i, filter in enumerate(self.Filters):
name = filter['name']
if name in filters:
self.request_params['Filter.%d.Name' % (i+1)] = name
for j, value in enumerate(boto.utils.mklist(filters[name])):
Encoder.encode(filter, self.request_params, value,
'Filter.%d.Value.%d' % (i+1, j+1))
    def process_args(self, **args):
        """
        Responsible for walking through Params defined for the request and:

        * Matching them with keyword parameters passed to the request
          constructor or via the command line.
        * Checking to see if all required parameters have been specified
          and raising an exception, if not.
        * Encoding each value into the set of request parameters that will
          be sent in the request to the AWS service.
        """
        self.args.update(args)
        # Keep a copy for the connection constructor; request-only values
        # are removed from it as they are consumed below.
        self.connection_args = copy.copy(self.args)
        if 'debug' in self.args and self.args['debug'] >= 2:
            boto.set_stream_logger(self.name())
        required = [p.name for p in self.Params+self.Args if not p.optional]
        for param in self.Params+self.Args:
            # Determine the python-level attribute name for this parameter.
            if param.long_name:
                python_name = param.long_name.replace('-', '_')
            else:
                python_name = boto.utils.pythonize_name(param.name, '_')
            value = None
            if python_name in self.args:
                value = self.args[python_name]
            if value is None:
                # Explicitly passed values take precedence over defaults.
                value = param.default
            if value is not None:
                if param.name in required:
                    required.remove(param.name)
                if param.request_param:
                    # Use the param's custom encoder when present, otherwise
                    # the generic type-driven Encoder.
                    if param.encoder:
                        param.encoder(param, self.request_params, value)
                    else:
                        Encoder.encode(param, self.request_params, value)
            if python_name in self.args:
                del self.connection_args[python_name]
        if required:
            # Build a readable list of the missing options for the error.
            l = []
            for p in self.Params+self.Args:
                if p.name in required:
                    if p.short_name and p.long_name:
                        l.append('(%s, %s)' % (p.optparse_short_name,
                                               p.optparse_long_name))
                    elif p.short_name:
                        l.append('(%s)' % p.optparse_short_name)
                    else:
                        l.append('(%s)' % p.optparse_long_name)
            raise RequiredParamError(','.join(l))
        boto.log.debug('request_params: %s' % self.request_params)
        self.process_markers(self.Response)
def process_markers(self, fmt, prev_name=None):
if fmt and fmt['type'] == 'object':
for prop in fmt['properties']:
self.process_markers(prop, fmt['name'])
elif fmt and fmt['type'] == 'array':
self.list_markers.append(prev_name)
self.item_markers.append(fmt['name'])
    def send(self, verb='GET', **args):
        """Encode the request, execute it, and parse the XML response.

        Returns the parsed response element on HTTP 200; otherwise logs the
        failure and raises the connection's ResponseError.
        """
        self.process_args(**args)
        self.process_filters()
        conn = self.get_connection(**self.connection_args)
        self.http_response = conn.make_request(self.name(),
                                               self.request_params,
                                               verb=verb)
        self.body = self.http_response.read()
        boto.log.debug(self.body)
        if self.http_response.status == 200:
            # The markers gathered by process_markers tell the parser which
            # elements are repeated lists vs. single items.
            self.aws_response = boto.jsonresponse.Element(list_marker=self.list_markers,
                                                          item_marker=self.item_markers)
            h = boto.jsonresponse.XmlHandler(self.aws_response, self)
            h.parse(self.body)
            return self.aws_response
        else:
            boto.log.error('%s %s' % (self.http_response.status,
                                      self.http_response.reason))
            boto.log.error('%s' % self.body)
            raise conn.ResponseError(self.http_response.status,
                                     self.http_response.reason,
                                     self.body)
def add_standard_options(self):
group = optparse.OptionGroup(self.parser, 'Standard Options')
# add standard options that all commands get
group.add_option('-D', '--debug', action='store_true',
help='Turn on all debugging output')
group.add_option('--debugger', action='store_true',
default=False,
help='Enable interactive debugger on error')
group.add_option('-U', '--url', action='store',
help='Override service URL with value provided')
group.add_option('--region', action='store',
help='Name of the region to connect to')
group.add_option('-I', '--access-key-id', action='store',
help='Override access key value')
group.add_option('-S', '--secret-key', action='store',
help='Override secret key value')
group.add_option('--version', action='store_true',
help='Display version string')
if self.Filters:
self.group.add_option('--help-filters', action='store_true',
help='Display list of available filters')
self.group.add_option('--filter', action='append',
metavar=' name=value',
help='A filter for limiting the results')
self.parser.add_option_group(group)
def process_standard_options(self, options, args, d):
if hasattr(options, 'help_filters') and options.help_filters:
print('Available filters:')
for filter in self.Filters:
print('%s\t%s' % (filter.name, filter.doc))
sys.exit(0)
if options.debug:
self.args['debug'] = 2
if options.url:
self.args['url'] = options.url
if options.region:
self.args['region'] = options.region
if options.access_key_id:
self.args['aws_access_key_id'] = options.access_key_id
if options.secret_key:
self.args['aws_secret_access_key'] = options.secret_key
if options.version:
# TODO - Where should the version # come from?
print('version x.xx')
exit(0)
sys.excepthook = boto_except_hook(options.debugger,
options.debug)
def get_usage(self):
s = 'usage: %prog [options] '
l = [ a.long_name for a in self.Args ]
s += ' '.join(l)
for a in self.Args:
if a.doc:
s += '\n\n\t%s - %s' % (a.long_name, a.doc)
return s
    def build_cli_parser(self):
        """Construct the optparse parser from Params plus standard options."""
        self.parser = optparse.OptionParser(description=self.Description,
                                            usage=self.get_usage())
        self.add_standard_options()
        for param in self.Params:
            ptype = action = choices = None
            # Map the wire type to an optparse type; None means store_true.
            if param.ptype in self.CLITypeMap:
                ptype = self.CLITypeMap[param.ptype]
                action = 'store'
            if param.ptype == 'boolean':
                action = 'store_true'
            elif param.ptype == 'array':
                # Only homogeneous single-type arrays map onto 'append'.
                if len(param.items) == 1:
                    ptype = param.items[0]['type']
                    action = 'append'
            elif param.cardinality != 1:
                action = 'append'
            if ptype or action == 'store_true':
                if param.short_name:
                    self.parser.add_option(param.optparse_short_name,
                                           param.optparse_long_name,
                                           action=action, type=ptype,
                                           choices=param.choices,
                                           help=param.doc)
                elif param.long_name:
                    self.parser.add_option(param.optparse_long_name,
                                           action=action, type=ptype,
                                           choices=param.choices,
                                           help=param.doc)
    def do_cli(self):
        """Parse the command line, execute the request and print the result.

        Exits the process with status 1 on any error.
        """
        if not self.parser:
            self.build_cli_parser()
        self.cli_options, self.cli_args = self.parser.parse_args()
        d = {}
        self.process_standard_options(self.cli_options, self.cli_args, d)
        # Collect option values for the declared request parameters.
        for param in self.Params:
            if param.long_name:
                p_name = param.long_name.replace('-', '_')
            else:
                p_name = boto.utils.pythonize_name(param.name)
            value = getattr(self.cli_options, p_name)
            if param.ptype == 'file' and value:
                # '-' means "read the payload from stdin".
                if value == '-':
                    value = sys.stdin.read()
                else:
                    path = os.path.expanduser(value)
                    path = os.path.expandvars(path)
                    if os.path.isfile(path):
                        fp = open(path)
                        value = fp.read()
                        fp.close()
                    else:
                        self.parser.error('Unable to read file: %s' % path)
            d[p_name] = value
        # Collect the positional arguments.
        for arg in self.Args:
            if arg.long_name:
                p_name = arg.long_name.replace('-', '_')
            else:
                p_name = boto.utils.pythonize_name(arg.name)
            value = None
            if arg.cardinality == 1:
                if len(self.cli_args) >= 1:
                    value = self.cli_args[0]
            else:
                value = self.cli_args
            d[p_name] = value
        self.args.update(d)
        # Merge any --filter name=value pairs into self.args['filters'].
        if hasattr(self.cli_options, 'filter') and self.cli_options.filter:
            d = {}
            for filter in self.cli_options.filter:
                name, value = filter.split('=')
                d[name] = value
            if 'filters' in self.args:
                self.args['filters'].update(d)
            else:
                self.args['filters'] = d
        try:
            response = self.main()
            self.cli_formatter(response)
        except RequiredParamError as e:
            print(e)
            sys.exit(1)
        except self.ServiceClass.ResponseError as err:
            print('Error(%s): %s' % (err.error_code, err.error_message))
            sys.exit(1)
        except boto.roboto.awsqueryservice.NoCredentialsError as err:
            print('Unable to find credentials.')
            sys.exit(1)
        except Exception as e:
            print(e)
            sys.exit(1)
    def _generic_cli_formatter(self, fmt, data, label=''):
        """Best-effort tab-separated rendering of the response structure."""
        if fmt['type'] == 'object':
            for prop in fmt['properties']:
                if 'name' in fmt:
                    if fmt['name'] in data:
                        data = data[fmt['name']]
                    if fmt['name'] in self.list_markers:
                        # Naive singularization: strip a trailing 's' and
                        # uppercase the element name for the row label.
                        label = fmt['name']
                        if label[-1] == 's':
                            label = label[0:-1]
                        label = label.upper()
                self._generic_cli_formatter(prop, data, label)
        elif fmt['type'] == 'array':
            for item in data:
                line = Line(fmt, item, label)
                if isinstance(item, dict):
                    for field_name in item:
                        line.append(item[field_name])
                elif isinstance(item, basestring):
                    # NOTE(review): `basestring` is Python-2-only; under
                    # Python 3 string items would be skipped here — confirm
                    # the intended interpreter before porting.
                    line.append(item)
                line.print_it()
def cli_formatter(self, data):
"""
This method is responsible for formatting the output for the
command line interface. The default behavior is to call the
generic CLI formatter which attempts to print something
reasonable. If you want specific formatting, you should
override this method and do your own thing.
:type data: dict
:param data: The data returned by AWS.
"""
if data:
self._generic_cli_formatter(self.Response, data)
| mit |
taotie12010/bigfour | openedx/core/djangoapps/content/course_structures/tests.py | 22 | 6123 | import json
from xmodule.modulestore.django import SignalHandler
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from openedx.core.djangoapps.content.course_structures.signals import listen_for_course_publish
from openedx.core.djangoapps.content.course_structures.tasks import _generate_course_structure, update_course_structure
class SignalDisconnectTestMixin(object):
    """
    Mixin for tests to disable calls to signals.listen_for_course_publish when the course_published signal is fired.
    """
    def setUp(self):
        super(SignalDisconnectTestMixin, self).setUp()
        # Detach the structure-regeneration listener so publishing courses in
        # these tests does not trigger course-structure updates.
        SignalHandler.course_published.disconnect(listen_for_course_publish)
class CourseStructureTaskTests(ModuleStoreTestCase):
    """Tests for course-structure generation and the update task."""

    def setUp(self, **kwargs):
        super(CourseStructureTaskTests, self).setUp()
        # A minimal course with one chapter to generate structure from.
        self.course = CourseFactory.create()
        self.section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
        # Start from a clean slate so each test controls its own rows.
        CourseStructure.objects.all().delete()

    def test_generate_course_structure(self):
        # Build the expected block map by walking the course tree the same
        # way the generator should, then compare dictionaries.
        blocks = {}

        def add_block(block):
            # Recursively record the block and all of its descendants.
            children = block.get_children() if block.has_children else []
            blocks[unicode(block.location)] = {
                "usage_key": unicode(block.location),
                "block_type": block.category,
                "display_name": block.display_name,
                "graded": block.graded,
                "format": block.format,
                "children": [unicode(child.location) for child in children]
            }
            for child in children:
                add_block(child)

        add_block(self.course)
        expected = {
            'root': unicode(self.course.location),
            'blocks': blocks
        }
        self.maxDiff = None
        actual = _generate_course_structure(self.course.id)
        self.assertDictEqual(actual, expected)

    def test_structure_json(self):
        """
        Although stored as compressed data, CourseStructure.structure_json should always return the uncompressed string.
        """
        course_id = 'a/b/c'
        structure = {
            'root': course_id,
            'blocks': {
                course_id: {
                    'id': course_id
                }
            }
        }
        structure_json = json.dumps(structure)
        structure = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json)
        self.assertEqual(structure.structure_json, structure_json)
        # Reload the data to ensure the init signal is fired to decompress the data.
        cs = CourseStructure.objects.get(course_id=self.course.id)
        self.assertEqual(cs.structure_json, structure_json)

    def test_structure(self):
        """
        CourseStructure.structure should return the uncompressed, JSON-parsed course structure.
        """
        structure = {
            'root': 'a/b/c',
            'blocks': {
                'a/b/c': {
                    'id': 'a/b/c'
                }
            }
        }
        structure_json = json.dumps(structure)
        cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json)
        self.assertDictEqual(cs.structure, structure)

    def test_ordered_blocks(self):
        # The block graph below is deliberately declared out of order:
        # ordered_blocks should start at the root and walk depth-first.
        structure = {
            'root': 'a/b/c',
            'blocks': {
                'a/b/c': {
                    'id': 'a/b/c',
                    'children': [
                        'g/h/i'
                    ]
                },
                'd/e/f': {
                    'id': 'd/e/f',
                    'children': []
                },
                'g/h/i': {
                    'id': 'h/j/k',
                    'children': [
                        'j/k/l',
                        'd/e/f'
                    ]
                },
                'j/k/l': {
                    'id': 'j/k/l',
                    'children': []
                }
            }
        }
        in_order_blocks = ['a/b/c', 'g/h/i', 'j/k/l', 'd/e/f']
        structure_json = json.dumps(structure)
        retrieved_course_structure = CourseStructure.objects.create(
            course_id=self.course.id, structure_json=structure_json
        )
        self.assertEqual(retrieved_course_structure.ordered_blocks.keys(), in_order_blocks)

    def test_block_with_missing_fields(self):
        """
        The generator should continue to operate on blocks/XModule that do not have graded or format fields.
        """
        # TODO In the future, test logging using testfixtures.LogCapture
        # (https://pythonhosted.org/testfixtures/logging.html). Talk to TestEng before adding that library.
        category = 'peergrading'
        display_name = 'Testing Module'
        module = ItemFactory.create(parent=self.section, category=category, display_name=display_name)
        structure = _generate_course_structure(self.course.id)
        usage_key = unicode(module.location)
        actual = structure['blocks'][usage_key]
        expected = {
            "usage_key": usage_key,
            "block_type": category,
            "display_name": display_name,
            "graded": False,
            "format": None,
            "children": []
        }
        self.assertEqual(actual, expected)

    def test_update_course_structure(self):
        """
        Test the actual task that orchestrates data generation and updating the database.
        """
        # Method requires string input
        course_id = self.course.id
        self.assertRaises(ValueError, update_course_structure, course_id)
        # Ensure a CourseStructure object is created
        structure = _generate_course_structure(course_id)
        update_course_structure(unicode(course_id))
        cs = CourseStructure.objects.get(course_id=course_id)
        self.assertEqual(cs.course_id, course_id)
        self.assertEqual(cs.structure, structure)
| agpl-3.0 |
gauribhoite/personfinder | env/google_appengine/lib/django-1.5/django/contrib/gis/geos/mutable_list.py | 217 | 10972 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from django.utils.functional import total_ordering
from django.utils import six
from django.utils.six.moves import xrange
@total_ordering
class ListMixin(object):
    """
    A base class which provides complete list interface.
    Derived classes must call ListMixin's __init__() function
    and implement the following:

    function _get_single_external(self, i):
        Return single item with index i for general use.
        The index i will always satisfy 0 <= i < len(self).

    function _get_single_internal(self, i):
        Same as above, but for use within the class [Optional]
        Note that if _get_single_external and _get_single_internal return
        different types of objects, _set_list must distinguish
        between the two and handle each appropriately.

    function _set_list(self, length, items):
        Recreate the entire object.

        NOTE: items may be a generator which calls _get_single_internal.
        Therefore, it is necessary to cache the values in a temporary:
            temp = list(items)
        before clobbering the original storage.

    function _set_single(self, i, value):
        Set the single item at index i to value [Optional]
        If left undefined, all mutations will result in rebuilding
        the object using _set_list.

    function __len__(self):
        Return the length

    int _minlength:
        The minimum legal length [Optional]

    int _maxlength:
        The maximum legal length [Optional]

    type or tuple _allowed:
        A type or tuple of allowed item types [Optional]

    class _IndexError:
        The type of exception to be raised on invalid index [Optional]
    """

    _minlength = 0
    _maxlength = None
    _IndexError = IndexError

    ### Python initialization and special list interface methods ###

    def __init__(self, *args, **kwargs):
        # Fall back to the external getter / full-rebuild setter when the
        # derived class does not supply the optional hooks.
        if not hasattr(self, '_get_single_internal'):
            self._get_single_internal = self._get_single_external
        if not hasattr(self, '_set_single'):
            self._set_single = self._set_single_rebuild
            self._assign_extended_slice = self._assign_extended_slice_rebuild
        super(ListMixin, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        "Get the item(s) at the specified index/slice."
        if isinstance(index, slice):
            return [self._get_single_external(i) for i in xrange(*index.indices(len(self)))]
        else:
            index = self._checkindex(index)
            return self._get_single_external(index)

    def __delitem__(self, index):
        "Delete the item(s) at the specified index/slice."
        if not isinstance(index, six.integer_types + (slice,)):
            raise TypeError("%s is not a legal index" % index)
        # calculate new length and dimensions
        origLen = len(self)
        if isinstance(index, six.integer_types):
            index = self._checkindex(index)
            indexRange = [index]
        else:
            indexRange = range(*index.indices(origLen))
        newLen = origLen - len(indexRange)
        # Lazily yield the surviving items; _rebuild/_set_list must cache
        # them before clobbering storage (see class docstring).
        newItems = ( self._get_single_internal(i)
                     for i in xrange(origLen)
                     if i not in indexRange )
        self._rebuild(newLen, newItems)

    def __setitem__(self, index, val):
        "Set the item(s) at the specified index/slice."
        if isinstance(index, slice):
            self._set_slice(index, val)
        else:
            index = self._checkindex(index)
            self._check_allowed((val,))
            self._set_single(index, val)

    def __iter__(self):
        "Iterate over the items in the list"
        for i in xrange(len(self)):
            yield self[i]

    ### Special methods for arithmetic operations ###
    def __add__(self, other):
        'add another list-like object'
        return self.__class__(list(self) + list(other))

    def __radd__(self, other):
        'add to another list-like object'
        return other.__class__(list(other) + list(self))

    def __iadd__(self, other):
        'add another list-like object to self'
        self.extend(list(other))
        return self

    def __mul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __rmul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __imul__(self, n):
        'multiply'
        # n <= 0 empties the sequence, matching built-in list semantics.
        if n <= 0:
            del self[:]
        else:
            cache = list(self)
            for i in range(n-1):
                self.extend(cache)
        return self

    def __eq__(self, other):
        # Compare element-wise; an IndexError from self means self is the
        # shorter sequence and therefore not equal.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] == other[i]
            except self._IndexError:
                # self must be shorter
                return False
            if not c:
                return False
        return len(self) == olen

    def __lt__(self, other):
        # Lexicographic comparison; total_ordering derives the rest of the
        # ordering operators from __lt__ and __eq__.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] < other[i]
            except self._IndexError:
                # self must be shorter
                return True
            if c:
                return c
            elif other[i] < self[i]:
                return False
        return len(self) < olen

    ### Public list interface Methods ###
    ## Non-mutating ##
    def count(self, val):
        "Standard list count method"
        count = 0
        for i in self:
            if val == i: count += 1
        return count

    def index(self, val):
        "Standard list index method"
        for i in xrange(0, len(self)):
            if self[i] == val: return i
        raise ValueError('%s not found in object' % str(val))

    ## Mutating ##
    def append(self, val):
        "Standard list append method"
        self[len(self):] = [val]

    def extend(self, vals):
        "Standard list extend method"
        self[len(self):] = vals

    def insert(self, index, val):
        "Standard list insert method"
        if not isinstance(index, six.integer_types):
            raise TypeError("%s is not a legal index" % index)
        self[index:index] = [val]

    def pop(self, index=-1):
        "Standard list pop method"
        result = self[index]
        del self[index]
        return result

    def remove(self, val):
        "Standard list remove method"
        del self[self.index(val)]

    def reverse(self):
        "Standard list reverse method"
        self[:] = self[-1::-1]

    def sort(self, cmp=None, key=None, reverse=False):
        "Standard list sort method"
        # NOTE(review): the `cmp` parameter exists only on Python 2's
        # list.sort; under Python 3 the cmp branch would raise — confirm
        # the intended interpreter before porting.
        if key:
            temp = [(key(v),v) for v in self]
            temp.sort(key=lambda x: x[0], reverse=reverse)
            self[:] = [v[1] for v in temp]
        else:
            temp = list(self)
            if cmp is not None:
                temp.sort(cmp=cmp, reverse=reverse)
            else:
                temp.sort(reverse=reverse)
            self[:] = temp

    ### Private routines ###
    def _rebuild(self, newLen, newItems):
        # Enforce the optional min/max length bounds before delegating the
        # actual storage replacement to the derived class.
        if newLen < self._minlength:
            raise ValueError('Must have at least %d items' % self._minlength)
        if self._maxlength is not None and newLen > self._maxlength:
            raise ValueError('Cannot have more than %d items' % self._maxlength)
        self._set_list(newLen, newItems)

    def _set_single_rebuild(self, index, value):
        # Fallback single-item setter: express the assignment as a
        # one-element slice so _set_slice/_rebuild do the work.
        self._set_slice(slice(index, index + 1, 1), [value])

    def _checkindex(self, index, correct=True):
        # Normalize a (possibly negative) index; raise the configured
        # exception type when it is out of range.
        length = len(self)
        if 0 <= index < length:
            return index
        if correct and -length <= index < 0:
            return index + length
        raise self._IndexError('invalid index: %s' % str(index))

    def _check_allowed(self, items):
        # Optional element-type enforcement via the _allowed attribute.
        if hasattr(self, '_allowed'):
            if False in [isinstance(val, self._allowed) for val in items]:
                raise TypeError('Invalid type encountered in the arguments.')

    def _set_slice(self, index, values):
        "Assign values to a slice of the object"
        try:
            iter(values)
        except TypeError:
            raise TypeError('can only assign an iterable to a slice')
        self._check_allowed(values)
        origLen = len(self)
        valueList = list(values)
        start, stop, step = index.indices(origLen)
        # CAREFUL: index.step and step are not the same!
        # step will never be None
        if index.step is None:
            self._assign_simple_slice(start, stop, valueList)
        else:
            self._assign_extended_slice(start, stop, step, valueList)

    def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
        'Assign an extended slice by rebuilding entire list'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))
        # we're not changing the length of the sequence
        newLen = len(self)
        newVals = dict(zip(indexList, valueList))
        def newItems():
            # Replace the targeted positions; pass the rest through.
            for i in xrange(newLen):
                if i in newVals:
                    yield newVals[i]
                else:
                    yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())

    def _assign_extended_slice(self, start, stop, step, valueList):
        'Assign an extended slice by re-assigning individual items'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))
        for i, val in zip(indexList, valueList):
            self._set_single(i, val)

    def _assign_simple_slice(self, start, stop, valueList):
        'Assign a simple slice; Can assign slice of any length'
        origLen = len(self)
        stop = max(start, stop)
        newLen = origLen - stop + start + len(valueList)
        def newItems():
            # Yield the prefix, then the new values at `start`, then the
            # suffix; positions in [start, stop) are dropped.
            for i in xrange(origLen + 1):
                if i == start:
                    for val in valueList:
                        yield val
                if i < origLen:
                    if i < start or i >= stop:
                        yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())
| apache-2.0 |
madongfly/grpc | src/python/grpcio/grpc/framework/alpha/utilities.py | 39 | 10994 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for use with GRPC."""
from grpc.framework.alpha import interfaces
class _RpcMethodDescription(
    interfaces.RpcMethodInvocationDescription,
    interfaces.RpcMethodServiceDescription):
  """Describes one RPC method for both the invocation and service sides.

  Holds the method's cardinality, the request/response (de)serializers,
  and the four possible service behaviors. Only the behavior matching the
  cardinality is non-None on the service side; invocation-side
  descriptions pass None for all four behaviors and for the serializers
  they do not need.
  """

  def __init__(
      self, cardinality, unary_unary, unary_stream, stream_unary,
      stream_stream, request_serializer, request_deserializer,
      response_serializer, response_deserializer):
    self._kind = cardinality
    self._uu_behavior = unary_unary
    self._us_behavior = unary_stream
    self._su_behavior = stream_unary
    self._ss_behavior = stream_stream
    self._req_serialize = request_serializer
    self._req_deserialize = request_deserializer
    self._res_serialize = response_serializer
    self._res_deserialize = response_deserializer

  def cardinality(self):
    """See interfaces.RpcMethodDescription.cardinality for specification."""
    return self._kind

  def serialize_request(self, request):
    """See interfaces.RpcMethodInvocationDescription.serialize_request."""
    return self._req_serialize(request)

  def deserialize_request(self, serialized_request):
    """See interfaces.RpcMethodServiceDescription.deserialize_request."""
    return self._req_deserialize(serialized_request)

  def serialize_response(self, response):
    """See interfaces.RpcMethodServiceDescription.serialize_response."""
    return self._res_serialize(response)

  def deserialize_response(self, serialized_response):
    """See interfaces.RpcMethodInvocationDescription.deserialize_response."""
    return self._res_deserialize(serialized_response)

  def service_unary_unary(self, request, context):
    """See interfaces.RpcMethodServiceDescription.service_unary_unary."""
    return self._uu_behavior(request, context)

  def service_unary_stream(self, request, context):
    """See interfaces.RpcMethodServiceDescription.service_unary_stream."""
    return self._us_behavior(request, context)

  def service_stream_unary(self, request_iterator, context):
    """See interfaces.RpcMethodServiceDescription.service_stream_unary."""
    return self._su_behavior(request_iterator, context)

  def service_stream_stream(self, request_iterator, context):
    """See interfaces.RpcMethodServiceDescription.service_stream_stream."""
    return self._ss_behavior(request_iterator, context)
def unary_unary_invocation_description(
    request_serializer, response_deserializer):
  """Builds an invocation-side description of a unary-unary RPC method.

  Args:
    request_serializer: Callable mapping a request value to a bytestring.
    response_deserializer: Callable mapping a bytestring to a response value.

  Returns:
    An interfaces.RpcMethodInvocationDescription for a
      unary-request/unary-response RPC method.
  """
  cardinality = interfaces.Cardinality.UNARY_UNARY
  return _RpcMethodDescription(
      cardinality, None, None, None, None,
      request_serializer, None, None, response_deserializer)
def unary_stream_invocation_description(
    request_serializer, response_deserializer):
  """Builds an invocation-side description of a unary-stream RPC method.

  Args:
    request_serializer: Callable mapping a request value to a bytestring.
    response_deserializer: Callable mapping a bytestring to a response value.

  Returns:
    An interfaces.RpcMethodInvocationDescription for a
      unary-request/streaming-response RPC method.
  """
  cardinality = interfaces.Cardinality.UNARY_STREAM
  return _RpcMethodDescription(
      cardinality, None, None, None, None,
      request_serializer, None, None, response_deserializer)
def stream_unary_invocation_description(
    request_serializer, response_deserializer):
  """Builds an invocation-side description of a stream-unary RPC method.

  Args:
    request_serializer: Callable mapping a request value to a bytestring.
    response_deserializer: Callable mapping a bytestring to a response value.

  Returns:
    An interfaces.RpcMethodInvocationDescription for a
      streaming-request/unary-response RPC method.
  """
  cardinality = interfaces.Cardinality.STREAM_UNARY
  return _RpcMethodDescription(
      cardinality, None, None, None, None,
      request_serializer, None, None, response_deserializer)
def stream_stream_invocation_description(
    request_serializer, response_deserializer):
  """Builds an invocation-side description of a stream-stream RPC method.

  Args:
    request_serializer: Callable mapping a request value to a bytestring.
    response_deserializer: Callable mapping a bytestring to a response value.

  Returns:
    An interfaces.RpcMethodInvocationDescription for a
      streaming-request/streaming-response RPC method.
  """
  cardinality = interfaces.Cardinality.STREAM_STREAM
  return _RpcMethodDescription(
      cardinality, None, None, None, None,
      request_serializer, None, None, response_deserializer)
def unary_unary_service_description(
    behavior, request_deserializer, response_serializer):
  """Builds a service-side description wrapping a unary-unary behavior.

  Args:
    behavior: Callable taking a single request and an interfaces.RpcContext
      and returning a single response.
    request_deserializer: Callable mapping a bytestring to a request value.
    response_serializer: Callable mapping a response value to a bytestring.

  Returns:
    An interfaces.RpcMethodServiceDescription for a
      unary-request/unary-response RPC method.
  """
  cardinality = interfaces.Cardinality.UNARY_UNARY
  return _RpcMethodDescription(
      cardinality, behavior, None, None, None,
      None, request_deserializer, response_serializer, None)
def unary_stream_service_description(
    behavior, request_deserializer, response_serializer):
  """Builds a service-side description wrapping a unary-stream behavior.

  Args:
    behavior: Callable taking a single request and an interfaces.RpcContext
      and returning an iterator of zero or more responses.
    request_deserializer: Callable mapping a bytestring to a request value.
    response_serializer: Callable mapping a response value to a bytestring.

  Returns:
    An interfaces.RpcMethodServiceDescription for a
      unary-request/streaming-response RPC method.
  """
  cardinality = interfaces.Cardinality.UNARY_STREAM
  return _RpcMethodDescription(
      cardinality, None, behavior, None, None,
      None, request_deserializer, response_serializer, None)
def stream_unary_service_description(
    behavior, request_deserializer, response_serializer):
  """Builds a service-side description wrapping a stream-unary behavior.

  Args:
    behavior: Callable taking an iterator of zero or more requests and an
      interfaces.RpcContext and returning a single response.
    request_deserializer: Callable mapping a bytestring to a request value.
    response_serializer: Callable mapping a response value to a bytestring.

  Returns:
    An interfaces.RpcMethodServiceDescription for a
      streaming-request/unary-response RPC method.
  """
  cardinality = interfaces.Cardinality.STREAM_UNARY
  return _RpcMethodDescription(
      cardinality, None, None, behavior, None,
      None, request_deserializer, response_serializer, None)
def stream_stream_service_description(
    behavior, request_deserializer, response_serializer):
  """Builds a service-side description wrapping a stream-stream behavior.

  Args:
    behavior: Callable taking an iterator of zero or more requests and an
      interfaces.RpcContext and returning an iterator of zero or more
      responses.
    request_deserializer: Callable mapping a bytestring to a request value.
    response_serializer: Callable mapping a response value to a bytestring.

  Returns:
    An interfaces.RpcMethodServiceDescription for a
      streaming-request/streaming-response RPC method.
  """
  cardinality = interfaces.Cardinality.STREAM_STREAM
  return _RpcMethodDescription(
      cardinality, None, None, None, behavior,
      None, request_deserializer, response_serializer, None)
| bsd-3-clause |
2uller/LotF | App/Lib/stringold.py | 67 | 12881 | # module 'string' -- A collection of string operations
# Warning: most of the code you see here isn't normally used nowadays. With
# Python 1.6, many of these functions are implemented as methods on the
# standard string object. They used to be implemented by a built-in module
# called strop, but strop is now obsolete itself.
"""Common string manipulations.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
"""
from warnings import warnpy3k
warnpy3k("the stringold module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
# Case conversion helpers
_idmap = ''
for i in range(256): _idmap = _idmap + chr(i)
del i
# Backward compatible names for exceptions
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# convert UPPER CASE letters to lower case
def lower(s):
    """Return a copy of *s* with upper-case characters converted to lower case."""
    converted = s.lower()
    return converted
# Convert lower case letters to UPPER CASE
def upper(s):
    """Return a copy of *s* with lower-case characters converted to upper case."""
    converted = s.upper()
    return converted
# Swap lower case letters and UPPER CASE
def swapcase(s):
    """Return a copy of *s* with the case of every cased character inverted."""
    converted = s.swapcase()
    return converted
# Strip leading and trailing tabs and spaces
def strip(s):
    """Return *s* without leading or trailing whitespace."""
    return s.strip()
# Strip leading tabs and spaces
def lstrip(s):
    """Return *s* without leading whitespace."""
    return s.lstrip()
# Strip trailing tabs and spaces
def rstrip(s):
    """Return *s* without trailing whitespace."""
    return s.rstrip()
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=0):
    """Split *s* into a list of words.

    *sep* is the delimiter (any whitespace run when None); *maxsplit* is
    forwarded unchanged to str.split, so the historical default of 0 keeps
    whatever meaning str.split gives it.  (split and splitfields are
    synonyms.)
    """
    return s.split(sep, maxsplit)

splitfields = split
# Join fields with optional separator
def join(words, sep = ' '):
    """Concatenate *words*, placing *sep* (a single space by default)
    between consecutive items.  (join and joinfields are synonyms.)
    """
    return sep.join(words)

joinfields = join
# for a little bit of speed
_apply = apply  # Python 2 builtin; removed in Python 3, so this line is 2.x-only.
# Find substring, raise exception if not found
def index(s, *args):
    """Return the lowest index of a substring in *s*.

    Accepts the same optional start/end arguments as str.index and, like
    it, raises ValueError when the substring is not found.
    """
    return s.index(*args)
# Find last substring, raise exception if not found
def rindex(s, *args):
    """Return the highest index of a substring in *s*.

    Accepts the same optional start/end arguments as str.rindex and, like
    it, raises ValueError when the substring is not found.
    """
    return s.rindex(*args)
# Count non-overlapping occurrences of substring
def count(s, *args):
    """Count non-overlapping occurrences of a substring in *s*.

    Optional start/end arguments are interpreted as in slice notation,
    exactly as for str.count.
    """
    return s.count(*args)
# Find substring, return -1 if not found
def find(s, *args):
    """Return the lowest index of a substring in *s*, or -1 when absent.

    Optional start/end arguments are interpreted as in slice notation,
    exactly as for str.find.
    """
    return s.find(*args)
# Find last substring, return -1 if not found
def rfind(s, *args):
    """Return the highest index of a substring in *s*, or -1 when absent.

    Optional start/end arguments are interpreted as in slice notation,
    exactly as for str.rfind.
    """
    return s.rfind(*args)
# for a bit of speed
_float = float  # cache builtins as module globals for faster lookup
_int = int
_long = long  # Python 2 only: 'long' does not exist in Python 3
_StringType = type('')  # str; note unicode instances fail the type checks below
# Convert string to float
def atof(s):
    """Convert the string *s* to a floating point number.

    Raises TypeError when *s* is not a plain string.
    """
    if type(s) != type(''):
        raise TypeError('argument 1: expected string, %s found' %
                        type(s).__name__)
    return float(s)
# Convert string to integer
def atoi(*args):
    """atoi(s [,base]) -> int

    Convert string s to an integer in the given base (default 10).  When
    base is 0 the base is inferred from a leading 0 / 0x / 0X prefix; a
    0x / 0X prefix is also accepted when base is 16.

    Raises TypeError when called with no arguments or a non-string first
    argument; extra-argument errors propagate from int() itself.
    """
    if not args:
        raise TypeError('function requires at least 1 argument: %d given' %
                        len(args))
    s = args[0]
    if type(s) != type(''):
        raise TypeError('argument 1: expected string, %s found' %
                        type(s).__name__)
    return int(*args)
# Convert string to long integer
def atol(*args):
    """atol(s [,base]) -> long

    Convert string s to a long integer in the given base (default 10).
    When base is 0 the base is inferred from a leading 0 / 0x / 0X prefix
    and a trailing L or l is accepted; a 0x / 0X prefix is also accepted
    when base is 16.

    Raises TypeError when called with no arguments or a non-string first
    argument; extra-argument errors propagate from long() itself.
    """
    if not args:
        raise TypeError('function requires at least 1 argument: %d given' %
                        len(args))
    s = args[0]
    if type(s) != type(''):
        raise TypeError('argument 1: expected string, %s found' %
                        type(s).__name__)
    return _long(*args)
# Left-justify a string
def ljust(s, width):
    """Left-justify *s* in a field of *width*, padding with spaces.

    The string is never truncated.
    """
    pad = width - len(s)
    if pad <= 0:
        return s
    return s + ' ' * pad
# Right-justify a string
def rjust(s, width):
    """Right-justify *s* in a field of *width*, padding with spaces.

    The string is never truncated.
    """
    pad = width - len(s)
    if pad <= 0:
        return s
    return ' ' * pad + s
# Center a string
def center(s, width):
    """center(s, width) -> string

    Return s centered in a field of the specified width, padded with
    spaces as needed.  The string is never truncated.

    Uses floor division (//) so the padding arithmetic stays integral:
    identical to / on Python 2 ints, while plain / on Python 3 would
    produce a float and break the ' ' * half multiplication.
    """
    n = width - len(s)
    if n <= 0: return s
    half = n // 2
    if n % 2 and width % 2:
        # This ensures that center(center(s, i), j) = center(s, j)
        half = half + 1
    return ' ' * half + s + ' ' * (n - half)
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
    """Pad the numeric string *x* with zeros on the left to *width* chars.

    *x* may also be a number, in which case its repr is padded (a
    deprecated convenience).  A leading sign stays in front of the zeros
    and the string is never truncated.
    """
    s = x if type(x) == type('') else repr(x)
    n = len(s)
    if n >= width:
        return s
    sign = ''
    if s[0] in ('-', '+'):
        sign, s = s[0], s[1:]
    return sign + '0' * (width - n) + s
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
    """Replace tab characters in *s* with spaces.

    Each tab becomes as many spaces as needed to reach the next multiple
    of *tabsize* columns; the column count restarts after every newline.
    Other non-printing characters are not accounted for.
    """
    expanded = ''
    line = ''
    for ch in s:
        if ch == '\t':
            ch = ' ' * (tabsize - len(line) % tabsize)
        line = line + ch
        if ch == '\n':
            expanded = expanded + line
            line = ''
    return expanded + line
# Character translation through look-up table.
def translate(s, table, deletions=""):
"""translate(s,table [,deletechars]) -> string
Return a copy of the string s, where all characters occurring
in the optional argument deletechars are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256.
"""
return s.translate(table, deletions)
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
    """Return *s* with only its first character capitalized (the rest
    lower-cased)."""
    return s.capitalize()
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
    """Capitalize each word of *s* and rejoin the words.

    Words are produced by s.split(sep); when *sep* is None the words are
    rejoined with a single space, so runs of whitespace collapse.
    """
    words = [word.capitalize() for word in s.split(sep)]
    return (sep or ' ').join(words)
# Construct a translation string
_idmapL = None
def maketrans(fromstr, tostr):
    """maketrans(frm, to) -> string

    Return a translation table (a string 256 characters long) suitable
    for use in string.translate, mapping each character of frm to the
    character of to at the same position.

    Raises ValueError when frm and to differ in length.
    """
    if len(fromstr) != len(tostr):
        # Call form of raise: valid on both Python 2 and 3; the original
        # "raise ValueError, msg" statement is a syntax error on 3.x.
        raise ValueError("maketrans arguments must have same length")
    global _idmapL
    if not _idmapL:
        # Lazily build the identity table once from _idmap.
        _idmapL = list(_idmap)
    L = _idmapL[:]
    # list() materializes the codes so len()/indexing also work on
    # Python 3, where map() is lazy; on Python 2 it is a cheap copy.
    codes = list(map(ord, fromstr))
    for i in range(len(codes)):
        L[codes[i]] = tostr[i]
    return join(L, "")
# Substring replacement (global)
def replace(s, old, new, maxsplit=0):
    """Replace occurrences of *old* in *s* with *new*.

    *maxsplit* is forwarded unchanged to str.replace as the replacement
    count, so the historical default of 0 keeps whatever meaning
    str.replace gives it.
    """
    return s.replace(old, new, maxsplit)
# XXX: transitional
#
# If string objects do not have methods, then we need to use the old string.py
# library, which uses strop for many more things than just the few outlined
# below.
try:
''.upper
except AttributeError:
from stringold import *
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
try:
from strop import maketrans, lowercase, uppercase, whitespace
letters = lowercase + uppercase
except ImportError:
pass # Use the original versions
| gpl-2.0 |
androidbftab1/bf-kernel | tools/perf/scripts/python/netdev-times.py | 1544 | 15191 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
	# Convert the interval from src to dst (both nanoseconds) into msec.
	delta = dst - src
	return delta / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
	# Skip entries for other devices when a "dev=" filter is active
	# (dev == 0 means no filter was given on the command line).
	if dev != 0 and hunk['dev'].find(dev) < 0:
		return
	# Columns: device, skb length, Qdisc-enqueue timestamp, then the
	# queue->xmit and xmit->free latencies, both in milliseconds.
	print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
		(hunk['dev'], hunk['len'],
		nsecs_secs(hunk['queue_t']),
		nsecs_nsecs(hunk['queue_t'])/1000,
		diff_msec(hunk['queue_t'], hunk['xmit_t']),
		diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
	# Render one receive hunk: the hard irqs that raised NET_RX, the
	# softirq entry, and every napi_poll / netif_receive_skb event,
	# with all times shown relative to the first irq entry (base_t).
	show_hunk = 0
	irq_list = hunk['irq_list']
	cpu = irq_list[0]['cpu']
	base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be showed
	if dev != 0:
		for i in range(len(irq_list)):
			if irq_list[i]['name'].find(dev) >= 0:
				show_hunk = 1
				break
	else:
		show_hunk = 1
	if show_hunk == 0:
		return
	print "%d.%06dsec cpu=%d" % \
		(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
	for i in range(len(irq_list)):
		print PF_IRQ_ENTRY % \
			(diff_msec(base_t, irq_list[i]['irq_ent_t']),
			irq_list[i]['irq'], irq_list[i]['name'])
		print PF_JOINT
		irq_event_list = irq_list[i]['event_list']
		for j in range(len(irq_event_list)):
			irq_event = irq_event_list[j]
			if irq_event['event'] == 'netif_rx':
				print PF_NET_RX % \
					(diff_msec(base_t, irq_event['time']),
					irq_event['skbaddr'])
				print PF_JOINT
	print PF_SOFT_ENTRY % \
		diff_msec(base_t, hunk['sirq_ent_t'])
	print PF_JOINT
	event_list = hunk['event_list']
	for i in range(len(event_list)):
		event = event_list[i]
		if event['event_name'] == 'napi_poll':
			print PF_NAPI_POLL % \
				(diff_msec(base_t, event['event_t']), event['dev'])
			# close the chart after the last event, otherwise
			# keep the connecting bar
			if i == len(event_list) - 1:
				print ""
			else:
				print PF_JOINT
		else:
			print PF_NET_RECV % \
				(diff_msec(base_t, event['event_t']), event['skbaddr'],
				event['len'])
			# 'comm' marks an skb consumed by skb_copy_datagram_iovec;
			# 'handle' marks one freed via kfree_skb/consume_skb.
			if 'comm' in event.keys():
				print PF_WJOINT
				print PF_CPY_DGRAM % \
					(diff_msec(base_t, event['comm_t']),
					event['pid'], event['comm'])
			elif 'handle' in event.keys():
				print PF_WJOINT
				if event['handle'] == "kfree_skb":
					print PF_KFREE_SKB % \
						(diff_msec(base_t,
						event['comm_t']),
						event['location'])
				elif event['handle'] == "consume_skb":
					print PF_CONS_SKB % \
						diff_msec(base_t,
						event['comm_t'])
				print PF_JOINT
def trace_begin():
	"""Called by perf before the trace: parse the script's options."""
	global show_tx
	global show_rx
	global dev
	global debug
	# Recognized options: tx, rx, dev=<name>, debug.
	for arg in sys.argv[1:]:
		if arg == 'tx':
			show_tx = 1
		elif arg == 'rx':
			show_rx = 1
		elif arg.find('dev=', 0, 4) >= 0:
			dev = arg[4:]
		elif arg == 'debug':
			debug = 1
	# Without an explicit tx/rx choice, show both charts.
	if show_tx == 0 and show_rx == 0:
		show_tx = 1
		show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
	"""Perf hook: buffer NET_RX softirq-entry events for trace_end()."""
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
	"""Perf hook: buffer NET_RX softirq-exit events for trace_end()."""
	# vec is decoded against irq__softirq_entry's symbol table; the
	# softirq tracepoints apparently share one vector encoding.
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
	"""Perf hook: buffer NET_RX softirq-raise events for trace_end()."""
	# vec is decoded against irq__softirq_entry's symbol table; the
	# softirq tracepoints apparently share one vector encoding.
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
			callchain, irq, irq_name):
	"""Perf hook: buffer hard-irq entry events for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name))
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
	"""Perf hook: buffer hard-irq exit events for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret))
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
	"""Perf hook: buffer napi_poll events for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name))
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
			skblen, dev_name):
	"""Perf hook: buffer netif_receive_skb events for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm,
		skbaddr, skblen, dev_name))
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
			skblen, dev_name):
	"""Perf hook: buffer netif_rx events for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm,
		skbaddr, skblen, dev_name))
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, skblen, dev_name):
	"""Perf hook: buffer net_dev_queue (Qdisc enqueue) events for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm,
		skbaddr, skblen, dev_name))
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, skblen, rc, dev_name):
	"""Perf hook: buffer net_dev_xmit events (with return code) for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm,
		skbaddr, skblen, rc ,dev_name))
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, protocol, location):
	"""Perf hook: buffer kfree_skb events for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm,
		skbaddr, protocol, location))
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
	"""Perf hook: buffer consume_skb events for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr))
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
			skbaddr, skblen):
	"""Perf hook: buffer skb_copy_datagram_iovec events for trace_end()."""
	all_event_list.append(
		(name, context, cpu, nsecs(sec, nsec), pid, comm,
		skbaddr, skblen))
def handle_irq_handler_entry(event_info):
	"""Push a new record onto this cpu's stack of in-flight hard irqs."""
	(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
	irq_dic.setdefault(cpu, []).append(
		{'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time})
def handle_irq_handler_exit(event_info):
	"""Close the newest irq record on this cpu; keep it only when it
	collected events (i.e. it raised a NET_RX softirq)."""
	(name, context, cpu, time, pid, comm, irq, ret) = event_info
	if cpu not in irq_dic.keys():
		return
	irq_record = irq_dic[cpu].pop()
	if irq != irq_record['irq']:
		return
	irq_record['irq_ext_t'] = time
	# if an irq doesn't include NET_RX softirq, drop.
	if 'event_list' in irq_record.keys():
		irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
	"""Append a sirq_raise marker to the irq currently open on this cpu."""
	(name, context, cpu, time, pid, comm, vec) = event_info
	if cpu not in irq_dic.keys() \
	    or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	events = irq_record.get('event_list', [])
	events.append({'time':time, 'event':'sirq_raise'})
	irq_record['event_list'] = events
	irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
	"""Start collecting receive events for the NET_RX softirq on this cpu."""
	(name, context, cpu, time, pid, comm, vec) = event_info
	net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
	"""Close the NET_RX softirq on this cpu and merge its irq and event
	lists into one receive hunk for later display."""
	(name, context, cpu, time, pid, comm, vec) = event_info
	irq_list = irq_dic.pop(cpu, [])
	event_list = 0
	if cpu in net_rx_dic.keys():
		sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
		event_list = net_rx_dic[cpu]['event_list']
		del net_rx_dic[cpu]
	# only hunks with both irq and softirq information are useful
	if irq_list == [] or event_list == 0:
		return
	receive_hunk_list.append({'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
			'irq_list':irq_list, 'event_list':event_list})
def handle_napi_poll(event_info):
	"""Attach a napi_poll event to the NET_RX hunk open on this cpu."""
	(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
	if cpu not in net_rx_dic.keys():
		return
	net_rx_dic[cpu]['event_list'].append(
		{'event_name':'napi_poll', 'dev':dev_name, 'event_t':time})
def handle_netif_rx(event_info):
	"""Append a netif_rx event to the irq currently open on this cpu."""
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu not in irq_dic.keys() \
	    or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	events = irq_record.get('event_list', [])
	events.append({'time':time, 'event':'netif_rx',
		'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
	irq_record['event_list'] = events
	irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    """Log netif_receive_skb into the open NET_RX record and the rx ring buffer."""
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in net_rx_dic:
        return
    rec_data = {'event_name': 'netif_receive_skb',
                'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
    net_rx_dic[cpu]['event_list'].append(rec_data)
    # Newest entries live at the head; evict from the tail when over budget
    # and count how many records overflowed.
    rx_skb_list.insert(0, rec_data)
    if len(rx_skb_list) > buffer_budget:
        rx_skb_list.pop()
        of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Track an skb entering the device tx queue in a bounded ring buffer."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    entry = {'dev': dev_name, 'skbaddr': skbaddr, 'len': skblen,
             'queue_t': time}
    # Head insertion keeps the newest first; evict from the tail when
    # over budget and count overflowed records.
    tx_queue_list.insert(0, entry)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """On a successful transmit, move the matching skb to the xmit buffer."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc != 0:  # anything other than NETDEV_TX_OK is ignored
        return
    for index, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] != skbaddr:
            continue
        skb['xmit_t'] = time
        # Move from the queue buffer to the head of the xmit buffer,
        # evicting from the tail when over budget.
        tx_xmit_list.insert(0, skb)
        del tx_queue_list[index]
        if len(tx_xmit_list) > buffer_budget:
            tx_xmit_list.pop()
            of_count_tx_xmit_list += 1
        return
def handle_kfree_skb(event_info):
    """Resolve a freed skb against the tx queue, tx xmit, and rx buffers."""
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    # Freed before it was ever transmitted: just forget it.
    for index, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[index]
            return
    # Freed after transmit: record the free time and archive the record.
    for index, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[index]
            return
    # A received skb released via kfree_skb: note who dropped it and when.
    for index, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "kfree_skb",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[index]
            return
def handle_consume_skb(event_info):
    """Mark a transmitted skb as consumed (freed) and archive its record."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for index, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[index]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """Mark a received skb as handed to user space via skb_copy_datagram_iovec."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for index, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[index]
            return
| gpl-2.0 |
nmayorov/scikit-learn | sklearn/utils/sparsetools/tests/test_traversal.py | 315 | 2001 | from __future__ import division, print_function, absolute_import
from nose import SkipTest
import numpy as np
from numpy.testing import assert_array_almost_equal
try:
from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
csgraph_to_dense, csgraph_from_dense
except ImportError:
# Oldish versions of scipy don't have that
csgraph_from_dense = None
def test_graph_breadth_first():
    """breadth_first_tree must return the expected BFS tree for a small graph."""
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")
    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    graph = csgraph_from_dense(dense, null_value=0)
    # BFS from node 0: keep only the tree edges discovered level by level.
    expected = np.array([[0, 1, 2, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 7, 0],
                         [0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0]])
    # The graph is symmetric, so directed and undirected trees coincide.
    for directed in (True, False):
        tree = breadth_first_tree(graph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
def test_graph_depth_first():
    """depth_first_tree must return the expected DFS tree for a small graph."""
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")
    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    graph = csgraph_from_dense(dense, null_value=0)
    # DFS from node 0 follows 0-1, 1-4, 4-3, 3-2.
    expected = np.array([[0, 1, 0, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 0, 0],
                         [0, 0, 7, 0, 0],
                         [0, 0, 0, 1, 0]])
    # The graph is symmetric, so directed and undirected trees coincide.
    for directed in (True, False):
        tree = depth_first_tree(graph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
| bsd-3-clause |
msreis/SigNetSim | signetsim/json/JsonRequest.py | 2 | 1145 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" JsonRequest.py
This file...
"""
from django.http import JsonResponse
from django.views.generic import View
class JsonRequest(View):
    """Base view that serializes ``self.data`` as JSON for GET and POST."""

    def __init__(self):
        View.__init__(self)
        # Subclasses populate this dict before the response is rendered.
        self.data = {}

    def _render(self):
        # Single place that turns the collected data into a JsonResponse.
        return JsonResponse(self.data)

    def get(self, request, *args, **kwargs):
        return self._render()

    def post(self, request, *args, **kwargs):
        return self._render()
| agpl-3.0 |
XiaosongWei/crosswalk-test-suite | wrt/wrt-manifest2-android-tests/manifest2/descriptiontest.py | 4 | 3270 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<hongjuanx.wang@intel.com>
import unittest
import os
import sys
import commands
import comm
class TestManifestFunctions(unittest.TestCase):
    """Packaging tests for the manifest "description" field."""

    def _pack_cmd(self, manifestPath):
        # Build the make_apk.py invocation for the given manifest file.
        return "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s" % \
            (comm.Pck_Tools, comm.ARCH, comm.MODE, manifestPath)

    def test_description_chinese(self):
        # A Chinese description must package successfully.
        comm.setUp()
        manifestPath = comm.ConstPath + \
            "/../testapp/manifest_desc_chinese_tests/manifest.json"
        comm.gen_pkg(self._pack_cmd(manifestPath), self)

    def test_description_long(self):
        # A very long description must package successfully.
        comm.setUp()
        manifestPath = comm.ConstPath + \
            "/../testapp/manifest_desc_long_tests/manifest.json"
        comm.gen_pkg(self._pack_cmd(manifestPath), self)

    def test_description_null(self):
        # An empty description must make packaging fail (non-zero status).
        comm.setUp()
        manifestPath = comm.ConstPath + \
            "/../testapp/manifest_desc_null_tests/manifest.json"
        packInfo = commands.getstatusoutput(self._pack_cmd(manifestPath))
        self.assertNotEquals(0, packInfo[0])

    def test_description_specific(self):
        # A description with special characters must package successfully.
        comm.setUp()
        manifestPath = comm.ConstPath + \
            "/../testapp/manifest_desc_specific_tests/manifest.json"
        comm.gen_pkg(self._pack_cmd(manifestPath), self)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
lizardsystem/lizard-layers | lizard_layers/migrations/0005_auto__add_field_areavalue_flag__add_field_areavalue_comment.py | 1 | 13178 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``flag`` and ``comment`` to AreaValue."""

    def forwards(self, orm):
        """Apply the migration: add the two new columns to lizard_layers_areavalue."""
        # Adding field 'AreaValue.flag'
        db.add_column('lizard_layers_areavalue', 'flag', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)

        # Adding field 'AreaValue.comment'
        db.add_column('lizard_layers_areavalue', 'comment', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the two columns again."""
        # Deleting field 'AreaValue.flag'
        db.delete_column('lizard_layers_areavalue', 'flag')

        # Deleting field 'AreaValue.comment'
        db.delete_column('lizard_layers_areavalue', 'comment')

    # Frozen ORM snapshot generated by South at migration time.
    # Do not edit by hand; it only describes the model state as of this
    # migration and is not kept in sync with later model changes.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lizard_area.area': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
            'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'area_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
            'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']", 'null': 'True', 'blank': 'True'}),
            'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
            'dt_created': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 3, 28, 11, 32, 38, 519893)'}),
            'dt_latestchanged': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'dt_latestsynchronized': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
        },
        'lizard_area.communique': {
            'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
            'areasort': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'areasort_krw': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'dt_latestchanged_krw': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'edited_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'edited_by': ('django.db.models.fields.TextField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
            'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'surface': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '1', 'blank': 'True'}),
            'watertype_krw': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'lizard_area.dataadministrator': {
            'Meta': {'object_name': 'DataAdministrator'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'lizard_fewsnorm.parametercache': {
            'Meta': {'ordering': "('ident',)", 'object_name': 'ParameterCache'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'shortname': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'lizard_geo.geoobject': {
            'Meta': {'object_name': 'GeoObject'},
            'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
            'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
        },
        'lizard_geo.geoobjectgroup': {
            'Meta': {'object_name': 'GeoObjectGroup'},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'lizard_layers.areavalue': {
            'Meta': {'object_name': 'AreaValue'},
            'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'flag': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'value_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_layers.ValueType']", 'null': 'True', 'blank': 'True'})
        },
        'lizard_layers.parametertype': {
            'Meta': {'object_name': 'ParameterType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'measuring_rod': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasuringRod']", 'null': 'True', 'blank': 'True'}),
            'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.ParameterCache']", 'null': 'True', 'blank': 'True'}),
            'value_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_layers.ValueType']", 'null': 'True', 'blank': 'True'})
        },
        'lizard_layers.servermapping': {
            'Meta': {'object_name': 'ServerMapping'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'external_server': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'relative_path': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
        },
        'lizard_layers.valuetype': {
            'Meta': {'object_name': 'ValueType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'lizard_measure.measuringrod': {
            'Meta': {'object_name': 'MeasuringRod'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'measuring_rod': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'measuring_rod_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasuringRod']", 'null': 'True', 'blank': 'True'}),
            'sign': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'sub_measuring_rod': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'unit': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        },
        'lizard_security.dataset': {
            'Meta': {'ordering': "['name']", 'object_name': 'DataSet'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
        }
    }

    complete_apps = ['lizard_layers']
| gpl-3.0 |
gopal1cloud/neutron | neutron/plugins/brocade/tests/noscli.py | 28 | 2980 | #!/usr/bin/env python
#
# Copyright (c) 2013 Brocade Communications Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Varma Bhupatiraju (vbhupati@#brocade.com)
# Shiv Haris (sharis@brocade.com)
"""Brocade NOS Driver CLI."""
from __future__ import print_function
import argparse
from neutron.openstack.common import log as logging
from neutron.plugins.brocade.nos import nosdriver as nos
LOG = logging.getLogger(__name__)
class NOSCli(object):
    """Thin CLI wrapper around the Brocade NOS driver.

    Holds connection credentials and dispatches a parsed command to the
    corresponding NOSdriver call.
    """

    def __init__(self, host, username, password):
        self.host = host
        self.username = username
        self.password = password
        self.driver = nos.NOSdriver()

    def execute(self, cmd, otherargs=None):
        """Dispatch *cmd* with its positional arguments.

        :param cmd: one of 'create', 'delete', 'associate', 'dissociate'.
        :param otherargs: positional arguments for the command; defaults to
            the globally parsed ``args.otherargs`` for backward compatibility.

        Bug fix: the original ignored its ``cmd`` parameter and read the
        module-level ``args.cmd`` instead. The parameter is now honored;
        the call site passes ``args.cmd``, so behavior is unchanged.
        """
        if otherargs is None:
            otherargs = args.otherargs
        numargs = len(otherargs)
        if cmd == 'create' and numargs == 1:
            self._create(otherargs[0])
        elif cmd == 'delete' and numargs == 1:
            self._delete(otherargs[0])
        elif cmd == 'associate' and numargs == 2:
            self._associate(otherargs[0], otherargs[1])
        elif cmd == 'dissociate' and numargs == 2:
            self._dissociate(otherargs[0], otherargs[1])
        else:
            # Unknown command or wrong arity: show usage and stop.
            print(usage_desc)
            exit(0)

    def _create(self, net_id):
        # Create a network (VLAN) on the switch.
        self.driver.create_network(self.host, self.username, self.password,
                                   net_id)

    def _delete(self, net_id):
        # Delete a network (VLAN) from the switch.
        self.driver.delete_network(self.host, self.username, self.password,
                                   net_id)

    def _associate(self, net_id, mac):
        # Bind a MAC address to a network.
        self.driver.associate_mac_to_network(
            self.host, self.username, self.password, net_id, mac)

    def _dissociate(self, net_id, mac):
        # Unbind a MAC address from a network.
        self.driver.dissociate_mac_from_network(
            self.host, self.username, self.password, net_id, mac)
usage_desc = """
Command descriptions:
create <id>
delete <id>
associate <id> <mac>
dissociate <id> <mac>
"""
parser = argparse.ArgumentParser(description='process args',
usage=usage_desc, epilog='foo bar help')
parser.add_argument('--ip', default='localhost')
parser.add_argument('--username', default='admin')
parser.add_argument('--password', default='password')
parser.add_argument('cmd')
parser.add_argument('otherargs', nargs='*')
args = parser.parse_args()
noscli = NOSCli(args.ip, args.username, args.password)
noscli.execute(args.cmd)
| apache-2.0 |
sertac/django | tests/template_tests/filter_tests/test_chaining.py | 345 | 3940 | from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class ChainingTests(SimpleTestCase):
    """
    Chaining safeness-preserving filters should not alter the safe status.
    """

    # capfirst and center both preserve safeness: the unsafe string is
    # escaped, the mark_safe one is left alone.
    @setup({'chaining01': '{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}'})
    def test_chaining01(self):
        output = self.engine.render_to_string('chaining01', {'a': 'a < b', 'b': mark_safe('a < b')})
        self.assertEqual(output, ' A < b . A < b ')

    @setup({'chaining02':
            '{% autoescape off %}{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}{% endautoescape %}'})
    def test_chaining02(self):
        output = self.engine.render_to_string('chaining02', {'a': 'a < b', 'b': mark_safe('a < b')})
        self.assertEqual(output, ' A < b . A < b ')

    # Using a filter that forces a string back to unsafe:
    @setup({'chaining03': '{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}'})
    def test_chaining03(self):
        output = self.engine.render_to_string('chaining03', {'a': 'a < b', 'b': mark_safe('a < b')})
        self.assertEqual(output, 'A < .A < ')

    @setup({'chaining04':
            '{% autoescape off %}{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}{% endautoescape %}'})
    def test_chaining04(self):
        output = self.engine.render_to_string('chaining04', {'a': 'a < b', 'b': mark_safe('a < b')})
        self.assertEqual(output, 'A < .A < ')

    # Using a filter that forces safeness does not lead to double-escaping
    @setup({'chaining05': '{{ a|escape|capfirst }}'})
    def test_chaining05(self):
        output = self.engine.render_to_string('chaining05', {'a': 'a < b'})
        self.assertEqual(output, 'A < b')

    @setup({'chaining06': '{% autoescape off %}{{ a|escape|capfirst }}{% endautoescape %}'})
    def test_chaining06(self):
        output = self.engine.render_to_string('chaining06', {'a': 'a < b'})
        self.assertEqual(output, 'A < b')

    # Force to safe, then back (also showing why using force_escape too
    # early in a chain can lead to unexpected results).
    @setup({'chaining07': '{{ a|force_escape|cut:";" }}'})
    def test_chaining07(self):
        output = self.engine.render_to_string('chaining07', {'a': 'a < b'})
        self.assertEqual(output, 'a &lt b')

    @setup({'chaining08': '{% autoescape off %}{{ a|force_escape|cut:";" }}{% endautoescape %}'})
    def test_chaining08(self):
        output = self.engine.render_to_string('chaining08', {'a': 'a < b'})
        self.assertEqual(output, 'a < b')

    # cut before force_escape avoids the mangled entity seen in chaining07.
    @setup({'chaining09': '{{ a|cut:";"|force_escape }}'})
    def test_chaining09(self):
        output = self.engine.render_to_string('chaining09', {'a': 'a < b'})
        self.assertEqual(output, 'a < b')

    @setup({'chaining10': '{% autoescape off %}{{ a|cut:";"|force_escape }}{% endautoescape %}'})
    def test_chaining10(self):
        output = self.engine.render_to_string('chaining10', {'a': 'a < b'})
        self.assertEqual(output, 'a < b')

    # safe after an unsafe-making filter re-marks the result as safe.
    @setup({'chaining11': '{{ a|cut:"b"|safe }}'})
    def test_chaining11(self):
        output = self.engine.render_to_string('chaining11', {'a': 'a < b'})
        self.assertEqual(output, 'a < ')

    @setup({'chaining12': '{% autoescape off %}{{ a|cut:"b"|safe }}{% endautoescape %}'})
    def test_chaining12(self):
        output = self.engine.render_to_string('chaining12', {'a': 'a < b'})
        self.assertEqual(output, 'a < ')

    # force_escape escapes even strings already marked safe.
    @setup({'chaining13': '{{ a|safe|force_escape }}'})
    def test_chaining13(self):
        output = self.engine.render_to_string('chaining13', {"a": "a < b"})
        self.assertEqual(output, 'a < b')

    @setup({'chaining14': '{% autoescape off %}{{ a|safe|force_escape }}{% endautoescape %}'})
    def test_chaining14(self):
        output = self.engine.render_to_string('chaining14', {"a": "a < b"})
        self.assertEqual(output, 'a < b')
| bsd-3-clause |
leoliujie/odoo | addons/point_of_sale/report/pos_users_product.py | 380 | 3336 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class pos_user_product(report_sxw.rml_parse):
    """RML parser for the POS "users product" report.

    Exposes helpers to the template for listing products sold through the
    paid POS orders attached to each bank statement, the cashier name(s),
    and a running grand total.
    """

    def __init__(self, cr, uid, name, context):
        super(pos_user_product, self).__init__(cr, uid, name, context)
        # Callables made available to the report template.
        self.localcontext.update({
            'time': time,
            'get_data':self._get_data,
            'get_user':self._get_user,
            'get_total':self._get_total,
        })

    def _get_data(self, o):
        # Reset the running total; it is accumulated below and later read
        # by get_total() from the template.
        self.total = 0.0
        data={}
        # Ids of the paid POS orders settled via bank statement `o`.
        sql1=""" SELECT distinct(o.id) from account_bank_statement s, account_bank_statement_line l,pos_order o,pos_order_line i where i.order_id=o.id and o.state='paid' and l.statement_id=s.id and l.pos_statement_id=o.id and s.id=%d"""%(o.id)
        self.cr.execute(sql1)
        data = self.cr.dictfetchall()
        a_l=[]
        for r in data:
            a_l.append(r['id'])
        if len(a_l):
            # Quantity, amount, name, code and unit per product line.
            # NOTE(review): this query filters on "order_id = %d" but is fed
            # o.id (the statement id), and the order ids collected in a_l are
            # never used -- looks suspicious; confirm against the template.
            sql2="""SELECT sum(qty) as qty,l.price_unit*sum(l.qty) as amt,t.name as name, p.default_code as code, pu.name as uom from product_product p, product_template t,product_uom pu,pos_order_line l where order_id = %d and p.product_tmpl_id=t.id and l.product_id=p.id and pu.id=t.uom_id group by t.name,p.default_code,pu.name,l.price_unit"""%(o.id)
            self.cr.execute(sql2)
            data = self.cr.dictfetchall()
            for d in data:
                self.total += d['amt']
        return data

    def _get_user(self, object):
        # Distinct partner names of the users owning the given statements.
        names = []
        users_obj = self.pool['res.users']
        for o in object:
            sql = """select ru.id from account_bank_statement as abs,res_users ru
                    where abs.user_id = ru.id
                    and abs.id = %d"""%(o.id)
            self.cr.execute(sql)
            data = self.cr.fetchone()
            if data:
                user = users_obj.browse(self.cr, self.uid, data[0])
                names.append(user.partner_id.name)
        # set() drops duplicates when several statements share a user.
        return list(set(names))

    def _get_total(self, o):
        # Grand total accumulated as a side effect of _get_data().
        return self.total
class report_pos_user_product(osv.AbstractModel):
    """QWeb wrapper binding the legacy rml parser to the usersproduct report."""
    _name = 'report.point_of_sale.report_usersproduct'
    _inherit = 'report.abstract_report'
    _template = 'point_of_sale.report_usersproduct'
    # Legacy report_sxw parser providing the template helper context.
    _wrapped_report_class = pos_user_product
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dbrgn/mopidy | mopidy/local/actor.py | 17 | 1105 | from __future__ import absolute_import, unicode_literals
import logging
import pykka
from mopidy import backend
from mopidy.local import storage
from mopidy.local.library import LocalLibraryProvider
from mopidy.local.playback import LocalPlaybackProvider
logger = logging.getLogger(__name__)
class LocalBackend(pykka.ThreadingActor, backend.Backend):
    """Backend actor serving ``local:`` URIs from a pluggable local library."""

    uri_schemes = ['local']
    libraries = []

    def __init__(self, config, audio):
        super(LocalBackend, self).__init__()
        self.config = config

        storage.check_dirs_and_files(config)

        # Index the registered library classes by name, then instantiate
        # the one selected in the configuration (if it exists).
        available = {lib.name: lib for lib in self.libraries}
        wanted = config['local']['library']
        library_cls = available.get(wanted)
        if library_cls is not None:
            library = library_cls(config)
            logger.debug('Using %s as the local library', wanted)
        else:
            library = None
            logger.warning('Local library %s not found', wanted)

        self.playback = LocalPlaybackProvider(audio=audio, backend=self)
        self.library = LocalLibraryProvider(backend=self, library=library)
| apache-2.0 |
davizucon/ChatterBot | setup.py | 2 | 2150 | #!/usr/bin/env python
"""
ChatterBot setup file.
"""
from setuptools import setup
# Dynamically retrieve version/author metadata from the chatterbot package so
# it is defined in exactly one place (chatterbot/__init__.py).
CHATTERBOT = __import__('chatterbot')
VERSION = CHATTERBOT.__version__
AUTHOR = CHATTERBOT.__author__
AUTHOR_EMAIL = CHATTERBOT.__email__
URL = CHATTERBOT.__url__
DESCRIPTION = CHATTERBOT.__doc__

with open('requirements.txt') as requirements:
    # splitlines() (unlike readlines()) strips the trailing newline from each
    # entry, so install_requires receives clean requirement specifiers.
    REQUIREMENTS = requirements.read().splitlines()
# Package metadata for PyPI / pip installation.
setup(
    name='ChatterBot',
    version=VERSION,
    url=URL,
    download_url='{}/tarball/{}'.format(URL, VERSION),
    # setuptools-markdown converts readme.md to reST for the PyPI page.
    setup_requires=['setuptools-markdown'],
    long_description_markdown_filename='readme.md',
    description=DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    # Explicit package list (no find_packages): every subpackage must be
    # added here when created, or it will be missing from the wheel/sdist.
    packages=[
        'chatterbot',
        'chatterbot.input',
        'chatterbot.output',
        'chatterbot.storage',
        'chatterbot.logic',
        'chatterbot.corpus',
        'chatterbot.conversation',
        'chatterbot.ext',
        'chatterbot.ext.django_chatterbot',
        'chatterbot.ext.django_chatterbot.migrations',
        'chatterbot.ext.django_chatterbot.management',
        'chatterbot.ext.django_chatterbot.management.commands'
    ],
    package_dir={'chatterbot': 'chatterbot'},
    include_package_data=True,
    install_requires=REQUIREMENTS,
    license='BSD',
    zip_safe=True,
    platforms=['any'],
    keywords=['ChatterBot', 'chatbot', 'chat', 'bot'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Communications :: Chat',
        'Topic :: Internet',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=['mock']
)
| bsd-3-clause |
schlueter/ansible | lib/ansible/modules/network/panos/panos_lic.py | 60 | 4915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: string
sample: 007200004214
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
    """Query the device via the API and return its serial number.

    Fails the Ansible module when the 'show system info' response contains
    no <serial> element.
    """
    xapi.op(cmd="show system info", cmd_xml=True)
    root = xapi.element_root
    node = root.find('.//serial')
    if node is None:
        module.fail_json(msg="No <serial> tag in show system info")
    return node.text
def apply_authcode(xapi, module, auth_code):
    """Apply *auth_code* to the device via 'request license fetch auth-code'.

    The PAN-OS API reports success through an exception whose response body
    contains 'Successfully'; an 'Invalid Auth Code' body fails the module,
    and any other API error is re-raised.
    """
    cmd = 'request license fetch auth-code "%s"' % auth_code
    try:
        xapi.op(cmd=cmd, cmd_xml=True)
    except pan.xapi.PanXapiError:
        if hasattr(xapi, 'xml_document'):
            response = xapi.xml_document
            if 'Successfully' in response:
                return
            if 'Invalid Auth Code' in response:
                module.fail_json(msg="Invalid Auth Code")
        raise
def fetch_authcode(xapi, module):
    """Fetch the license for a previously registered authcode.

    Mirrors apply_authcode(): the API signals success via an exception whose
    body contains 'Successfully'; 'Invalid Auth Code' fails the module, and
    any other API error is re-raised.
    """
    try:
        xapi.op(cmd='request license fetch', cmd_xml=True)
    except pan.xapi.PanXapiError:
        if hasattr(xapi, 'xml_document'):
            response = xapi.xml_document
            if 'Successfully' in response:
                return
            if 'Invalid Auth Code' in response:
                module.fail_json(msg="Invalid Auth Code")
        raise
def main():
    """Module entry point: connect to the firewall and apply/fetch a license."""
    module = AnsibleModule(
        argument_spec=dict(
            ip_address=dict(required=True),
            password=dict(required=True, no_log=True),
            auth_code=dict(),
            username=dict(default='admin'),
            force=dict(type='bool', default=False),
        ),
        supports_check_mode=False,
    )
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')

    params = module.params
    xapi = pan.xapi.PanXapi(
        hostname=params['ip_address'],
        api_username=params['username'],
        api_password=params['password'],
    )

    # Unless forced, skip licensing when the device already has a serial
    # number (i.e. it is already registered/licensed).
    if not params['force']:
        serialnumber = get_serial(xapi, module)
        if serialnumber != 'unknown':
            return module.exit_json(changed=False, serialnumber=serialnumber)

    if params['auth_code']:
        apply_authcode(xapi, module, params['auth_code'])
    else:
        fetch_authcode(xapi, module)

    module.exit_json(changed=True, msg="okey dokey")


if __name__ == '__main__':
    main()
| gpl-3.0 |
zipperX/android_kernel_oneplus_msm8974 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex(2) operation codes and flags, mirroring <linux/futex.h>.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128  # process-private futex
FUTEX_CLOCK_REALTIME = 256  # timeout measured against CLOCK_REALTIME
# Mask that strips the flag bits, leaving the bare command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000  # nanoseconds per second


def avg(total, n):
    """Return the mean of *total* over *n* samples."""
    return total / n


def nsecs(secs, nsecs):
    """Combine a (secs, nsecs) pair into a single nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs


def nsecs_secs(nsecs):
    """Return the whole-seconds part of a nanosecond count."""
    # Floor division keeps the result an integer on both Python 2 and 3
    # (plain '/' would yield a float under Python 3, breaking '%u' output).
    return nsecs // NSECS_PER_SEC


def nsecs_nsecs(nsecs):
    """Return the sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC


def nsecs_str(nsecs):
    """Format a nanosecond count as 'SSSSS.NNNNNNNNN'."""
    # The original assignment ended with a stray trailing comma, which made
    # this function return a 1-tuple instead of the formatted string.
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold *value* into the (min, max, avg, count) tuple at dict[key].

    The parameter is (unfortunately) named ``dict``, shadowing the builtin;
    it is kept for backward compatibility with existing callers. The 'avg'
    field is a smoothed running value, not a true arithmetic mean.
    """
    # 'key not in dict' replaces the Python-2-only dict.has_key() and works
    # on both Python versions.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        # Renamed locals: the originals shadowed the builtins min/max and
        # the sibling avg() helper.
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        # Exponential-style smoothing, kept as-is to preserve behaviour.
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    """Clear the terminal using ANSI escapes (cursor home + erase display)."""
    print("\x1b[H\x1b[2J")
# Optional syscall-name resolution via the audit-libs-python bindings.
audit_package_warned = False
try:
    import audit
    # Map uname(2) machine strings to audit's architecture constants so
    # syscall numbers can be translated for the host architecture.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB only exists in newer audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit bindings unavailable (or unsupported arch): warn once and fall
    # back to numeric syscall ids in syscall_name().
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Return the symbolic name for syscall number *id*.

    Falls back to the numeric id as a string when the audit bindings are
    missing (NameError) or the id cannot be translated.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # 'except Exception' instead of a bare 'except' so that
        # KeyboardInterrupt/SystemExit are not swallowed.
        return str(id)
def strerror(nr):
    """Return the symbolic errno name (e.g. 'ENOENT') for *nr*.

    Accepts either sign; unknown values yield 'Unknown <nr> errno'.
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # KeyError (unknown errno) instead of a bare 'except', so real bugs
        # such as a non-integer argument are not silently masked.
        return "Unknown %d errno" % nr
| gpl-2.0 |
AladdinSonni/youtube-dl | youtube_dl/downloader/rtmp.py | 95 | 8353 | from __future__ import unicode_literals
import os
import re
import subprocess
import time
from .common import FileDownloader
from ..compat import compat_str
from ..utils import (
check_executable,
encodeFilename,
encodeArgument,
get_exe_version,
)
def rtmpdump_version():
    """Return the installed rtmpdump version string, or None if not found."""
    version_re = r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)'
    return get_exe_version('rtmpdump', ['--help'], version_re)
class RtmpFD(FileDownloader):
    """Download RTMP streams by shelling out to the external ``rtmpdump``
    binary, parsing its stderr output to drive youtube-dl's progress hooks
    and retrying interrupted transfers with ``--resume``."""

    def real_download(self, filename, info_dict):
        def run_rtmpdump(args):
            # Run rtmpdump once, streaming its stderr to report progress.
            # Returns rtmpdump's exit code (see the RD_* constants below).
            start = time.time()
            resume_percent = None
            resume_downloaded_data_len = None
            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
            cursor_in_new_line = True
            proc_stderr_closed = False
            while not proc_stderr_closed:
                # read line from stderr; rtmpdump redraws its progress line
                # with \r, so treat both \r and \n as line terminators
                line = ''
                while True:
                    char = proc.stderr.read(1)
                    if not char:
                        proc_stderr_closed = True
                        break
                    if char in [b'\r', b'\n']:
                        break
                    line += char.decode('ascii', 'replace')
                if not line:
                    # proc_stderr_closed is True
                    continue
                # Progress line with a percentage, e.g. "123.456 kB / 12.34 sec (5.6%)"
                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                if mobj:
                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                    percent = float(mobj.group(2))
                    # Remember the first reported point so ETA/speed are
                    # computed relative to where this (resumed) run started.
                    if not resume_percent:
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    time_now = time.time()
                    eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent)
                    speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        data_len = int(downloaded_data_len * 100 / percent)
                    self._hook_progress({
                        'status': 'downloading',
                        'downloaded_bytes': downloaded_data_len,
                        'total_bytes_estimate': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
                        'eta': eta,
                        'elapsed': time_now - start,
                        'speed': speed,
                    })
                    cursor_in_new_line = False
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
                            'elapsed': time_now - start,
                            'speed': speed,
                        })
                        cursor_in_new_line = False
                    elif self.params.get('verbose', False):
                        # Non-progress output: echo it in verbose mode.
                        if not cursor_in_new_line:
                            self.to_screen('')
                            cursor_in_new_line = True
                        self.to_screen('[rtmpdump] ' + line)
            proc.wait()
            if not cursor_in_new_line:
                self.to_screen('')
            return proc.returncode

        url = info_dict['url']
        player_url = info_dict.get('player_url', None)
        page_url = info_dict.get('page_url', None)
        app = info_dict.get('app', None)
        play_path = info_dict.get('play_path', None)
        tc_url = info_dict.get('tc_url', None)
        flash_version = info_dict.get('flash_version', None)
        live = info_dict.get('rtmp_live', False)
        conn = info_dict.get('rtmp_conn', None)
        protocol = info_dict.get('rtmp_protocol', None)
        real_time = info_dict.get('rtmp_real_time', False)
        no_resume = info_dict.get('no_resume', False)
        continue_dl = info_dict.get('continuedl', True)

        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first
        if not check_executable('rtmpdump', ['-h']):
            self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrumpted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = [
            'rtmpdump', '--verbose', '-r', url,
            '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if app is not None:
            basic_args += ['--app', app]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            basic_args += ['--tcUrl', tc_url]
        if test:
            # --stop 1: download only one second, used by --test runs.
            basic_args += ['--stop', '1']
        if flash_version is not None:
            basic_args += ['--flashVer', flash_version]
        if live:
            basic_args += ['--live']
        if isinstance(conn, list):
            for entry in conn:
                basic_args += ['--conn', entry]
        elif isinstance(conn, compat_str):
            basic_args += ['--conn', conn]
        if protocol is not None:
            basic_args += ['--protocol', protocol]
        if real_time:
            basic_args += ['--realtime']

        args = basic_args
        if not no_resume and continue_dl and not live:
            args += ['--resume']
        if not live and continue_dl:
            args += ['--skip', '1']

        args = [encodeArgument(a) for a in args]
        self._debug_cmd(args, exe='rtmpdump')

        # rtmpdump exit codes.
        RD_SUCCESS = 0
        RD_FAILED = 1
        RD_INCOMPLETE = 2
        RD_NO_CONNECT = 3

        retval = run_rtmpdump(args)

        if retval == RD_NO_CONNECT:
            self.report_error('[rtmpdump] Could not connect to RTMP server.')
            return False

        # Retry with --resume while rtmpdump reports an interrupted/failed
        # transfer, bailing out when the file stops growing.
        while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
            args = basic_args + ['--resume']
            if retval == RD_FAILED:
                args += ['--skip', '1']
            args = [encodeArgument(a) for a in args]
            retval = run_rtmpdump(args)
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == RD_FAILED:
                break
            # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024:
                self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = RD_SUCCESS
                break
        if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr('\n')
            self.report_error('rtmpdump exited with code %d' % retval)
            return False
| unlicense |
fametrano/BitcoinBlockchainTechnology | btclib/rfc6979.py | 1 | 4053 | #!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Deterministic generation of the ephemeral key following RFC6979.
https://tools.ietf.org/html/rfc6979:
ECDSA and ECSSA need to produce, for each signature generation,
a fresh random value (ephemeral key, hereafter designated as k).
For effective security, k must be chosen randomly and uniformly
from a set of modular integers, using a cryptographically secure
process. Even slight biases in that process may be turned into
attacks on the signature schemes.
The need for a cryptographically secure source of randomness proves
to be a hindranceand and makes implementations harder to test.
Moreover, reusing the same ephemeral key for a different message
signed with the same private key reveal the private key!
RFC6979 turns ECDSA into deterministic schemes by using a
deterministic process for generating the "random" value k.
The process fulfills the cryptographic characteristics in order to
maintain the properties of verifiability and unforgeability
expected from signature schemes; namely, for whoever does not know
the signature private key, the mapping from input messages to the
corresponding k values is computationally indistinguishable from
what a randomly and uniformly chosen function (from the set of
messages to the set of possible k values) would return.
"""
import hmac
from hashlib import sha256
from .alias import HashF, PrvKey, String
from .curve import Curve
from .curves import secp256k1
from .to_prvkey import int_from_prvkey
from .utils import int_from_bits
def rfc6979(
    msg: String, prvkey: PrvKey, ec: Curve = secp256k1, hf: HashF = sha256
) -> int:
    """Return a deterministic ephemeral key following RFC 6979.

    :param msg: message being signed (a str is encoded to bytes first)
    :param prvkey: private key the deterministic nonce is bound to
    :param ec: elliptic curve (default: secp256k1)
    :param hf: hash function (default: sha256)
    :return: deterministic nonce k, with 0 < k < ec.n
    """
    # the following is strictly equivalent to dsa._challenge
    if isinstance(msg, str):
        msg = msg.encode()
    # Steps numbering follows SEC 1 v.2 section 4.1.3
    h = hf()
    h.update(msg)
    mhd = h.digest()  # 4
    # leftmost ec.nlen bits %= ec.n
    c = int_from_bits(mhd, ec.nlen) % ec.n  # 5
    q = int_from_prvkey(prvkey, ec)
    return _rfc6979(c, q, ec, hf)
def _rfc6979(c: int, q: int, ec: Curve, hf: HashF) -> int:
    """HMAC-DRBG candidate-generation loop of RFC 6979.

    :param c: challenge (hash of the message, reduced mod ec.n)
    :param q: private key as an integer
    :param ec: elliptic curve providing n, nsize, nlen
    :param hf: hash function used for the HMAC chain
    :return: first candidate k with 0 < k < ec.n
    """
    # https://tools.ietf.org/html/rfc6979 section 3.2
    # c = hf(m)                                            # 3.2.a
    # convert the private key q to an octet sequence of size nsize
    bprv = q.to_bytes(ec.nsize, "big")
    # truncate and/or expand c: encoding size is driven by nsize
    bc = c.to_bytes(ec.nsize, "big")
    bprvbm = bprv + bc

    hsize = hf().digest_size
    V = b"\x01" * hsize  # 3.2.b
    K = b"\x00" * hsize  # 3.2.c

    # Seed the HMAC chain with the private key and the message hash.
    K = hmac.new(K, V + b"\x00" + bprvbm, hf).digest()  # 3.2.d
    V = hmac.new(K, V, hf).digest()  # 3.2.e
    K = hmac.new(K, V + b"\x01" + bprvbm, hf).digest()  # 3.2.f
    V = hmac.new(K, V, hf).digest()  # 3.2.g

    while True:  # 3.2.h
        T = b""  # 3.2.h.1
        while len(T) < ec.nsize:  # 3.2.h.2
            V = hmac.new(K, V, hf).digest()
            T += V
        # The following line would introduce a bias
        # k = int.from_bytes(T, 'big') % ec.n
        # In general, taking a uniformly random integer (like those
        # obtained from a hash function in the random oracle model)
        # modulo the curve order n would produce a biased result.
        # However, if the order n is sufficiently close to 2^hlen,
        # then the bias is not observable: e.g.
        # for secp256k1 and sha256 1-n/2^256 it is about 1.27*2^-128
        k = int_from_bits(T, ec.nlen)  # candidate k           # 3.2.h.3
        if 0 < k < ec.n:  # acceptable values for k
            return k  # successful candidate
        # candidate rejected: refresh K, V and loop for a new candidate
        K = hmac.new(K, V + b"\x00", hf).digest()
        V = hmac.new(K, V, hf).digest()
| mit |
spaghetti-/rosdep | src/rosdep2/platforms/arch.py | 1 | 3009 | #!/usr/bin/env python
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
import subprocess
from ..installers import PackageManagerInstaller
from .source import SOURCE_INSTALLER
ARCH_OS_NAME = 'arch'  # rosdep OS key for Arch Linux
PACMAN_INSTALLER = 'pacman'  # installer key registered by this module
def register_installers(context):
    """Register this module's installer implementations with rosdep."""
    context.set_installer(PACMAN_INSTALLER, PacmanInstaller())
def register_platforms(context):
    """Declare the installers available on Arch; pacman is the default."""
    context.add_os_installer_key(ARCH_OS_NAME, SOURCE_INSTALLER)
    context.add_os_installer_key(ARCH_OS_NAME, PACMAN_INSTALLER)
    context.set_default_os_installer_key(ARCH_OS_NAME, lambda self: PACMAN_INSTALLER)
def pacman_detect_single(p):
    """Return True when package *p* is installed.

    ``pacman -T`` exits 0 when the given "dependency" is already satisfied.
    """
    exit_code = subprocess.call(
        ['pacman', '-T', p], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return exit_code == 0
def pacman_detect(packages):
    """Return the subset of *packages* that is already installed."""
    return list(filter(pacman_detect_single, packages))
class PacmanInstaller(PackageManagerInstaller):
    """Installer plugin that resolves rosdep keys via Arch Linux's pacman."""

    def __init__(self):
        super(PacmanInstaller, self).__init__(pacman_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Build the (possibly privilege-elevated) pacman command line for
        the packages that still need installing; [] when nothing is missing."""
        packages = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not packages:
            return []
        flags = []
        if not interactive:
            flags.append('--noconfirm')  # never prompt
        if not reinstall:
            flags.append('--needed')  # skip up-to-date packages
        if quiet:
            flags.append('-q')
        full_command = ['pacman', '-S'] + flags + packages
        return [self.elevate_priv(full_command)]
| bsd-3-clause |
baran0119/kernel_samsung_baffinlitexx | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Usage string shown when the script is invoked with too many arguments.
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        # A numeric argument selects a pid; anything else is a comm name.
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# Nested mapping syscalls[comm][pid][syscall_id] -> count; autodict
# autovivifies intermediate levels on first access.
syscalls = autodict()
def trace_begin():
    # Called by perf once before any events are processed.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event; emit the summary table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # perf callback for the raw_syscalls:sys_enter tracepoint.
    # Ignore events that do not match the requested comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First occurrence: the autodict leaf is not yet an int, so
        # initialise the counter instead of incrementing.
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Print a per-comm/per-pid table of syscall counts, most frequent first.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s  %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by descending count (Python-2 tuple-unpacking lambda).
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                          key = lambda(k, v): (v, k),  reverse = True):
                print "  %-38s  %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
thiagopnts/servo | tests/wpt/web-platform-tests/webdriver/tests/actions/support/keys.py | 12 | 16359 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Keys implementation.
"""
from inspect import getmembers
import sys
class Keys(object):
    """
    Set of special keys codes.

    Each value is a codepoint in the Unicode Private Use Area (U+E000..)
    that WebDriver maps to the corresponding key action.

    See also https://w3c.github.io/webdriver/webdriver-spec.html#h-keyboard-actions
    """

    NULL = u"\ue000"
    CANCEL = u"\ue001"  # ^break
    HELP = u"\ue002"
    BACKSPACE = u"\ue003"
    TAB = u"\ue004"
    CLEAR = u"\ue005"
    RETURN = u"\ue006"
    ENTER = u"\ue007"
    SHIFT = u"\ue008"
    CONTROL = u"\ue009"
    ALT = u"\ue00a"
    PAUSE = u"\ue00b"
    ESCAPE = u"\ue00c"
    SPACE = u"\ue00d"
    # navigation / editing keys
    PAGE_UP = u"\ue00e"
    PAGE_DOWN = u"\ue00f"
    END = u"\ue010"
    HOME = u"\ue011"
    LEFT = u"\ue012"
    UP = u"\ue013"
    RIGHT = u"\ue014"
    DOWN = u"\ue015"
    INSERT = u"\ue016"
    DELETE = u"\ue017"
    SEMICOLON = u"\ue018"
    EQUALS = u"\ue019"

    NUMPAD0 = u"\ue01a"  # number pad keys
    NUMPAD1 = u"\ue01b"
    NUMPAD2 = u"\ue01c"
    NUMPAD3 = u"\ue01d"
    NUMPAD4 = u"\ue01e"
    NUMPAD5 = u"\ue01f"
    NUMPAD6 = u"\ue020"
    NUMPAD7 = u"\ue021"
    NUMPAD8 = u"\ue022"
    NUMPAD9 = u"\ue023"
    MULTIPLY = u"\ue024"
    ADD = u"\ue025"
    SEPARATOR = u"\ue026"
    SUBTRACT = u"\ue027"
    DECIMAL = u"\ue028"
    DIVIDE = u"\ue029"

    F1 = u"\ue031"  # function keys
    F2 = u"\ue032"
    F3 = u"\ue033"
    F4 = u"\ue034"
    F5 = u"\ue035"
    F6 = u"\ue036"
    F7 = u"\ue037"
    F8 = u"\ue038"
    F9 = u"\ue039"
    F10 = u"\ue03a"
    F11 = u"\ue03b"
    F12 = u"\ue03c"

    META = u"\ue03d"

    # More keys from webdriver spec
    ZENKAKUHANKAKU = u"\uE040"
    # right-hand variants of the modifier/navigation keys
    R_SHIFT = u"\uE050"
    R_CONTROL = u"\uE051"
    R_ALT = u"\uE052"
    R_META = u"\uE053"
    R_PAGEUP = u"\uE054"
    R_PAGEDOWN = u"\uE055"
    R_END = u"\uE056"
    R_HOME = u"\uE057"
    R_ARROWLEFT = u"\uE058"
    R_ARROWUP = u"\uE059"
    R_ARROWRIGHT = u"\uE05A"
    R_ARROWDOWN = u"\uE05B"
    R_INSERT = u"\uE05C"
    R_DELETE = u"\uE05D"
ALL_KEYS = getmembers(Keys, lambda x: type(x) == unicode)
ALL_EVENTS = {
"ADD": {
"code": "",
"ctrl": False,
"key": "+",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue025",
},
"ALT": {
"code": "AltLeft",
"ctrl": False,
"key": "Alt",
"location": 1,
"meta": False,
"shift": False,
"value": u"\ue00a",
},
"BACKSPACE": {
"code": "Backspace",
"ctrl": False,
"key": "Backspace",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue003",
},
"CANCEL": {
"code": "",
"ctrl": False,
"key": "Cancel",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue001",
},
"CLEAR": {
"code": "",
"ctrl": False,
"key": "Clear",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue005",
},
"CONTROL": {
"code": "ControlLeft",
"ctrl": True,
"key": "Control",
"location": 1,
"meta": False,
"shift": False,
"value": u"\ue009",
},
"DECIMAL": {
"code": "NumpadDecimal",
"ctrl": False,
"key": ".",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue028",
},
"DELETE": {
"code": "Delete",
"ctrl": False,
"key": "Delete",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue017",
},
"DIVIDE": {
"code": "NumpadDivide",
"ctrl": False,
"key": "/",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue029",
},
"DOWN": {
"code": "ArrowDown",
"ctrl": False,
"key": "ArrowDown",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue015",
},
"END": {
"code": "End",
"ctrl": False,
"key": "End",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue010",
},
"ENTER": {
"code": "NumpadEnter",
"ctrl": False,
"key": "Enter",
"location": 1,
"meta": False,
"shift": False,
"value": u"\ue007",
},
"EQUALS": {
"code": "",
"ctrl": False,
"key": "=",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue019",
},
"ESCAPE": {
"code": "Escape",
"ctrl": False,
"key": "Escape",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00c",
},
"F1": {
"code": "F1",
"ctrl": False,
"key": "F1",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue031",
},
"F10": {
"code": "F10",
"ctrl": False,
"key": "F10",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue03a",
},
"F11": {
"code": "F11",
"ctrl": False,
"key": "F11",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue03b",
},
"F12": {
"code": "F12",
"ctrl": False,
"key": "F12",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue03c",
},
"F2": {
"code": "F2",
"ctrl": False,
"key": "F2",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue032",
},
"F3": {
"code": "F3",
"ctrl": False,
"key": "F3",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue033",
},
"F4": {
"code": "F4",
"ctrl": False,
"key": "F4",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue034",
},
"F5": {
"code": "F5",
"ctrl": False,
"key": "F5",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue035",
},
"F6": {
"code": "F6",
"ctrl": False,
"key": "F6",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue036",
},
"F7": {
"code": "F7",
"ctrl": False,
"key": "F7",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue037",
},
"F8": {
"code": "F8",
"ctrl": False,
"key": "F8",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue038",
},
"F9": {
"code": "F9",
"ctrl": False,
"key": "F9",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue039",
},
"HELP": {
"code": "Help",
"ctrl": False,
"key": "Help",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue002",
},
"HOME": {
"code": "Home",
"ctrl": False,
"key": "Home",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue011",
},
"INSERT": {
"code": "Insert",
"ctrl": False,
"key": "Insert",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue016",
},
"LEFT": {
"code": "ArrowLeft",
"ctrl": False,
"key": "ArrowLeft",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue012",
},
"META": {
"code": "OSLeft",
"ctrl": False,
"key": "Meta",
"location": 1,
"meta": True,
"shift": False,
"value": u"\ue03d",
},
"MULTIPLY": {
"code": "NumpadMultiply",
"ctrl": False,
"key": "*",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue024",
},
"NULL": {
"code": "",
"ctrl": False,
"key": "Unidentified",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue000",
},
"NUMPAD0": {
"code": "Numpad0",
"ctrl": False,
"key": "0",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01a",
},
"NUMPAD1": {
"code": "Numpad1",
"ctrl": False,
"key": "1",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01b",
},
"NUMPAD2": {
"code": "Numpad2",
"ctrl": False,
"key": "2",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01c",
},
"NUMPAD3": {
"code": "Numpad3",
"ctrl": False,
"key": "3",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01d",
},
"NUMPAD4": {
"code": "PageDown",
"ctrl": False,
"key": "4",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01e",
},
"NUMPAD5": {
"code": "PageUp",
"ctrl": False,
"key": "5",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01f",
},
"NUMPAD6": {
"code": "Numpad6",
"ctrl": False,
"key": "6",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue020",
},
"NUMPAD7": {
"code": "Numpad7",
"ctrl": False,
"key": "7",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue021",
},
"NUMPAD8": {
"code": "Numpad8",
"ctrl": False,
"key": "8",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue022",
},
"NUMPAD9": {
"code": "Numpad9",
"ctrl": False,
"key": "9",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue023",
},
"PAGE_DOWN": {
"code": "",
"ctrl": False,
"key": "PageDown",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00f",
},
"PAGE_UP": {
"code": "",
"ctrl": False,
"key": "PageUp",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00e",
},
"PAUSE": {
"code": "",
"ctrl": False,
"key": "Pause",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00b",
},
"RETURN": {
"code": "Enter",
"ctrl": False,
"key": "Enter",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue006",
},
"RIGHT": {
"code": "ArrowRight",
"ctrl": False,
"key": "ArrowRight",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue014",
},
"R_ALT": {
"code": "AltRight",
"ctrl": False,
"key": "Alt",
"location": 2,
"meta": False,
"shift": False,
"value": u"\ue052",
},
"R_ARROWDOWN": {
"code": "Numpad2",
"ctrl": False,
"key": "ArrowDown",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue05b",
},
"R_ARROWLEFT": {
"code": "Numpad4",
"ctrl": False,
"key": "ArrowLeft",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue058",
},
"R_ARROWRIGHT": {
"code": "Numpad6",
"ctrl": False,
"key": "ArrowRight",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue05a",
},
"R_ARROWUP": {
"code": "Numpad8",
"ctrl": False,
"key": "ArrowUp",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue059",
},
"R_CONTROL": {
"code": "ControlRight",
"ctrl": True,
"key": "Control",
"location": 2,
"meta": False,
"shift": False,
"value": u"\ue051",
},
"R_DELETE": {
"code": "NumpadDecimal",
"ctrl": False,
"key": "Delete",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue05d",
},
"R_END": {
"code": "Numpad1",
"ctrl": False,
"key": "End",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue056",
},
"R_HOME": {
"code": "Numpad7",
"ctrl": False,
"key": "Home",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue057",
},
"R_INSERT": {
"code": "Numpad0",
"ctrl": False,
"key": "Insert",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue05c",
},
"R_META": {
"code": "OSRight",
"ctrl": False,
"key": "Meta",
"location": 2,
"meta": True,
"shift": False,
"value": u"\ue053",
},
"R_PAGEDOWN": {
"code": "Numpad3",
"ctrl": False,
"key": "PageDown",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue055",
},
"R_PAGEUP": {
"code": "Numpad9",
"ctrl": False,
"key": "PageUp",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue054",
},
"R_SHIFT": {
"code": "ShiftRight",
"ctrl": False,
"key": "Shift",
"location": 2,
"meta": False,
"shift": True,
"value": u"\ue050",
},
"SEMICOLON": {
"code": "",
"ctrl": False,
"key": ";",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue018",
},
"SEPARATOR": {
"code": "NumpadSubtract",
"ctrl": False,
"key": ",",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue026",
},
"SHIFT": {
"code": "ShiftLeft",
"ctrl": False,
"key": "Shift",
"location": 1,
"meta": False,
"shift": True,
"value": u"\ue008",
},
"SPACE": {
"code": "Space",
"ctrl": False,
"key": " ",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00d",
},
"SUBTRACT": {
"code": "",
"ctrl": False,
"key": "-",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue027",
},
"TAB": {
"code": "Tab",
"ctrl": False,
"key": "Tab",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue004",
},
"UP": {
"code": "ArrowUp",
"ctrl": False,
"key": "ArrowUp",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue013",
},
"ZENKAKUHANKAKU": {
"code": "",
"ctrl": False,
"key": "ZenkakuHankaku",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue040",
}
}
# Primary keyboard-shortcut modifier for the current platform:
# Command (META) on macOS, Control everywhere else.
MODIFIER_KEY = Keys.META if sys.platform == 'darwin' else Keys.CONTROL
| mpl-2.0 |
objarni/splinter | setup.py | 4 | 1253 | # -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from setuptools import setup, find_packages
import codecs
# The long description shown on PyPI is the README, read as UTF-8.
README = codecs.open('README.rst', encoding='utf-8').read()

setup(
    name='splinter',
    version='0.7.3',
    url='https://github.com/cobrateam/splinter',
    description='browser abstraction for web acceptance testing',
    long_description=README,
    author='CobraTeam',
    author_email='andrewsmedina@gmail.com',
    # Generic 2/3 trove classifiers plus one per supported minor version.
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ] + [('Programming Language :: Python :: %s' % x) for x in '2.6 2.7 3.3 3.4'.split()],
    packages=find_packages(exclude=['docs', 'tests', 'samples']),
    include_package_data=True,
    install_requires=['selenium>=2.47.1'],
    # Optional driver backends; each pulls in its own HTML-parsing stack.
    extras_require={'zope.testbrowser': ['zope.testbrowser>=4.0.4',
                                         'lxml>=2.3.6', 'cssselect'],
                    'django': ['Django>=1.5.8,<1.9', 'lxml>=2.3.6', 'cssselect', 'six'],
                    'flask': ['Flask>=0.10', 'lxml>=2.3.6', 'cssselect']},
    tests_require=['coverage', 'flask'],
)
| bsd-3-clause |
roadmapper/ansible | test/units/modules/network/fortimanager/test_fmgr_secprof_web.py | 38 | 2484 | # Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
import pytest
try:
from ansible.modules.network.fortimanager import fmgr_secprof_web
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
def load_fixtures():
    """Load the recorded-API-response fixture for this test module.

    Looks for ``fixtures/<module_name>.json`` next to this file and returns
    its parsed contents wrapped in a single-element list, or an empty list
    when the fixture file is missing (tests are then simply skipped by the
    parametrized fixture below).
    """
    # BUG FIX: the format string was the literal "(unknown).json", so the
    # filename= keyword was never interpolated and the fixture was never
    # found. Interpolate the test module's own base name instead.
    fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') + "/{filename}.json".format(
        filename=os.path.splitext(os.path.basename(__file__))[0])
    try:
        with open(fixture_path, "r") as fixture_file:
            fixture_data = json.load(fixture_file)
    except IOError:
        # No fixture recorded: nothing to parametrize with.
        return []
    return [fixture_data]
@pytest.fixture(autouse=True)
def module_mock(mocker):
    """Patch AnsibleModule for every test so no real module object is built."""
    connection_class_mock = mocker.patch('ansible.module_utils.basic.AnsibleModule')
    return connection_class_mock
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Patch the module's Connection so no network traffic is attempted."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortimanager.fmgr_secprof_web.Connection')
    return connection_class_mock
@pytest.fixture(scope="function", params=load_fixtures())
def fixture_data(request):
    """Return the fixture entry keyed by the current test's name (sans 'test_')."""
    func_name = request.function.__name__.replace("test_", "")
    return request.param.get(func_name, None)
# Shared handler built from the (mocked) connection/module fixtures above.
fmg_instance = FortiManagerHandler(connection_mock, module_mock)


def test_fmgr_webfilter_profile_modify(fixture_data, mocker):
    """Replay recorded API responses and verify a profile modify succeeds."""
    mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
                 side_effect=fixture_data)
    output = fmgr_secprof_web.fmgr_webfilter_profile_modify(fmg_instance, fixture_data[0]['paramgram_used'])
    # Status code 0 means the FortiManager API accepted the request.
    assert output['raw_response']['status']['code'] == 0
| gpl-3.0 |
joakim-hove/opm-cmake | python/pybind11/tests/test_multiple_inheritance.py | 31 | 9225 | import pytest
from pybind11_tests import ConstructorStats
from pybind11_tests import multiple_inheritance as m
def test_multiple_inheritance_cpp():
    """A C++ type that multiply inherits both bases exposes both interfaces."""
    instance = m.MIType(3, 4)
    assert instance.foo() == 3
    assert instance.bar() == 4
def test_multiple_inheritance_mix1():
    """MI mixing a pure-Python base (listed first) with a C++ base."""
    class Base1:
        def __init__(self, i):
            self.i = i

        def foo(self):
            return self.i

    class MITypePy(Base1, m.Base2):
        def __init__(self, i, j):
            # pybind11 requires each C++ base's __init__ to be invoked
            # explicitly; the Python base is initialized the same way.
            Base1.__init__(self, i)
            m.Base2.__init__(self, j)

    mt = MITypePy(3, 4)
    assert mt.foo() == 3
    assert mt.bar() == 4
def test_multiple_inheritance_mix2():
    """MI mixing a C++ base (listed first) with a pure-Python base."""
    class Base2:
        def __init__(self, i):
            self.i = i

        def bar(self):
            return self.i

    class MITypePy(m.Base1, Base2):
        def __init__(self, i, j):
            # Explicit per-base initialization, C++ base first this time.
            m.Base1.__init__(self, i)
            Base2.__init__(self, j)

    mt = MITypePy(3, 4)
    assert mt.foo() == 3
    assert mt.bar() == 4
def test_multiple_inheritance_python():
    """Python-side MI over two C++ bases, across many orderings and depths."""
    class MI1(m.Base1, m.Base2):
        def __init__(self, i, j):
            m.Base1.__init__(self, i)
            m.Base2.__init__(self, j)

    class B1(object):
        def v(self):
            return 1

    class MI2(B1, m.Base1, m.Base2):
        def __init__(self, i, j):
            B1.__init__(self)
            m.Base1.__init__(self, i)
            m.Base2.__init__(self, j)

    class MI3(MI2):
        def __init__(self, i, j):
            MI2.__init__(self, i, j)

    class MI4(MI3, m.Base2):
        def __init__(self, i, j):
            MI3.__init__(self, i, j)
            # This should be ignored (Base2 is already initialized via MI2):
            m.Base2.__init__(self, i + 100)

    class MI5(m.Base2, B1, m.Base1):
        def __init__(self, i, j):
            B1.__init__(self)
            m.Base1.__init__(self, i)
            m.Base2.__init__(self, j)

    class MI6(m.Base2, B1):
        def __init__(self, i):
            m.Base2.__init__(self, i)
            B1.__init__(self)

    class B2(B1):
        def v(self):
            return 2

    class B3(object):
        def v(self):
            return 3

    class B4(B3, B2):
        def v(self):
            return 4

    class MI7(B4, MI6):
        def __init__(self, i):
            B4.__init__(self)
            MI6.__init__(self, i)

    class MI8(MI6, B3):
        def __init__(self, i):
            MI6.__init__(self, i)
            B3.__init__(self)

    class MI8b(B3, MI6):
        def __init__(self, i):
            B3.__init__(self)
            MI6.__init__(self, i)

    # Each instance must resolve foo()/bar()/v() per its own MRO.
    mi1 = MI1(1, 2)
    assert mi1.foo() == 1
    assert mi1.bar() == 2

    mi2 = MI2(3, 4)
    assert mi2.v() == 1
    assert mi2.foo() == 3
    assert mi2.bar() == 4

    mi3 = MI3(5, 6)
    assert mi3.v() == 1
    assert mi3.foo() == 5
    assert mi3.bar() == 6

    mi4 = MI4(7, 8)
    assert mi4.v() == 1
    assert mi4.foo() == 7
    # bar() reflects the MI2 initialization, not the ignored re-init above.
    assert mi4.bar() == 8

    mi5 = MI5(10, 11)
    assert mi5.v() == 1
    assert mi5.foo() == 10
    assert mi5.bar() == 11

    mi6 = MI6(12)
    assert mi6.v() == 1
    assert mi6.bar() == 12

    mi7 = MI7(13)
    assert mi7.v() == 4
    assert mi7.bar() == 13

    mi8 = MI8(14)
    assert mi8.v() == 1
    assert mi8.bar() == 14

    mi8b = MI8b(15)
    assert mi8b.v() == 3
    assert mi8b.bar() == 15
def test_multiple_inheritance_python_many_bases():
    """Stress pybind11's per-instance holder bookkeeping with 4/8/9/17 C++ bases."""
    class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, m.BaseN4):
        def __init__(self):
            m.BaseN1.__init__(self, 1)
            m.BaseN2.__init__(self, 2)
            m.BaseN3.__init__(self, 3)
            m.BaseN4.__init__(self, 4)

    class MIMany58(m.BaseN5, m.BaseN6, m.BaseN7, m.BaseN8):
        def __init__(self):
            m.BaseN5.__init__(self, 5)
            m.BaseN6.__init__(self, 6)
            m.BaseN7.__init__(self, 7)
            m.BaseN8.__init__(self, 8)

    class MIMany916(m.BaseN9, m.BaseN10, m.BaseN11, m.BaseN12, m.BaseN13, m.BaseN14, m.BaseN15,
                    m.BaseN16):
        def __init__(self):
            m.BaseN9.__init__(self, 9)
            m.BaseN10.__init__(self, 10)
            m.BaseN11.__init__(self, 11)
            m.BaseN12.__init__(self, 12)
            m.BaseN13.__init__(self, 13)
            m.BaseN14.__init__(self, 14)
            m.BaseN15.__init__(self, 15)
            m.BaseN16.__init__(self, 16)

    class MIMany19(MIMany14, MIMany58, m.BaseN9):
        def __init__(self):
            MIMany14.__init__(self)
            MIMany58.__init__(self)
            m.BaseN9.__init__(self, 9)

    class MIMany117(MIMany14, MIMany58, MIMany916, m.BaseN17):
        def __init__(self):
            MIMany14.__init__(self)
            MIMany58.__init__(self)
            MIMany916.__init__(self)
            m.BaseN17.__init__(self, 17)

    # Each BaseNi.fi() returns twice the value passed to its __init__.
    # Inherits from 4 registered C++ classes: can fit in one pointer on any modern arch:
    a = MIMany14()
    for i in range(1, 4):
        assert getattr(a, "f" + str(i))() == 2 * i

    # Inherits from 8: requires 1/2 pointers worth of holder flags on 32/64-bit arch:
    b = MIMany916()
    for i in range(9, 16):
        assert getattr(b, "f" + str(i))() == 2 * i

    # Inherits from 9: requires >= 2 pointers worth of holder flags
    c = MIMany19()
    for i in range(1, 9):
        assert getattr(c, "f" + str(i))() == 2 * i

    # Inherits from 17: requires >= 3 pointers worth of holder flags
    d = MIMany117()
    for i in range(1, 17):
        assert getattr(d, "f" + str(i))() == 2 * i
def test_multiple_inheritance_virtbase():
    """A Python subclass of a C++ type with a virtual base still converts
    correctly when passed where the virtual base is expected."""
    class MITypePy(m.Base12a):
        def __init__(self, i, j):
            m.Base12a.__init__(self, i, j)

    mt = MITypePy(3, 4)
    assert mt.bar() == 4
    # Both by-reference and shared_ptr conversions to the virtual base work.
    assert m.bar_base2a(mt) == 4
    assert m.bar_base2a_sharedptr(mt) == 4
def test_mi_static_properties():
    """Mixing bases with and without static properties should be possible
    and the result should be independent of base definition order"""
    for d in (m.VanillaStaticMix1(), m.VanillaStaticMix2()):
        assert d.vanilla() == "Vanilla"
        assert d.static_func1() == "WithStatic1"
        assert d.static_func2() == "WithStatic2"
        assert d.static_func() == d.__class__.__name__

        # Static values written on the class are visible through instances.
        m.WithStatic1.static_value1 = 1
        m.WithStatic2.static_value2 = 2
        assert d.static_value1 == 1
        assert d.static_value2 == 2
        assert d.static_value == 12

        # Assigning through an instance must route to the static property.
        d.static_value1 = 0
        assert d.static_value1 == 0
        d.static_value2 = 0
        assert d.static_value2 == 0
        d.static_value = 0
        assert d.static_value == 0
@pytest.unsupported_on_pypy
def test_mi_dynamic_attributes():
    """Mixing bases with and without dynamic attribute support"""
    # Both mix orders must allow setting an attribute not declared in C++.
    for d in (m.VanillaDictMix1(), m.VanillaDictMix2()):
        d.dynamic = 1
        assert d.dynamic == 1
def test_mi_unaligned_base():
    """Returning an offset (non-first MI) base class pointer should recognize the instance"""
    n_inst = ConstructorStats.detail_reg_inst()

    c = m.I801C()
    d = m.I801D()
    # + 4 below because we have the two instances, and each instance has offset base I801B2
    assert ConstructorStats.detail_reg_inst() == n_inst + 4
    # Converting to either base must yield the *same* Python object back.
    b1c = m.i801b1_c(c)
    assert b1c is c
    b2c = m.i801b2_c(c)
    assert b2c is c
    b1d = m.i801b1_d(d)
    assert b1d is d
    b2d = m.i801b2_d(d)
    assert b2d is d

    assert ConstructorStats.detail_reg_inst() == n_inst + 4  # no extra instances
    # Dropping all references must release each instance's registrations.
    del c, b1c, b2c
    assert ConstructorStats.detail_reg_inst() == n_inst + 2
    del d, b1d, b2d
    assert ConstructorStats.detail_reg_inst() == n_inst
def test_mi_base_return():
    """Tests returning an offset (non-first MI) base class pointer to a derived instance"""
    n_inst = ConstructorStats.detail_reg_inst()

    # Returned base pointers must be upgraded to the registered derived type.
    c1 = m.i801c_b1()
    assert type(c1) is m.I801C
    assert c1.a == 1
    assert c1.b == 2

    d1 = m.i801d_b1()
    assert type(d1) is m.I801D
    assert d1.a == 1
    assert d1.b == 2

    assert ConstructorStats.detail_reg_inst() == n_inst + 4

    c2 = m.i801c_b2()
    assert type(c2) is m.I801C
    assert c2.a == 1
    assert c2.b == 2

    d2 = m.i801d_b2()
    assert type(d2) is m.I801D
    assert d2.a == 1
    assert d2.b == 2

    assert ConstructorStats.detail_reg_inst() == n_inst + 8

    del c2
    assert ConstructorStats.detail_reg_inst() == n_inst + 6
    del c1, d1, d2
    assert ConstructorStats.detail_reg_inst() == n_inst

    # Returning an unregistered derived type with a registered base; we won't
    # pick up the derived type, obviously, but should still work (as an object
    # of whatever type was returned).
    e1 = m.i801e_c()
    assert type(e1) is m.I801C
    assert e1.a == 1
    assert e1.b == 2

    e2 = m.i801e_b2()
    assert type(e2) is m.I801B2
    assert e2.b == 2
def test_diamond_inheritance():
    """Tests that diamond inheritance works as expected (issue #959)"""
    # Issue #959: this shouldn't segfault:
    d = m.D()

    # Make sure all the various distinct pointers are all recognized as registered instances:
    # (every path through the diamond must resolve back to the same object)
    assert d is d.c0()
    assert d is d.c1()
    assert d is d.b()
    assert d is d.c0().b()
    assert d is d.c1().b()
    assert d is d.c0().c1().b().c0().b()
| gpl-3.0 |
Piratas/coleta-site | languages/zh.py | 152 | 10080 | # coding: utf8
{
'!langcode!': 'zh-tw',
'!langname!': '中文',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" 是選擇性的條件式, 格式就像 "欄位1=\'值\'". 但是 JOIN 的資料不可以使用 update 或是 delete"',
'%s %%{row} deleted': '已刪除 %s 筆',
'%s %%{row} updated': '已更新 %s 筆',
'%s selected': '%s 已選擇',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(格式類似 "zh-tw")',
'A new version of web2py is available': '新版的 web2py 已發行',
'A new version of web2py is available: %s': '新版的 web2py 已發行: %s',
'about': '關於',
'About': '關於',
'About application': '關於本應用程式',
'Access Control': 'Access Control',
'Admin is disabled because insecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Admin is disabled because unsecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': '點此處進入管理介面',
'Administrator Password:': '管理員密碼:',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': '因為來自非安全通道,管理介面關閉',
'Are you sure you want to delete file "%s"?': '確定要刪除檔案"%s"?',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Are you sure you want to uninstall application "%s"': '確定要移除應用程式 "%s"',
'Are you sure you want to uninstall application "%s"?': '確定要移除應用程式 "%s"',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登入管理帳號需要安全連線(HTTPS)或是在本機連線(localhost).',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因為在測試模式不保證多執行緒安全性,也就是說不可以同時執行多個測試案例',
'ATTENTION: you cannot edit the running application!': '注意:不可編輯正在執行的應用程式!',
'Authentication': '驗證',
'Available Databases and Tables': '可提供的資料庫和資料表',
'Buy this book': 'Buy this book',
'cache': '快取記憶體',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': '不可空白',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': '無法編譯:應用程式中含有錯誤,請除錯後再試一次.',
'Change Password': '變更密碼',
'change password': '變更密碼',
'Check to delete': '打勾代表刪除',
'Check to delete:': '點選以示刪除:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': '客戶端網址(IP)',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': '控件',
'Controllers': '控件',
'Copyright': '版權所有',
'Create new application': '創建應用程式',
'Current request': '目前網路資料要求(request)',
'Current response': '目前網路資料回應(response)',
'Current session': '目前網路連線資訊(session)',
'customize me!': '請調整我!',
'data uploaded': '資料已上傳',
'Database': '資料庫',
'Database %s select': '已選擇 %s 資料庫',
'Date and Time': '日期和時間',
'db': 'db',
'DB Model': '資料庫模組',
'Delete': '刪除',
'Delete:': '刪除:',
'Demo': 'Demo',
'Deploy on Google App Engine': '配置到 Google App Engine',
'Deployment Recipes': 'Deployment Recipes',
'Description': '描述',
'DESIGN': '設計',
'design': '設計',
'Design for': '設計為了',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': '完成!',
'Download': 'Download',
'E-mail': '電子郵件',
'EDIT': '編輯',
'Edit': '編輯',
'Edit application': '編輯應用程式',
'Edit current record': '編輯當前紀錄',
'edit profile': '編輯設定檔',
'Edit Profile': '編輯設定檔',
'Edit This App': '編輯本應用程式',
'Editing file': '編輯檔案',
'Editing file "%s"': '編輯檔案"%s"',
'Email and SMS': 'Email and SMS',
'Error logs for "%(app)s"': '"%(app)s"的錯誤紀錄',
'Errors': 'Errors',
'export as csv file': '以逗號分隔檔(csv)格式匯出',
'FAQ': 'FAQ',
'First name': '名',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函式會顯示 [passed].',
'Group ID': '群組編號',
'Groups': 'Groups',
'Hello World': '嗨! 世界',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': '匯入/匯出',
'Index': '索引',
'insert new': '插入新資料',
'insert new %s': '插入新資料 %s',
'Installed applications': '已安裝應用程式',
'Internal State': '內部狀態',
'Introduction': 'Introduction',
'Invalid action': '不合法的動作(action)',
'Invalid email': '不合法的電子郵件',
'Invalid Query': '不合法的查詢',
'invalid request': '不合法的網路要求(request)',
'Key': 'Key',
'Language files (static strings) updated': '語言檔已更新',
'Languages': '各國語言',
'Last name': '姓',
'Last saved on:': '最後儲存時間:',
'Layout': '網頁配置',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': '軟體版權為',
'Live Chat': 'Live Chat',
'login': '登入',
'Login': '登入',
'Login to the Administrative Interface': '登入到管理員介面',
'logout': '登出',
'Logout': '登出',
'Lost Password': '密碼遺忘',
'Main Menu': '主選單',
'Manage Cache': 'Manage Cache',
'Menu Model': '選單模組(menu)',
'Models': '資料模組',
'Modules': '程式模組',
'My Sites': 'My Sites',
'Name': '名字',
'New Record': '新紀錄',
'new record inserted': '已插入新紀錄',
'next 100 rows': '往後 100 筆',
'NO': '否',
'No databases in this application': '這應用程式不含資料庫',
'Online examples': '點此處進入線上範例',
'or import from csv file': '或是從逗號分隔檔(CSV)匯入',
'Origin': '原文',
'Original/Translation': '原文/翻譯',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': '密碼',
"Password fields don't match": '密碼欄不匹配',
'Peeking at file': '選擇檔案',
'Plugins': 'Plugins',
'Powered by': '基於以下技術構建:',
'Preface': 'Preface',
'previous 100 rows': '往前 100 筆',
'Python': 'Python',
'Query:': '查詢:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': '紀錄',
'record does not exist': '紀錄不存在',
'Record ID': '紀錄編號',
'Record id': '紀錄編號',
'Register': '註冊',
'register': '註冊',
'Registration key': '註冊金鑰',
'Remember me (for 30 days)': '記住我(30 天)',
'Reset Password key': '重設密碼',
'Resolve Conflict file': '解決衝突檔案',
'Role': '角色',
'Rows in Table': '在資料表裏的資料',
'Rows selected': '筆資料被選擇',
'Saved file hash:': '檔案雜湊值已紀錄:',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': '狀態',
'Static files': '靜態檔案',
'Statistics': 'Statistics',
'Stylesheet': '網頁風格檔',
'submit': 'submit',
'Submit': '傳送',
'Support': 'Support',
'Sure you want to delete this object?': '確定要刪除此物件?',
'Table': '資料表',
'Table name': '資料表名稱',
'Testing application': '測試中的應用程式',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"查詢"是一個像 "db.表1.欄位1==\'值\'" 的條件式. 以"db.表1.欄位1==db.表2.欄位2"方式則相當於執行 JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'There are no controllers': '沒有控件(controllers)',
'There are no models': '沒有資料庫模組(models)',
'There are no modules': '沒有程式模組(modules)',
'There are no static files': '沒有靜態檔案',
'There are no translators, only default language is supported': '沒有翻譯檔,只支援原始語言',
'There are no views': '沒有視圖',
'This App': 'This App',
'This is the %(filename)s template': '這是%(filename)s檔案的樣板(template)',
'Ticket': '問題單',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': '時間標記',
'Twitter': 'Twitter',
'Unable to check for upgrades': '無法做升級檢查',
'Unable to download': '無法下載',
'Unable to download app': '無法下載應用程式',
'unable to parse csv file': '無法解析逗號分隔檔(csv)',
'Update:': '更新:',
'Upload existing application': '更新存在的應用程式',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式來組合更複雜的條件式, (...)&(...) 代表同時存在的條件, (...)|(...) 代表擇一的條件, ~(...)則代表反向條件.',
'User %(id)s Logged-in': '使用者 %(id)s 已登入',
'User %(id)s Registered': '使用者 %(id)s 已註冊',
'User ID': '使用者編號',
'Verify Password': '驗證密碼',
'Videos': 'Videos',
'View': '視圖',
'Views': '視圖',
'Welcome %s': '歡迎 %s',
'Welcome to web2py': '歡迎使用 web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'YES': '是',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| agpl-3.0 |
vbannai/neutron | neutron/plugins/vmware/dhcp_meta/constants.py | 36 | 1139 | # Copyright 2014 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.common import constants as const
from neutron.db import l3_db
# A unique MAC to quickly identify the LSN port used for metadata services
# when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'.
METADATA_MAC = "fa:15:73:74:d4:74"
# Attribute keys used to tag the metadata port's id, name and device.
METADATA_PORT_ID = 'metadata:id'
METADATA_PORT_NAME = 'metadata:name'
METADATA_DEVICE_ID = 'metadata:device'
# Device owners whose ports are special-cased by the DHCP/metadata logic.
SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP,
                  const.DEVICE_OWNER_ROUTER_GW,
                  l3_db.DEVICE_OWNER_ROUTER_INTF)
| apache-2.0 |
pschella/scipy | scipy/sparse/tests/test_csr.py | 127 | 1499 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite, assert_
from scipy.sparse import csr_matrix
def _check_csr_rowslice(i, sl, X, Xcsr):
np_slice = X[i, sl]
csr_slice = Xcsr[i, sl]
assert_array_almost_equal(np_slice, csr_slice.toarray()[0])
assert_(type(csr_slice) is csr_matrix)
def test_csr_rowslice():
    """Yield row-slice consistency checks over several slice patterns."""
    size = 10
    np.random.seed(0)
    dense = np.random.random((size, size))
    dense[dense > 0.7] = 0  # sparsify: zero out roughly 30% of entries
    sparse = csr_matrix(dense)

    slice_patterns = [
        slice(None, None, None),
        slice(None, None, -1),
        slice(1, -2, 2),
        slice(-2, 1, -2),
    ]

    for row in range(size):
        for pattern in slice_patterns:
            yield _check_csr_rowslice, row, pattern, dense, sparse
def test_csr_getrow():
    """getrow(i) must equal the dense single-row slice and stay a csr_matrix."""
    size = 10
    np.random.seed(0)
    dense = np.random.random((size, size))
    dense[dense > 0.7] = 0
    sparse = csr_matrix(dense)

    for row in range(size):
        expected = dense[row:row + 1, :]
        actual = sparse.getrow(row)
        assert_array_almost_equal(expected, actual.toarray())
        assert_(type(actual) is csr_matrix)
def test_csr_getcol():
    """getcol(i) must equal the dense single-column slice and stay a csr_matrix."""
    size = 10
    np.random.seed(0)
    dense = np.random.random((size, size))
    dense[dense > 0.7] = 0
    sparse = csr_matrix(dense)

    for col in range(size):
        expected = dense[:, col:col + 1]
        actual = sparse.getcol(col)
        assert_array_almost_equal(expected, actual.toarray())
        assert_(type(actual) is csr_matrix)
# Allow running this test module directly via numpy's legacy test runner.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
anryko/ansible | lib/ansible/modules/storage/netapp/_sf_check_connections.py | 59 | 5302 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_check_connections
deprecated:
removed_in: "2.11"
why: This Module has been replaced
alternative: please use M(na_elementsw_check_connections)
short_description: Check connectivity to MVIP and SVIP.
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
description:
- Used to test the management connection to the cluster.
- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity.
options:
skip:
description:
- Skip checking connection to SVIP or MVIP.
choices: ['svip', 'mvip']
mvip:
description:
- Optionally, use to test connection of a different MVIP.
- This is not needed to test the connection to the target cluster.
svip:
description:
- Optionally, use to test connection of a different SVIP.
- This is not needed to test the connection to the target cluster.
'''
EXAMPLES = """
- name: Check connections to MVIP and SVIP
sf_check_connections:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireConnection(object):
    """Ansible module implementation that verifies management connectivity
    to a SolidFire cluster by exercising the Element API's MVIP/SVIP
    connection tests."""

    def __init__(self):
        # Base SolidFire host options (hostname/username/password) plus the
        # module-specific skip/mvip/svip parameters.
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']),
            mvip=dict(required=False, type='str', default=None),
            svip=dict(required=False, type='str', default=None)
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.skip = p['skip']
        self.mvip = p['mvip']
        self.svip = p['svip']

        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            # Port 442 is the SolidFire per-node management API port.
            self.sfe = netapp_utils.ElementFactory.create(p['hostname'], p['username'], p['password'], port=442)

    def check_mvip_connection(self):
        """
        Check connection to MVIP
        :return: true if connection was successful, false otherwise.
        :rtype: bool
        """
        try:
            test = self.sfe.test_connect_mvip(mvip=self.mvip)
            result = test.details.connected
            # Todo - Log details about the test
            return result
        except Exception as e:
            # fail_json exits the module; the return below is unreachable in
            # practice and only kept for symmetry.
            self.module.fail_json(msg='Error checking connection to MVIP: %s' % to_native(e), exception=traceback.format_exc())
            return False

    def check_svip_connection(self):
        """
        Check connection to SVIP
        :return: true if connection was successful, false otherwise.
        :rtype: bool
        """
        try:
            test = self.sfe.test_connect_svip(svip=self.svip)
            result = test.details.connected
            # Todo - Log details about the test
            return result
        except Exception as e:
            self.module.fail_json(msg='Error checking connection to SVIP: %s' % to_native(e), exception=traceback.format_exc())
            return False

    def check(self):
        """Run the selected connectivity checks and exit the module.

        ``skip`` may omit either the MVIP or the SVIP test; the module fails
        with a message naming the first endpoint that did not connect.
        """
        failed = True
        msg = ''

        if self.skip is None:
            mvip_connection_established = self.check_mvip_connection()
            svip_connection_established = self.check_svip_connection()

            # Set failed and msg
            if not mvip_connection_established:
                failed = True
                msg = 'Connection to MVIP failed.'
            elif not svip_connection_established:
                failed = True
                msg = 'Connection to SVIP failed.'
            else:
                failed = False

        elif self.skip == 'mvip':
            svip_connection_established = self.check_svip_connection()

            # Set failed and msg
            if not svip_connection_established:
                failed = True
                msg = 'Connection to SVIP failed.'
            else:
                failed = False

        elif self.skip == 'svip':
            mvip_connection_established = self.check_mvip_connection()

            # Set failed and msg
            if not mvip_connection_established:
                failed = True
                msg = 'Connection to MVIP failed.'
            else:
                failed = False

        if failed:
            self.module.fail_json(msg=msg)
        else:
            self.module.exit_json()
def main():
    """Module entry point: build the connection checker and run its checks."""
    v = SolidFireConnection()
    v.check()


if __name__ == '__main__':
    main()
| gpl-3.0 |
ierceg/peru | tests/test_keyval.py | 4 | 1113 | import unittest
import shared
from peru.keyval import KeyVal
class KeyValTest(unittest.TestCase):
    """Behavioral tests for KeyVal, a dict-like on-disk key/value store."""

    def test_keyval(self):
        root = shared.create_dir()
        tmp_dir = shared.create_dir()
        keyval = KeyVal(root, tmp_dir)
        key = "mykey"
        # keyval should be empty
        self.assertFalse(key in keyval)
        self.assertSetEqual(set(keyval), set())
        # set a key
        keyval[key] = "myval"
        self.assertEqual(keyval[key], "myval")
        self.assertTrue(key in keyval)
        self.assertSetEqual(set(keyval), {key})
        # overwrite the value
        keyval[key] = "anotherval"
        self.assertEqual(keyval[key], "anotherval")
        # instantiate a second keyval on the same dir, should have same content
        another_keyval = KeyVal(root, tmp_dir)
        self.assertTrue(key in another_keyval)
        self.assertEqual(another_keyval[key], "anotherval")
        self.assertSetEqual(set(another_keyval), {key})
        # test deletions
        # deletes through one handle must be visible through the other,
        # since both are backed by the same directory
        del keyval[key]
        self.assertFalse(key in keyval)
        self.assertFalse(key in another_keyval)
| mit |
sauloal/pycluster | pypy-1.9_64/lib-python/2.7/plat-mac/appletrawmain.py | 73 | 2005 | # Emulate sys.argv and run __main__.py or __main__.pyc in an environment that
# is as close to "normal" as possible.
#
# This script is put into __rawmain__.pyc for applets that need argv
# emulation, by BuildApplet and friends.
#
from warnings import warnpy3k
warnpy3k("In 3.x, the appletrawmain module is removed.", stacklevel=2)
import argvemulator
import os
import sys
import marshal
#
# Make sure we have an argv[0], and make _dir point to the Resources
# directory.
#
if not sys.argv or sys.argv[0][:1] == '-':
    # Insert our (guessed) name.
    _dir = os.path.split(sys.executable)[0] # removes "python"
    _dir = os.path.split(_dir)[0] # Removes "MacOS"
    _dir = os.path.join(_dir, 'Resources')
    sys.argv.insert(0, '__rawmain__')
else:
    _dir = os.path.split(sys.argv[0])[0]
#
# Add the Resources directory to the path. This is where files installed
# by BuildApplet.py with the --extra option show up, and if those files are
# modules this sys.path modification is necessary to be able to import them.
#
sys.path.insert(0, _dir)
#
# Create sys.argv
#
argvemulator.ArgvCollector().mainloop()
#
# Find the real main program to run
#
__file__ = os.path.join(_dir, '__main__.py')
if os.path.exists(__file__):
    #
    # Setup something resembling a normal environment and go.
    #
    sys.argv[0] = __file__
    del argvemulator, os, sys, _dir
    execfile(__file__)
else:
    __file__ = os.path.join(_dir, '__main__.pyc')
    if os.path.exists(__file__):
        #
        # If we have only a .pyc file we read the code object from that
        #
        sys.argv[0] = __file__
        _fp = open(__file__, 'rb')
        _fp.read(8)  # skip the .pyc magic number and timestamp header
        __code__ = marshal.load(_fp)
        #
        # Again, we create an almost-normal environment (only __code__ is
        # funny) and go.
        #
        del argvemulator, os, sys, marshal, _dir, _fp
        exec __code__
    else:
        sys.stderr.write("%s: neither __main__.py nor __main__.pyc found\n"%sys.argv[0])
        sys.exit(1)
| mit |
willdavidc/piel | catkin_ws/src/piel/scripts/venv/lib/python2.7/site-packages/pip/_vendor/requests/models.py | 360 | 30532 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string,
check_header_validity)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
    codes.moved,               # 301
    codes.found,               # 302
    codes.other,               # 303
    codes.temporary_redirect,  # 307
    codes.permanent_redirect,  # 308
)

# Maximum number of redirects followed before giving up.
DEFAULT_REDIRECT_LIMIT = 30
# Default chunk sizes (bytes) used when streaming response content.
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
    """Mixin providing URL path assembly and form/multipart body encoding."""

    @property
    def path_url(self):
        """Build the path URL to use (path plus query string, no host)."""

        url = []

        p = urlsplit(self.url)

        path = p.path
        if not path:
            # An empty path is normalized to the root.
            path = '/'

        url.append(path)

        query = p.query
        if query:
            url.append('?')
            url.append(query)

        return ''.join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """

        if isinstance(data, (str, bytes)):
            return data
        elif hasattr(data, 'read'):
            # File-like objects pass through untouched (streamed body).
            return data
        elif hasattr(data, '__iter__'):
            result = []
            for k, vs in to_key_val_list(data):
                # A scalar value is wrapped so multi-valued keys and scalars
                # share one code path.
                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        result.append(
                            (k.encode('utf-8') if isinstance(k, str) else k,
                             v.encode('utf-8') if isinstance(v, str) else v))
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        tuples. Order is retained if data is a list of tuples but arbitrary
        if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
        or 4-tuples (filename, fileobj, contentype, custom_headers).
        """
        if (not files):
            raise ValueError("Files must be provided.")
        elif isinstance(data, basestring):
            raise ValueError("Data must not be a string.")

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # Plain form fields are added first, each value UTF-8 encoded.
        for field, val in fields:
            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
                val = [val]
            for v in val:
                if v is not None:
                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
                    if not isinstance(v, bytes):
                        v = str(v)

                    new_fields.append(
                        (field.decode('utf-8') if isinstance(field, bytes) else field,
                         v.encode('utf-8') if isinstance(v, str) else v))

        for (k, v) in files:
            # support for explicit filename
            ft = None
            fh = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                elif len(v) == 3:
                    fn, fp, ft = v
                else:
                    fn, fp, ft, fh = v
            else:
                # Bare file object: derive the filename from the object,
                # falling back to the field name.
                fn = guess_filename(v) or k
                fp = v

            if isinstance(fp, (str, bytes, bytearray)):
                fdata = fp
            else:
                fdata = fp.read()

            rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
            rf.make_multipart(content_type=ft)
            new_fields.append(rf)

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
class RequestHooksMixin(object):
    """Mixin adding event-hook registration to request objects.

    Expects the host class to provide ``self.hooks``: a dict mapping event
    names to lists of callables invoked when the event fires.
    """

    def register_hook(self, event, hook):
        """Properly register a hook.

        :param event: name of a hook event already present in ``self.hooks``.
        :param hook: a callable, or an iterable of callables, to append.
        :raises ValueError: if ``event`` is not a known hook event.
        """
        if event not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % (event))

        # Use the callable() builtin (Python 2 and 3.2+): the previous check,
        # isinstance(hook, collections.Callable), relied on the collections
        # ABC alias that was removed in Python 3.10.
        if callable(hook):
            self.hooks[event].append(hook)
        elif hasattr(hook, '__iter__'):
            # Non-callable entries in an iterable are silently skipped,
            # matching the historical behavior.
            self.hooks[event].extend(h for h in hook if callable(h))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.
        Returns True if the hook existed, False if not.
        """

        try:
            self.hooks[event].remove(hook)
            return True
        except ValueError:
            return False
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
    :param json: json for the body to attach to the request (if files or data is not specified).
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """

    def __init__(self, method=None, url=None, headers=None, files=None,
                 data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
        # Route hook registration through register_hook() so only known
        # events and callable hooks are accepted.
        self.hooks = default_hooks()
        for event, hook in list((hooks or {}).items()):
            self.register_hook(event=event, hook=hook)

        # Substitute empty containers for omitted collection-valued params.
        self.method = method
        self.url = url
        self.headers = {} if headers is None else headers
        self.files = [] if files is None else files
        self.data = [] if data is None else data
        self.json = json
        self.params = {} if params is None else params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return '<Request [{0}]>'.format(self.method)

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        prepared = PreparedRequest()
        prepared.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
            files=self.files,
            data=self.data,
            json=self.json,
            params=self.params,
            auth=self.auth,
            cookies=self.cookies,
            hooks=self.hooks,
        )
        return prepared
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.

    Generated from either a :class:`Request <Request>` object or manually.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> r = req.prepare()
      <PreparedRequest [GET]>

      >>> s = requests.Session()
      >>> s.send(r)
      <Response [200]>
    """

    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        # The `CookieJar` used to create the Cookie header will be stored here
        # after prepare_cookies is called
        self._cookies = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()

    def prepare(self, method=None, url=None, headers=None, files=None,
                data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
        """Prepares the entire request with the given parameters."""

        self.prepare_method(method)
        self.prepare_url(url, params)
        self.prepare_headers(headers)
        self.prepare_cookies(cookies)
        self.prepare_body(data, files, json)
        self.prepare_auth(auth, url)

        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.

        # This MUST go after prepare_auth. Authenticators could add a hook
        self.prepare_hooks(hooks)

    def __repr__(self):
        return '<PreparedRequest [%s]>' % (self.method)

    def copy(self):
        # Duplicate this prepared request; headers and the cookie jar are
        # copied so that mutating the copy does not affect the original,
        # while body and hooks are shared.
        p = PreparedRequest()
        p.method = self.method
        p.url = self.url
        p.headers = self.headers.copy() if self.headers is not None else None
        p._cookies = _copy_cookie_jar(self._cookies)
        p.body = self.body
        p.hooks = self.hooks
        return p

    def prepare_method(self, method):
        """Prepares the given HTTP method."""
        self.method = method
        if self.method is not None:
            # HTTP method names are case-sensitive on the wire; normalize.
            self.method = to_native_string(self.method.upper())

    def prepare_url(self, url, params):
        """Prepares the given HTTP URL."""
        #: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
        #: as this will include the bytestring indicator (b'')
        #: on python 3.x.
        #: https://github.com/kennethreitz/requests/pull/2238
        if isinstance(url, bytes):
            url = url.decode('utf8')
        else:
            url = unicode(url) if is_py2 else str(url)

        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
        # `data` etc to work around exceptions from `url_parse`, which
        # handles RFC 3986 only.
        if ':' in url and not url.lower().startswith('http'):
            self.url = url
            return

        # Support for unicode domain names and paths.
        try:
            scheme, auth, host, port, path, query, fragment = parse_url(url)
        except LocationParseError as e:
            raise InvalidURL(*e.args)

        if not scheme:
            error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
            error = error.format(to_native_string(url, 'utf8'))

            raise MissingSchema(error)

        if not host:
            raise InvalidURL("Invalid URL %r: No host supplied" % url)

        # Only want to apply IDNA to the hostname
        try:
            host = host.encode('idna').decode('utf-8')
        except UnicodeError:
            raise InvalidURL('URL has an invalid label.')

        # Carefully reconstruct the network location
        netloc = auth or ''
        if netloc:
            netloc += '@'
        netloc += host
        if port:
            netloc += ':' + str(port)

        # Bare domains aren't valid URLs.
        if not path:
            path = '/'

        if is_py2:
            # On Python 2, urlunparse mishandles mixed str/unicode parts,
            # so coerce every component to UTF-8 bytes first.
            if isinstance(scheme, str):
                scheme = scheme.encode('utf-8')

            if isinstance(netloc, str):
                netloc = netloc.encode('utf-8')

            if isinstance(path, str):
                path = path.encode('utf-8')

            if isinstance(query, str):
                query = query.encode('utf-8')

            if isinstance(fragment, str):
                fragment = fragment.encode('utf-8')

        if isinstance(params, (str, bytes)):
            params = to_native_string(params)

        # Merge extra params into any query string already present in the URL.
        enc_params = self._encode_params(params)
        if enc_params:
            if query:
                query = '%s&%s' % (query, enc_params)
            else:
                query = enc_params

        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
        self.url = url

    def prepare_headers(self, headers):
        """Prepares the given HTTP headers."""

        self.headers = CaseInsensitiveDict()
        if headers:
            for header in headers.items():
                # Raise exception on invalid header value.
                check_header_validity(header)
                name, value = header
                self.headers[to_native_string(name)] = value

    def prepare_body(self, data, files, json=None):
        """Prepares the given HTTP body data."""

        # Check if file, fo, generator, iterator.
        # If not, run through normal process.

        # Nottin' on you.
        body = None
        content_type = None
        length = None

        # NOTE(review): falsy-but-present data (e.g. ``data={}``) also takes
        # this branch, so json wins in that case.
        if not data and json is not None:
            # urllib3 requires a bytes-like body. Python 2's json.dumps
            # provides this natively, but Python 3 gives a Unicode string.
            content_type = 'application/json'
            body = complexjson.dumps(json)
            if not isinstance(body, bytes):
                body = body.encode('utf-8')

        # A "stream" is any iterable that is not a plain container or string:
        # generators, file objects, etc.
        is_stream = all([
            hasattr(data, '__iter__'),
            not isinstance(data, (basestring, list, tuple, dict))
        ])

        try:
            length = super_len(data)
        except (TypeError, AttributeError, UnsupportedOperation):
            length = None

        if is_stream:
            body = data

            if files:
                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')

            if length:
                self.headers['Content-Length'] = builtin_str(length)
            else:
                # Unknown length: fall back to chunked transfer encoding.
                self.headers['Transfer-Encoding'] = 'chunked'
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    if isinstance(data, basestring) or hasattr(data, 'read'):
                        content_type = None
                    else:
                        content_type = 'application/x-www-form-urlencoded'

            self.prepare_content_length(body)

            # Add content-type if it wasn't explicitly provided.
            if content_type and ('content-type' not in self.headers):
                self.headers['Content-Type'] = content_type

        self.body = body

    def prepare_content_length(self, body):
        # Set the Content-Length header from the body; for bodiless
        # non-GET/HEAD requests, send an explicit '0'.
        if hasattr(body, 'seek') and hasattr(body, 'tell'):
            # Measure a file-like body by seeking to its end, then restore
            # the original position so the body can still be read.
            curr_pos = body.tell()
            body.seek(0, 2)
            end_pos = body.tell()
            self.headers['Content-Length'] = builtin_str(max(0, end_pos - curr_pos))
            body.seek(curr_pos, 0)
        elif body is not None:
            l = super_len(body)
            if l:
                self.headers['Content-Length'] = builtin_str(l)
        elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
            self.headers['Content-Length'] = '0'

    def prepare_auth(self, auth, url=''):
        """Prepares the given HTTP auth data."""

        # If no Auth is explicitly provided, extract it from the URL first.
        if auth is None:
            url_auth = get_auth_from_url(self.url)
            auth = url_auth if any(url_auth) else None

        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)

            # Allow auth to make its changes.
            r = auth(self)

            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)

            # Recompute Content-Length
            self.prepare_content_length(self.body)

    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data.

        This function eventually generates a ``Cookie`` header from the
        given cookies using cookielib. Due to cookielib's design, the header
        will not be regenerated if it already exists, meaning this function
        can only be called once for the life of the
        :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
        to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
        header is removed beforehand.
        """

        if isinstance(cookies, cookielib.CookieJar):
            self._cookies = cookies
        else:
            self._cookies = cookiejar_from_dict(cookies)

        cookie_header = get_cookie_header(self._cookies, self)
        if cookie_header is not None:
            self.headers['Cookie'] = cookie_header

    def prepare_hooks(self, hooks):
        """Prepares the given hooks."""
        # hooks can be passed as None to the prepare method and to this
        # method. To prevent iterating over None, simply use an empty list
        # if hooks is False-y
        hooks = hooks or []
        for event in hooks:
            self.register_hook(event, hooks[event])
class Response(object):
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """

    # Attributes preserved across pickling (see __getstate__/__setstate__).
    __attrs__ = [
        '_content', 'status_code', 'headers', 'url', 'history',
        'encoding', 'reason', 'cookies', 'elapsed', 'request'
    ]

    def __init__(self):
        super(Response, self).__init__()

        # ``False`` is a sentinel meaning "body not read yet"; after
        # consumption this holds bytes (or None if reading failed).
        self._content = False
        self._content_consumed = False

        #: Integer Code of responded HTTP Status, e.g. 404 or 200.
        self.status_code = None

        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()

        #: File-like object representation of response (for advanced usage).
        #: Use of ``raw`` requires that ``stream=True`` be set on the request.
        # This requirement does not apply for use internally to Requests.
        self.raw = None

        #: Final URL location of Response.
        self.url = None

        #: Encoding to decode with when accessing r.text.
        self.encoding = None

        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []

        #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
        self.reason = None

        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})

        #: The amount of time elapsed between sending the request
        #: and the arrival of the response (as a timedelta).
        #: This property specifically measures the time taken between sending
        #: the first byte of the request and finishing parsing the headers. It
        #: is therefore unaffected by consuming the response content or the
        #: value of the ``stream`` keyword argument.
        self.elapsed = datetime.timedelta(0)

        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
        #: is a response.
        self.request = None

    def __getstate__(self):
        # Consume everything; accessing the content attribute makes
        # sure the content has been fully read.
        if not self._content_consumed:
            self.content

        return dict(
            (attr, getattr(self, attr, None))
            for attr in self.__attrs__
        )

    def __setstate__(self, state):
        for name, value in state.items():
            setattr(self, name, value)

        # pickled objects do not have .raw
        setattr(self, '_content_consumed', True)
        setattr(self, 'raw', None)

    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)

    def __bool__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok

    def __nonzero__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        # Python 2 spelling of __bool__.
        return self.ok

    def __iter__(self):
        """Allows you to use a response as an iterator."""
        return self.iter_content(128)

    @property
    def ok(self):
        # True when raise_for_status() would not raise (status < 400).
        try:
            self.raise_for_status()
        except HTTPError:
            return False
        return True

    @property
    def is_redirect(self):
        """True if this Response is a well-formed HTTP redirect that could have
        been processed automatically (by :meth:`Session.resolve_redirects`).
        """
        return ('location' in self.headers and self.status_code in REDIRECT_STATI)

    @property
    def is_permanent_redirect(self):
        """True if this Response one of the permanent versions of redirect"""
        return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))

    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the chardet library"""
        return chardet.detect(self.content)['encoding']

    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. When stream=True is set on the
        request, this avoids reading the content at once into memory for
        large responses. The chunk size is the number of bytes it should
        read into memory. This is not necessarily the length of each item
        returned as decoding can take place.

        chunk_size must be of type int or None. A value of None will
        function differently depending on the value of `stream`.
        stream=True will read data as it arrives in whatever size the
        chunks are received. If stream=False, data is returned as
        a single chunk.

        If decode_unicode is True, content will be decoded using the best
        available encoding based on the response.
        """

        def generate():
            # Special case for urllib3.
            if hasattr(self.raw, 'stream'):
                try:
                    for chunk in self.raw.stream(chunk_size, decode_content=True):
                        yield chunk
                except ProtocolError as e:
                    raise ChunkedEncodingError(e)
                except DecodeError as e:
                    raise ContentDecodingError(e)
                except ReadTimeoutError as e:
                    raise ConnectionError(e)
            else:
                # Standard file-like object.
                while True:
                    chunk = self.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk

            self._content_consumed = True

        # _content is still the ``False`` sentinel here only if the stream
        # was consumed without caching the body.
        if self._content_consumed and isinstance(self._content, bool):
            raise StreamConsumedError()
        elif chunk_size is not None and not isinstance(chunk_size, int):
            raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
        # simulate reading small chunks of the content
        reused_chunks = iter_slices(self._content, chunk_size)

        stream_chunks = generate()

        chunks = reused_chunks if self._content_consumed else stream_chunks

        if decode_unicode:
            chunks = stream_decode_response_unicode(chunks, self)

        return chunks

    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
        """Iterates over the response data, one line at a time. When
        stream=True is set on the request, this avoids reading the
        content at once into memory for large responses.

        .. note:: This method is not reentrant safe.
        """

        # ``pending`` buffers a possibly-incomplete trailing line between
        # chunks so lines split across chunk boundaries are reassembled.
        pending = None

        for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):

            if pending is not None:
                chunk = pending + chunk

            if delimiter:
                lines = chunk.split(delimiter)
            else:
                lines = chunk.splitlines()

            # If the chunk did not end with a line break, the last element is
            # an incomplete line: hold it back until the next chunk.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None

            for line in lines:
                yield line

        if pending is not None:
            yield pending

    @property
    def content(self):
        """Content of the response, in bytes."""

        if self._content is False:
            # Read the contents.
            try:
                if self._content_consumed:
                    raise RuntimeError(
                        'The content for this response was already consumed')

                if self.status_code == 0:
                    self._content = None
                else:
                    self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()

            except AttributeError:
                self._content = None

        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content

    @property
    def text(self):
        """Content of the response, in unicode.

        If Response.encoding is None, encoding will be guessed using
        ``chardet``.

        The encoding of the response content is determined based solely on HTTP
        headers, following RFC 2616 to the letter. If you can take advantage of
        non-HTTP knowledge to make a better guess at the encoding, you should
        set ``r.encoding`` appropriately before accessing this property.
        """

        # Try charset from content-type
        content = None
        encoding = self.encoding

        if not self.content:
            return str('')

        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding

        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors='replace')
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors='replace')

        return content

    def json(self, **kwargs):
        r"""Returns the json-encoded content of a response, if any.

        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        """

        if not self.encoding and self.content and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                try:
                    return complexjson.loads(
                        self.content.decode(encoding), **kwargs
                    )
                except UnicodeDecodeError:
                    # Wrong UTF codec detected; usually because it's not UTF-8
                    # but some other 8-bit codec. This is an RFC violation,
                    # and the server didn't bother to tell us what codec *was*
                    # used.
                    pass
        return complexjson.loads(self.text, **kwargs)

    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""

        header = self.headers.get('link')

        # l = MultiDict()
        l = {}

        if header:
            links = parse_header_links(header)

            for link in links:
                # Links without a "rel" fall back to keying by URL.
                key = link.get('rel') or link.get('url')
                l[key] = link

        return l

    def raise_for_status(self):
        """Raises stored :class:`HTTPError`, if one occurred."""

        http_error_msg = ''
        if isinstance(self.reason, bytes):
            reason = self.reason.decode('utf-8', 'ignore')
        else:
            reason = self.reason

        if 400 <= self.status_code < 500:
            http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)

        elif 500 <= self.status_code < 600:
            http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)

        if http_error_msg:
            raise HTTPError(http_error_msg, response=self)

    def close(self):
        """Releases the connection back to the pool. Once this method has been
        called the underlying ``raw`` object must not be accessed again.

        *Note: Should not normally need to be called explicitly.*
        """
        if not self._content_consumed:
            self.raw.close()

        return self.raw.release_conn()
| mit |
android-ia/platform_external_chromium_org_tools_gyp | test/win/gyptest-link-ordering.py | 225 | 3058 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure the link order of object files is the same between msvs and ninja.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('link-ordering.gyp', chdir=CHDIR)
test.build('link-ordering.gyp', test.ALL, chdir=CHDIR)
def GetDisasm(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
# Get disassembly and drop int3 padding between functions.
return '\n'.join(
x for x in test.run_dumpbin('/disasm', full_path).splitlines()
if 'CC' not in x)
# This is the full dump that we expect. The source files in the .gyp match
# this order which is what determines the ordering in the binary.
expected_disasm_basic = '''
_mainCRTStartup:
00401000: B8 05 00 00 00 mov eax,5
00401005: C3 ret
?z@@YAHXZ:
00401010: B8 03 00 00 00 mov eax,3
00401015: C3 ret
?x@@YAHXZ:
00401020: B8 01 00 00 00 mov eax,1
00401025: C3 ret
?y@@YAHXZ:
00401030: B8 02 00 00 00 mov eax,2
00401035: C3 ret
_main:
00401040: 33 C0 xor eax,eax
00401042: C3 ret
'''
if expected_disasm_basic not in GetDisasm('test_ordering_exe.exe'):
print GetDisasm('test_ordering_exe.exe')
test.fail_test()
# Similar to above. The VS generator handles subdirectories differently.
expected_disasm_subdirs = '''
_mainCRTStartup:
00401000: B8 05 00 00 00 mov eax,5
00401005: C3 ret
_main:
00401010: 33 C0 xor eax,eax
00401012: C3 ret
?y@@YAHXZ:
00401020: B8 02 00 00 00 mov eax,2
00401025: C3 ret
?z@@YAHXZ:
00401030: B8 03 00 00 00 mov eax,3
00401035: C3 ret
'''
if expected_disasm_subdirs not in GetDisasm('test_ordering_subdirs.exe'):
print GetDisasm('test_ordering_subdirs.exe')
test.fail_test()
# Similar, but with directories mixed into folders (crt and main at the same
# level, but with a subdir in the middle).
expected_disasm_subdirs_mixed = '''
_mainCRTStartup:
00401000: B8 05 00 00 00 mov eax,5
00401005: C3 ret
?x@@YAHXZ:
00401010: B8 01 00 00 00 mov eax,1
00401015: C3 ret
_main:
00401020: 33 C0 xor eax,eax
00401022: C3 ret
?z@@YAHXZ:
00401030: B8 03 00 00 00 mov eax,3
00401035: C3 ret
?y@@YAHXZ:
00401040: B8 02 00 00 00 mov eax,2
00401045: C3 ret
'''
if (expected_disasm_subdirs_mixed not in
GetDisasm('test_ordering_subdirs_mixed.exe')):
print GetDisasm('test_ordering_subdirs_mixed.exe')
test.fail_test()
test.pass_test()
| bsd-3-clause |
ealegol/kolla-newton | kolla/template/filters.py | 7 | 1086 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jinja2 import contextfilter
@contextfilter
def customizable(context, val_list, call_type):
    """Jinja filter that lets templates customize a list of values.

    Looks up keys of the form ``<image_name>_<call_type>_override``,
    ``..._append`` and ``..._remove`` in the template context:
    an override replaces the list entirely, append extends it, and
    remove drops matching entries.
    """
    prefix = '%s_%s_' % (context['image_name'].replace('-', '_'), call_type)

    override_key = prefix + 'override'
    if override_key in context:
        # A full override short-circuits everything else.
        return context[override_key]

    append_key = prefix + 'append'
    if append_key in context:
        val_list.extend(context[append_key])

    remove_key = prefix + 'remove'
    if remove_key in context:
        for item in context[remove_key]:
            if item in val_list:
                val_list.remove(item)

    return val_list
| apache-2.0 |
welshjf/pyqtgraph | pyqtgraph/graphicsItems/PlotItem/plotConfigTemplate_pyqt5.py | 38 | 10666 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/graphicsItems/PlotItem/plotConfigTemplate.ui'
#
# Created: Wed Mar 26 15:09:28 2014
# by: PyQt5 UI code generator 5.0.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    def setupUi(self, Form):
        """Build and lay out all widgets of the plot-options form on *Form*.

        Auto-generated from ``plotConfigTemplate.ui`` by the PyQt5 UI code
        generator; manual edits here will be lost on regeneration.
        """
        Form.setObjectName("Form")
        Form.resize(481, 840)
        # "Average" group: list of parameters to average curves over.
        self.averageGroup = QtWidgets.QGroupBox(Form)
        self.averageGroup.setGeometry(QtCore.QRect(0, 640, 242, 182))
        self.averageGroup.setCheckable(True)
        self.averageGroup.setChecked(False)
        self.averageGroup.setObjectName("averageGroup")
        self.gridLayout_5 = QtWidgets.QGridLayout(self.averageGroup)
        self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_5.setSpacing(0)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.avgParamList = QtWidgets.QListWidget(self.averageGroup)
        self.avgParamList.setObjectName("avgParamList")
        self.gridLayout_5.addWidget(self.avgParamList, 0, 0, 1, 1)
        # "Decimate" frame: downsampling, clip-to-view and max-traces options.
        self.decimateGroup = QtWidgets.QFrame(Form)
        self.decimateGroup.setGeometry(QtCore.QRect(10, 140, 191, 171))
        self.decimateGroup.setObjectName("decimateGroup")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.decimateGroup)
        self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_4.setSpacing(0)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.clipToViewCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.clipToViewCheck.setObjectName("clipToViewCheck")
        self.gridLayout_4.addWidget(self.clipToViewCheck, 7, 0, 1, 3)
        self.maxTracesCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.maxTracesCheck.setObjectName("maxTracesCheck")
        self.gridLayout_4.addWidget(self.maxTracesCheck, 8, 0, 1, 2)
        self.downsampleCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.downsampleCheck.setObjectName("downsampleCheck")
        self.gridLayout_4.addWidget(self.downsampleCheck, 0, 0, 1, 3)
        self.peakRadio = QtWidgets.QRadioButton(self.decimateGroup)
        self.peakRadio.setChecked(True)
        self.peakRadio.setObjectName("peakRadio")
        self.gridLayout_4.addWidget(self.peakRadio, 6, 1, 1, 2)
        self.maxTracesSpin = QtWidgets.QSpinBox(self.decimateGroup)
        self.maxTracesSpin.setObjectName("maxTracesSpin")
        self.gridLayout_4.addWidget(self.maxTracesSpin, 8, 2, 1, 1)
        self.forgetTracesCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.forgetTracesCheck.setObjectName("forgetTracesCheck")
        self.gridLayout_4.addWidget(self.forgetTracesCheck, 9, 0, 1, 3)
        self.meanRadio = QtWidgets.QRadioButton(self.decimateGroup)
        self.meanRadio.setObjectName("meanRadio")
        self.gridLayout_4.addWidget(self.meanRadio, 3, 1, 1, 2)
        self.subsampleRadio = QtWidgets.QRadioButton(self.decimateGroup)
        self.subsampleRadio.setObjectName("subsampleRadio")
        self.gridLayout_4.addWidget(self.subsampleRadio, 2, 1, 1, 2)
        self.autoDownsampleCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.autoDownsampleCheck.setChecked(True)
        self.autoDownsampleCheck.setObjectName("autoDownsampleCheck")
        self.gridLayout_4.addWidget(self.autoDownsampleCheck, 1, 2, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_4.addItem(spacerItem, 2, 0, 1, 1)
        self.downsampleSpin = QtWidgets.QSpinBox(self.decimateGroup)
        self.downsampleSpin.setMinimum(1)
        self.downsampleSpin.setMaximum(100000)
        self.downsampleSpin.setProperty("value", 1)
        self.downsampleSpin.setObjectName("downsampleSpin")
        self.gridLayout_4.addWidget(self.downsampleSpin, 1, 1, 1, 1)
        # "Transform" frame: FFT and log-scale checkboxes.
        self.transformGroup = QtWidgets.QFrame(Form)
        self.transformGroup.setGeometry(QtCore.QRect(0, 0, 154, 79))
        self.transformGroup.setObjectName("transformGroup")
        self.gridLayout = QtWidgets.QGridLayout(self.transformGroup)
        self.gridLayout.setObjectName("gridLayout")
        self.fftCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.fftCheck.setObjectName("fftCheck")
        self.gridLayout.addWidget(self.fftCheck, 0, 0, 1, 1)
        self.logXCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.logXCheck.setObjectName("logXCheck")
        self.gridLayout.addWidget(self.logXCheck, 1, 0, 1, 1)
        self.logYCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.logYCheck.setObjectName("logYCheck")
        self.gridLayout.addWidget(self.logYCheck, 2, 0, 1, 1)
        # "Points" group: automatic point-display toggle.
        self.pointsGroup = QtWidgets.QGroupBox(Form)
        self.pointsGroup.setGeometry(QtCore.QRect(10, 550, 234, 58))
        self.pointsGroup.setCheckable(True)
        self.pointsGroup.setObjectName("pointsGroup")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.pointsGroup)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.autoPointsCheck = QtWidgets.QCheckBox(self.pointsGroup)
        self.autoPointsCheck.setChecked(True)
        self.autoPointsCheck.setObjectName("autoPointsCheck")
        self.verticalLayout_5.addWidget(self.autoPointsCheck)
        # "Grid" frame: X/Y grid checkboxes plus grid opacity slider.
        self.gridGroup = QtWidgets.QFrame(Form)
        self.gridGroup.setGeometry(QtCore.QRect(10, 460, 221, 81))
        self.gridGroup.setObjectName("gridGroup")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.gridGroup)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.xGridCheck = QtWidgets.QCheckBox(self.gridGroup)
        self.xGridCheck.setObjectName("xGridCheck")
        self.gridLayout_2.addWidget(self.xGridCheck, 0, 0, 1, 2)
        self.yGridCheck = QtWidgets.QCheckBox(self.gridGroup)
        self.yGridCheck.setObjectName("yGridCheck")
        self.gridLayout_2.addWidget(self.yGridCheck, 1, 0, 1, 2)
        self.gridAlphaSlider = QtWidgets.QSlider(self.gridGroup)
        self.gridAlphaSlider.setMaximum(255)
        self.gridAlphaSlider.setProperty("value", 128)
        self.gridAlphaSlider.setOrientation(QtCore.Qt.Horizontal)
        self.gridAlphaSlider.setObjectName("gridAlphaSlider")
        self.gridLayout_2.addWidget(self.gridAlphaSlider, 2, 1, 1, 1)
        self.label = QtWidgets.QLabel(self.gridGroup)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 2, 0, 1, 1)
        # "Alpha" group: global curve opacity controls.
        self.alphaGroup = QtWidgets.QGroupBox(Form)
        self.alphaGroup.setGeometry(QtCore.QRect(10, 390, 234, 60))
        self.alphaGroup.setCheckable(True)
        self.alphaGroup.setObjectName("alphaGroup")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.alphaGroup)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.autoAlphaCheck = QtWidgets.QCheckBox(self.alphaGroup)
        self.autoAlphaCheck.setChecked(False)
        self.autoAlphaCheck.setObjectName("autoAlphaCheck")
        self.horizontalLayout.addWidget(self.autoAlphaCheck)
        self.alphaSlider = QtWidgets.QSlider(self.alphaGroup)
        self.alphaSlider.setMaximum(1000)
        self.alphaSlider.setProperty("value", 1000)
        self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)
        self.alphaSlider.setObjectName("alphaSlider")
        self.horizontalLayout.addWidget(self.alphaSlider)

        # Apply translated strings, then wire up auto-connected slots.
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Install translated captions and tooltips on the widgets of *Form*.

        Auto-generated by pyuic5 from the Qt Designer .ui file -- manual
        edits here will be overwritten the next time the form is
        regenerated, so change the .ui file instead.
        """
        # QCoreApplication.translate routes every string through Qt's
        # translation tables, keyed by the "Form" context.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.averageGroup.setToolTip(_translate("Form", "Display averages of the curves displayed in this plot. The parameter list allows you to choose parameters to average over (if any are available)."))
        self.averageGroup.setTitle(_translate("Form", "Average"))
        self.clipToViewCheck.setToolTip(_translate("Form", "Plot only the portion of each curve that is visible. This assumes X values are uniformly spaced."))
        self.clipToViewCheck.setText(_translate("Form", "Clip to View"))
        self.maxTracesCheck.setToolTip(_translate("Form", "If multiple curves are displayed in this plot, check this box to limit the number of traces that are displayed."))
        self.maxTracesCheck.setText(_translate("Form", "Max Traces:"))
        self.downsampleCheck.setText(_translate("Form", "Downsample"))
        self.peakRadio.setToolTip(_translate("Form", "Downsample by drawing a saw wave that follows the min and max of the original data. This method produces the best visual representation of the data but is slower."))
        self.peakRadio.setText(_translate("Form", "Peak"))
        self.maxTracesSpin.setToolTip(_translate("Form", "If multiple curves are displayed in this plot, check \"Max Traces\" and set this value to limit the number of traces that are displayed."))
        self.forgetTracesCheck.setToolTip(_translate("Form", "If MaxTraces is checked, remove curves from memory after they are hidden (saves memory, but traces can not be un-hidden)."))
        self.forgetTracesCheck.setText(_translate("Form", "Forget hidden traces"))
        self.meanRadio.setToolTip(_translate("Form", "Downsample by taking the mean of N samples."))
        self.meanRadio.setText(_translate("Form", "Mean"))
        self.subsampleRadio.setToolTip(_translate("Form", "Downsample by taking the first of N samples. This method is fastest and least accurate."))
        self.subsampleRadio.setText(_translate("Form", "Subsample"))
        self.autoDownsampleCheck.setToolTip(_translate("Form", "Automatically downsample data based on the visible range. This assumes X values are uniformly spaced."))
        self.autoDownsampleCheck.setText(_translate("Form", "Auto"))
        self.downsampleSpin.setToolTip(_translate("Form", "Downsample data before plotting. (plot every Nth sample)"))
        self.downsampleSpin.setSuffix(_translate("Form", "x"))
        self.fftCheck.setText(_translate("Form", "Power Spectrum (FFT)"))
        self.logXCheck.setText(_translate("Form", "Log X"))
        self.logYCheck.setText(_translate("Form", "Log Y"))
        self.pointsGroup.setTitle(_translate("Form", "Points"))
        self.autoPointsCheck.setText(_translate("Form", "Auto"))
        self.xGridCheck.setText(_translate("Form", "Show X Grid"))
        self.yGridCheck.setText(_translate("Form", "Show Y Grid"))
        self.label.setText(_translate("Form", "Opacity"))
        self.alphaGroup.setTitle(_translate("Form", "Alpha"))
        self.autoAlphaCheck.setText(_translate("Form", "Auto"))
| mit |
lmazuel/ansible | lib/ansible/modules/network/avi/avi_applicationprofile.py | 50 | 6539 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_applicationprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of ApplicationProfile Avi RESTful Object
description:
- This module is used to configure ApplicationProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
dns_service_profile:
description:
- Specifies various dns service related controls for virtual service.
dos_rl_profile:
description:
- Specifies various security related controls for virtual service.
http_profile:
description:
- Specifies the http application proxy profile parameters.
name:
description:
- The name of the application profile.
required: true
preserve_client_ip:
description:
- Specifies if client ip needs to be preserved for backend connection.
- Not compatible with connection multiplexing.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
tcp_app_profile:
description:
- Specifies the tcp application proxy profile parameters.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Specifies which application layer proxy is enabled for the virtual service.
- Enum options - APPLICATION_PROFILE_TYPE_L4, APPLICATION_PROFILE_TYPE_HTTP, APPLICATION_PROFILE_TYPE_SYSLOG, APPLICATION_PROFILE_TYPE_DNS,
- APPLICATION_PROFILE_TYPE_SSL.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the application profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create an Application Profile for HTTP application enabled for SSL traffic
avi_applicationprofile:
controller: ''
username: ''
password: ''
http_profile:
cache_config:
age_header: true
aggressive: false
date_header: true
default_expire: 600
enabled: false
heuristic_expire: false
max_cache_size: 0
max_object_size: 4194304
mime_types_group_refs:
- admin:System-Cacheable-Resource-Types
min_object_size: 100
query_cacheable: false
xcache_header: true
client_body_timeout: 0
client_header_timeout: 10000
client_max_body_size: 0
client_max_header_size: 12
client_max_request_size: 48
compression_profile:
compressible_content_ref: admin:System-Compressible-Content-Types
compression: false
remove_accept_encoding_header: true
type: AUTO_COMPRESSION
connection_multiplexing_enabled: true
hsts_enabled: false
hsts_max_age: 365
http_to_https: false
httponly_enabled: false
keepalive_header: false
keepalive_timeout: 30000
max_bad_rps_cip: 0
max_bad_rps_cip_uri: 0
max_bad_rps_uri: 0
max_rps_cip: 0
max_rps_cip_uri: 0
max_rps_unknown_cip: 0
max_rps_unknown_uri: 0
max_rps_uri: 0
post_accept_timeout: 30000
secure_cookie_enabled: false
server_side_redirect_to_https: false
spdy_enabled: false
spdy_fwd_proxy_mode: false
ssl_client_certificate_mode: SSL_CLIENT_CERTIFICATE_NONE
ssl_everywhere_enabled: false
websockets_enabled: true
x_forwarded_proto_enabled: false
xff_alternate_name: X-Forwarded-For
xff_enabled: true
name: System-HTTP
tenant_ref: admin
type: APPLICATION_PROFILE_TYPE_HTTP
'''
RETURN = '''
obj:
description: ApplicationProfile (api/applicationprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: create, update or delete an Avi
    ApplicationProfile object via the Avi SDK.

    The module-specific options below are merged with the common Avi
    connection options (controller, username, password, ...) before the
    request is dispatched to ``avi_ansible_api``.
    """
    argument_specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'description': dict(type='str'),
        'dns_service_profile': dict(type='dict'),
        'dos_rl_profile': dict(type='dict'),
        'http_profile': dict(type='dict'),
        'name': dict(type='str', required=True),
        'preserve_client_ip': dict(type='bool'),
        'tcp_app_profile': dict(type='dict'),
        'tenant_ref': dict(type='str'),
        'type': dict(type='str', required=True),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail via the module object so Ansible gets a well-formed JSON result.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'applicationprofile',
                           set([]))


if __name__ == '__main__':
    main()
| gpl-3.0 |
way2heavy/youtube-dl-1 | youtube_dl/extractor/adobetv.py | 96 | 4630 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
str_to_int,
float_or_none,
ISO639Utils,
)
class AdobeTVIE(InfoExtractor):
    """Extract a single video from a tv.adobe.com "watch" page.

    Metadata (title, description, upload date, duration, view count) is
    scraped from the page markup; the media formats come from the embedded
    ``html5player`` JSON blob.
    """
    _VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'

    _TEST = {
        'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
        'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
        'info_dict': {
            'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
            'ext': 'mp4',
            'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
            'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
            # raw string: '\.' is an invalid escape sequence in a plain
            # literal and triggers DeprecationWarning on Python >= 3.6
            'thumbnail': r're:https?://.*\.jpg$',
            'upload_date': '20110914',
            'duration': 60,
            'view_count': int,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # the page embeds a JS object literal: "html5player: {...}\n"
        player = self._parse_json(
            self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
            video_id)

        title = player.get('title') or self._search_regex(
            r'data-title="([^"]+)"', webpage, 'title')
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        upload_date = unified_strdate(
            self._html_search_meta('datepublished', webpage, 'upload date'))

        # prefer the meta tag; fall back to the human-readable "Runtime:" text
        duration = parse_duration(
            self._html_search_meta('duration', webpage, 'duration') or
            self._search_regex(
                r'Runtime:\s*(\d{2}:\d{2}:\d{2})',
                webpage, 'duration', fatal=False))

        view_count = str_to_int(self._search_regex(
            r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
            webpage, 'view count'))

        formats = [{
            'url': source['src'],
            # fall back to the quality suffix embedded in the file name
            'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
            'tbr': source.get('bitrate'),
        } for source in player['sources']]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }
class AdobeTVVideoIE(InfoExtractor):
    """Extract a video embedded through the video.tv.adobe.com player.

    All information comes from the ``bridge`` JS object serialized into the
    player page; no additional API requests are made.
    """
    _VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'

    _TEST = {
        # From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners
        'url': 'https://video.tv.adobe.com/v/2456/',
        'md5': '43662b577c018ad707a63766462b1e87',
        'info_dict': {
            'id': '2456',
            'ext': 'mp4',
            'title': 'New experience with Acrobat DC',
            'description': 'New experience with Acrobat DC',
            'duration': 248.667,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        player_params = self._parse_json(self._search_regex(
            r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'),
            video_id)

        sources = player_params['sources']

        formats = []
        for source in sources:
            formats.append({
                'url': source['src'],
                'width': source.get('width'),
                'height': source.get('height'),
                'tbr': source.get('bitrate'),
            })

        # For both metadata and downloaded files the duration varies among
        # formats, so report the largest value present.
        duration = max(filter(None, (
            float_or_none(source.get('duration'), scale=1000)
            for source in sources)))

        subtitles = {}
        for translation in player_params.get('translations', []):
            lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
            subtitles.setdefault(lang_id, []).append({
                'url': translation['vttPath'],
                'ext': 'vtt',
            })

        return {
            'id': video_id,
            'formats': formats,
            'title': player_params['title'],
            'description': self._og_search_description(webpage),
            'duration': duration,
            'subtitles': subtitles,
        }
| unlicense |
BlackHole/enigma2-1 | lib/python/Components/MenuList.py | 142 | 1788 | from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from enigma import eListboxPythonStringContent, eListbox
class MenuList(HTMLComponent, GUIComponent):
	"""A listbox GUI component backed by an eListbox content object.

	``list`` holds the entries shown by the widget; ``content`` selects the
	listbox content implementation (plain strings by default).  Callables
	appended to ``onSelectionChanged`` are invoked whenever the selection
	moves.  All widget operations are guarded, so the component is safe to
	use before the underlying eListbox instance has been created.
	"""
	def __init__(self, list, enableWrapAround=True, content=eListboxPythonStringContent):
		GUIComponent.__init__(self)
		self.list = list
		self.l = content()
		self.l.setList(self.list)
		self.onSelectionChanged = [ ]
		self.enableWrapAround = enableWrapAround

	def getCurrent(self):
		"""Return the currently selected entry (content-defined type)."""
		return self.l.getCurrentSelection()

	GUI_WIDGET = eListbox

	def postWidgetCreate(self, instance):
		instance.setContent(self.l)
		instance.selectionChanged.get().append(self.selectionChanged)
		if self.enableWrapAround:
			# use the passed-in instance consistently with the rest of this
			# method (it is the same widget self.instance refers to here)
			instance.setWrapAround(True)

	def preWidgetRemove(self, instance):
		instance.setContent(None)
		instance.selectionChanged.get().remove(self.selectionChanged)

	def selectionChanged(self):
		for f in self.onSelectionChanged:
			f()

	def getSelectionIndex(self):
		return self.l.getCurrentSelectionIndex()

	def getSelectedIndex(self):
		# backward-compatible alias of getSelectionIndex (was a verbatim copy)
		return self.getSelectionIndex()

	def setList(self, list):
		"""Replace the displayed entries with *list*."""
		self.list = list
		self.l.setList(self.list)

	def moveToIndex(self, idx):
		if self.instance is not None:
			self.instance.moveSelectionTo(idx)

	def pageUp(self):
		if self.instance is not None:
			self.instance.moveSelection(self.instance.pageUp)

	def pageDown(self):
		if self.instance is not None:
			self.instance.moveSelection(self.instance.pageDown)

	def up(self):
		if self.instance is not None:
			self.instance.moveSelection(self.instance.moveUp)

	def down(self):
		if self.instance is not None:
			self.instance.moveSelection(self.instance.moveDown)

	def selectionEnabled(self, enabled):
		if self.instance is not None:
			self.instance.setSelectionEnable(enabled)
| gpl-2.0 |
GaussDing/django | tests/postgres_tests/test_array.py | 23 | 17719 | import decimal
import json
import unittest
import uuid
from django import forms
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField
from django.core import exceptions, serializers, validators
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TestCase, TransactionTestCase, override_settings
from django.utils import timezone
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
)
class TestSaveLoad(TestCase):
    """ArrayField values round-trip through the database unchanged."""

    def test_integer(self):
        instance = IntegerArrayModel(field=[1, 2, 3])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

    def test_char(self):
        instance = CharArrayModel(field=['hello', 'goodbye'])
        instance.save()
        loaded = CharArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

    def test_dates(self):
        instance = DateTimeArrayModel(
            datetimes=[timezone.now()],
            dates=[timezone.now().date()],
            times=[timezone.now().time()],
        )
        instance.save()
        loaded = DateTimeArrayModel.objects.get()
        self.assertEqual(instance.datetimes, loaded.datetimes)
        self.assertEqual(instance.dates, loaded.dates)
        self.assertEqual(instance.times, loaded.times)

    def test_tuples(self):
        # tuples are accepted on save; compare as sequences since the
        # value comes back from the database as a list
        instance = IntegerArrayModel(field=(1,))
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertSequenceEqual(instance.field, loaded.field)

    def test_integers_passed_as_strings(self):
        # This checks that get_prep_value is deferred properly
        instance = IntegerArrayModel(field=['1'])
        instance.save()
        loaded = IntegerArrayModel.objects.get()
        self.assertEqual(loaded.field, [1])

    def test_default_null(self):
        instance = NullableIntegerArrayModel()
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
        self.assertEqual(loaded.field, None)
        self.assertEqual(instance.field, loaded.field)

    def test_null_handling(self):
        # NULL is stored and reloaded for a nullable field...
        instance = NullableIntegerArrayModel(field=None)
        instance.save()
        loaded = NullableIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

        # ...but rejected at the database level for a non-nullable one
        instance = IntegerArrayModel(field=None)
        with self.assertRaises(IntegrityError):
            instance.save()

    def test_nested(self):
        instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
        instance.save()
        loaded = NestedIntegerArrayModel.objects.get()
        self.assertEqual(instance.field, loaded.field)

    def test_other_array_types(self):
        instance = OtherTypesArrayModel(
            ips=['192.168.0.1', '::1'],
            uuids=[uuid.uuid4()],
            decimals=[decimal.Decimal(1.25), 1.75],
        )
        instance.save()
        loaded = OtherTypesArrayModel.objects.get()
        self.assertEqual(instance.ips, loaded.ips)
        self.assertEqual(instance.uuids, loaded.uuids)
        self.assertEqual(instance.decimals, loaded.decimals)
class TestQuerying(TestCase):
    """Custom lookups/transforms registered by ArrayField.

    The fixtures in setUp are ordered so that expected results can be
    expressed as slices of ``self.objs``.
    """

    def setUp(self):
        self.objs = [
            NullableIntegerArrayModel.objects.create(field=[1]),
            NullableIntegerArrayModel.objects.create(field=[2]),
            NullableIntegerArrayModel.objects.create(field=[2, 3]),
            NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
            NullableIntegerArrayModel.objects.create(field=None),
        ]

    def test_exact(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__exact=[1]),
            self.objs[:1]
        )

    def test_isnull(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__isnull=True),
            self.objs[-1:]
        )

    def test_gt(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__gt=[0]),
            self.objs[:4]
        )

    def test_lt(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__lt=[2]),
            self.objs[:1]
        )

    def test_in(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
            self.objs[:2]
        )

    def test_contained_by(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
            self.objs[:2]
        )

    def test_contains(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__contains=[2]),
            self.objs[1:3]
        )

    def test_contains_charfield(self):
        # Regression for #22907
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__contains=['text']),
            []
        )

    def test_contained_by_charfield(self):
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__contained_by=['text']),
            []
        )

    def test_overlap_charfield(self):
        self.assertSequenceEqual(
            CharArrayModel.objects.filter(field__overlap=['text']),
            []
        )

    def test_index(self):
        # field__N is a (0-based) index transform
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0=2),
            self.objs[1:3]
        )

    def test_index_chained(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0__lt=3),
            self.objs[0:3]
        )

    def test_index_nested(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0=1),
            [instance]
        )

    # known limitation: an index transform does not match a whole sub-array
    @unittest.expectedFailure
    def test_index_used_on_nested_data(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
            [instance]
        )

    def test_overlap(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
            self.objs[0:3]
        )

    def test_len(self):
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__len__lte=2),
            self.objs[0:3]
        )

    def test_slice(self):
        # field__M_N is a slice transform over indices [M, N)
        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
            self.objs[1:3]
        )

        self.assertSequenceEqual(
            NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
            self.objs[2:3]
        )

    # known limitation: slice transforms don't descend into nested arrays
    @unittest.expectedFailure
    def test_slice_nested(self):
        instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
        self.assertSequenceEqual(
            NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
            [instance]
        )
class TestChecks(TestCase):
    """System-check errors for invalid ArrayField declarations."""

    def test_field_checks(self):
        field = ArrayField(models.CharField())
        field.set_attributes_from_name('field')
        errors = field.check()
        self.assertEqual(len(errors), 1)
        # postgres.E001: the base field itself fails its checks
        # (CharField without max_length)
        self.assertEqual(errors[0].id, 'postgres.E001')

    def test_invalid_base_fields(self):
        field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
        field.set_attributes_from_name('field')
        errors = field.check()
        self.assertEqual(len(errors), 1)
        # postgres.E002: relational fields are not valid array base fields
        self.assertEqual(errors[0].id, 'postgres.E002')
class TestMigrations(TransactionTestCase):
    """deconstruct() round-trips and migration behaviour of ArrayField."""

    available_apps = ['postgres_tests']

    def test_deconstruct(self):
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(type(new.base_field), type(field.base_field))

    def test_deconstruct_with_size(self):
        field = ArrayField(models.IntegerField(), size=3)
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.size, field.size)

    def test_deconstruct_args(self):
        # base-field options must survive deconstruction too
        field = ArrayField(models.CharField(max_length=20))
        name, path, args, kwargs = field.deconstruct()
        new = ArrayField(*args, **kwargs)
        self.assertEqual(new.base_field.max_length, field.base_field.max_length)

    def test_subclass_deconstruct(self):
        # subclasses must deconstruct to their own import path, not the base's
        field = ArrayField(models.IntegerField())
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')

        field = ArrayFieldSubclass()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')

    @override_settings(MIGRATION_MODULES={
        "postgres_tests": "postgres_tests.array_default_migrations",
    })
    def test_adding_field_with_default(self):
        # See #22962
        table_name = 'postgres_tests_integerarraydefaultmodel'
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
        call_command('migrate', 'postgres_tests', verbosity=0)
        with connection.cursor() as cursor:
            self.assertIn(table_name, connection.introspection.table_names(cursor))
        call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
        with connection.cursor() as cursor:
            self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(TestCase):
    """JSON serializer round-trip for ArrayField values."""

    # expected wire format: the array value is serialized as a JSON string
    test_data = '[{"fields": {"field": "[\\"1\\", \\"2\\"]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'

    def test_dumping(self):
        instance = IntegerArrayModel(field=[1, 2])
        data = serializers.serialize('json', [instance])
        # compare parsed JSON so key ordering differences don't matter
        self.assertEqual(json.loads(data), json.loads(self.test_data))

    def test_loading(self):
        instance = list(serializers.deserialize('json', self.test_data))[0].object
        self.assertEqual(instance.field, [1, 2])
class TestValidation(TestCase):
    """Model-level validation (``clean``) behaviour of ArrayField."""

    def test_unbounded(self):
        field = ArrayField(models.IntegerField())
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([1, None], None)
        # item errors carry the 'item_invalid' code and the item's index
        self.assertEqual(cm.exception.code, 'item_invalid')
        self.assertEqual(cm.exception.message % cm.exception.params, 'Item 1 in the array did not validate: This field cannot be null.')

    def test_blank_true(self):
        field = ArrayField(models.IntegerField(blank=True, null=True))
        # This should not raise a validation error
        field.clean([1, None], None)

    def test_with_size(self):
        field = ArrayField(models.IntegerField(), size=3)
        field.clean([1, 2, 3], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([1, 2, 3, 4], None)
        self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')

    def test_nested_array_mismatch(self):
        field = ArrayField(ArrayField(models.IntegerField()))
        field.clean([[1, 2], [3, 4]], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([[1, 2], [3, 4, 5]], None)
        self.assertEqual(cm.exception.code, 'nested_array_mismatch')
        self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')

    def test_with_validators(self):
        # base-field validators are applied to every item
        field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
        field.clean([1, 2], None)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean([0], None)
        self.assertEqual(cm.exception.code, 'item_invalid')
        self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.')
class TestSimpleFormField(TestCase):
    """SimpleArrayField: a delimited-string form field for arrays."""

    def test_valid(self):
        field = SimpleArrayField(forms.CharField())
        value = field.clean('a,b,c')
        self.assertEqual(value, ['a', 'b', 'c'])

    def test_to_python_fail(self):
        field = SimpleArrayField(forms.IntegerField())
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,9')
        self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')

    def test_validate_fail(self):
        field = SimpleArrayField(forms.CharField(required=True))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,')
        self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')

    def test_validators_fail(self):
        field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,bc,de')
        self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')

    def test_delimiter(self):
        field = SimpleArrayField(forms.CharField(), delimiter='|')
        value = field.clean('a|b|c')
        self.assertEqual(value, ['a', 'b', 'c'])

    def test_delimiter_with_nesting(self):
        # outer delimiter '|', inner default ','
        field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
        value = field.clean('a,b|c,d')
        self.assertEqual(value, [['a', 'b'], ['c', 'd']])

    def test_prepare_value(self):
        field = SimpleArrayField(forms.CharField())
        value = field.prepare_value(['a', 'b', 'c'])
        self.assertEqual(value, 'a,b,c')

    def test_max_length(self):
        field = SimpleArrayField(forms.CharField(), max_length=2)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,c')
        self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')

    def test_min_length(self):
        field = SimpleArrayField(forms.CharField(), min_length=4)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('a,b,c')
        self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')

    def test_required(self):
        field = SimpleArrayField(forms.CharField(), required=True)
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('')
        self.assertEqual(cm.exception.messages[0], 'This field is required.')

    def test_model_field_formfield(self):
        # ArrayField.formfield() produces a SimpleArrayField mirroring the
        # base field and its options
        model_field = ArrayField(models.CharField(max_length=27))
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertIsInstance(form_field.base_field, forms.CharField)
        self.assertEqual(form_field.base_field.max_length, 27)

    def test_model_field_formfield_size(self):
        model_field = ArrayField(models.CharField(max_length=27), size=4)
        form_field = model_field.formfield()
        self.assertIsInstance(form_field, SimpleArrayField)
        self.assertEqual(form_field.max_length, 4)
class TestSplitFormField(TestCase):
    """SplitArrayField: one sub-widget per array item (``name_0`` ... )."""

    def test_valid(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)

        data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
        form = SplitForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})

    def test_required(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), required=True, size=3)

        data = {'array_0': '', 'array_1': '', 'array_2': ''}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'array': ['This field is required.']})

    def test_remove_trailing_nulls(self):
        # empty trailing entries are stripped, interior empties are kept
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)

        data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
        form = SplitForm(data)
        self.assertTrue(form.is_valid(), form.errors)
        self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})

    def test_required_field(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)

        data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
        form = SplitForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})

    def test_rendering(self):
        class SplitForm(forms.Form):
            array = SplitArrayField(forms.CharField(), size=3)

        # assertHTMLEqual normalizes whitespace/attribute order
        self.assertHTMLEqual(str(SplitForm()), '''
            <tr>
                <th><label for="id_array_0">Array:</label></th>
                <td>
                    <input id="id_array_0" name="array_0" type="text" />
                    <input id="id_array_1" name="array_1" type="text" />
                    <input id="id_array_2" name="array_2" type="text" />
                </td>
            </tr>
        ''')
| bsd-3-clause |
dsquareindia/gensim | gensim/scripts/make_wikicorpus.py | 75 | 4574 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Copyright (C) 2012 Lars Buitinck <larsmans@gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
This actually creates three files:
* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids
* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in
Matrix Matrix format
* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation
* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
removing tokens that appear in more than 10%% of all documents). Defaults to
100,000.
If you have the `pattern` package installed, this script will use a fancy
lemmatization to get a lemma of each token (instead of plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
Example: python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki_en
"""
import logging
import os.path
import sys
from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
from gensim.models import TfidfModel
# Wiki is first scanned for all distinct word types (~7M). The types that
# appear in more than 10% of articles are removed and from the rest, the
# DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 100000
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)

    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    # lazy %-style logging args: formatted only if the record is emitted
    logger.info("running %s", ' '.join(sys.argv))

    # check and process input arguments
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]

    # os.path.dirname() returns '' for a bare output prefix such as
    # "wiki_en"; previously os.path.isdir('') failed and the script wrongly
    # aborted, so treat '' as the current directory.
    outp_dir = os.path.dirname(outp) or os.curdir
    if not os.path.isdir(outp_dir):
        raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")

    if len(sys.argv) > 3:
        keep_words = int(sys.argv[3])
    else:
        keep_words = DEFAULT_DICT_SIZE
    # mode flags are encoded in the script/symlink name, e.g. running as
    # "make_wiki_online_lemma" enables online mode plus lemmatization
    online = 'online' in program
    lemmatize = 'lemma' in program
    debug = 'nodebug' not in program

    if online:
        dictionary = HashDictionary(id_range=keep_words, debug=debug)
        dictionary.allow_update = True  # start collecting document frequencies
        wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)  # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
        # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
        dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        dictionary.save_as_text(outp + '_wordids.txt.bz2')
        wiki.save(outp + '_corpus.pkl.bz2')
        dictionary.allow_update = False
    else:
        wiki = WikiCorpus(inp, lemmatize=lemmatize)  # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
        # only keep the most frequent words (out of total ~8.2m unique tokens)
        wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
        # save dictionary and bag-of-words (term-document frequency matrix)
        MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)  # another ~9h
        wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
        # load back the id->word mapping directly from file
        # this seems to save more memory, compared to keeping the wiki.dictionary object from above
        dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
        del wiki

    # initialize corpus reader and word->id mapping
    mm = MmCorpus(outp + '_bow.mm')

    # build tfidf, ~50min
    tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
    tfidf.save(outp + '.tfidf_model')

    # save tfidf vectors in matrix market format
    # ~4h; result file is 15GB! bzip2'ed down to 4.5GB
    MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)

    logger.info("finished running %s", program)
| lgpl-2.1 |
hubert667/AIR | build/celery/build/lib.linux-i686-2.7/celery/events/__init__.py | 8 | 14103 | # -*- coding: utf-8 -*-
"""
celery.events
~~~~~~~~~~~~~
Events is a stream of messages sent for certain actions occurring
in the worker (and clients if :setting:`CELERY_SEND_TASK_SENT_EVENT`
is enabled), used for monitoring purposes.
"""
from __future__ import absolute_import
import os
import time
import threading
import warnings
from collections import deque
from contextlib import contextmanager
from copy import copy
from operator import itemgetter
from kombu import Exchange, Queue, Producer
from kombu.connection import maybe_channel
from kombu.mixins import ConsumerMixin
from kombu.utils import cached_property
from celery.app import app_or_default
from celery.utils import anon_nodename, uuid
from celery.utils.functional import dictfilter
from celery.utils.timeutils import adjust_timestamp, utcoffset, maybe_s_to_ms
__all__ = ['Events', 'Event', 'EventDispatcher', 'EventReceiver']
event_exchange = Exchange('celeryev', type='topic')
_TZGETTER = itemgetter('utcoffset', 'timestamp')
W_YAJL = """
anyjson is currently using the yajl library.
This json implementation is broken, it severely truncates floats
so timestamps will not work.
Please uninstall yajl or force anyjson to use a different library.
"""
CLIENT_CLOCK_SKEW = -1
def get_exchange(conn):
    """Return a copy of the event exchange suited to *conn*'s transport.

    Redis transports get a ``fanout`` exchange instead of the default
    ``topic`` type (quick hack for Issue #436).
    """
    exchange = copy(event_exchange)
    if conn.transport.driver_type == 'redis':
        exchange.type = 'fanout'
    return exchange
def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields):
    """Create an event dictionary.

    The only required field is ``type``; a ``timestamp`` field set to the
    current time is added when the caller did not provide one.
    """
    body = __dict__(_fields, **fields) if _fields else fields
    if 'timestamp' in body:
        body['type'] = type
    else:
        body.update(timestamp=__now__(), type=type)
    return body
def group_from(type):
    """Return the group part of an event type name.

    E.g.::

        >>> group_from('task-sent')
        'task'

        >>> group_from('custom-my-event')
        'custom'
    """
    group, _, _ = type.partition('-')
    return group
class EventDispatcher(object):
    """Dispatches event messages.

    :param connection: Connection to the broker.

    :keyword hostname: Hostname to identify ourselves as,
        by default uses the hostname returned by
        :func:`~celery.utils.anon_nodename`.

    :keyword groups: List of groups to send events for.  :meth:`send` will
        ignore send requests to groups not in this list.
        If this is :const:`None`, all events will be sent.  Example groups
        include ``"task"`` and ``"worker"``.

    :keyword enabled: Set to :const:`False` to not actually publish any
        events, making :meth:`send` a noop operation.

    :keyword channel: Can be used instead of `connection` to specify
        an exact channel to use when sending events.

    :keyword buffer_while_offline: If enabled events will be buffered
        while the connection is down.  :meth:`flush` must be called
        as soon as the connection is re-established.

    You need to :meth:`close` this after use.
    """

    #: Transports that do not support event publishing.
    DISABLED_TRANSPORTS = set(['sql'])

    app = None

    #: set of callbacks to be called when :meth:`enabled`.
    on_enabled = None

    #: set of callbacks to be called when :meth:`disabled`.
    on_disabled = None

    def __init__(self, connection=None, hostname=None, enabled=True,
                 channel=None, buffer_while_offline=True, app=None,
                 serializer=None, groups=None):
        self.app = app_or_default(app or self.app)
        self.connection = connection
        self.channel = channel
        self.hostname = hostname or anon_nodename()
        self.buffer_while_offline = buffer_while_offline
        self.mutex = threading.Lock()
        self.producer = None
        self._outbound_buffer = deque()
        self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER
        self.on_enabled = set()
        self.on_disabled = set()
        self.groups = set(groups or [])
        self.tzoffset = [-time.timezone, -time.altzone]
        self.clock = self.app.clock
        if not connection and channel:
            # derive the connection from the channel when only the
            # channel was given
            self.connection = channel.connection.client
        self.enabled = enabled
        conninfo = self.connection or self.app.connection()
        self.exchange = get_exchange(conninfo)
        if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS:
            self.enabled = False
        if self.enabled:
            self.enable()
        self.headers = {'hostname': self.hostname}
        self.pid = os.getpid()
        self.warn_if_yajl()

    def warn_if_yajl(self):
        # yajl severely truncates floats, which breaks event timestamps
        # (see W_YAJL).
        import anyjson
        if anyjson.implementation.name == 'yajl':
            warnings.warn(UserWarning(W_YAJL))

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def enable(self):
        """(Re-)create the producer and start publishing events."""
        self.producer = Producer(self.channel or self.connection,
                                 exchange=self.exchange,
                                 serializer=self.serializer)
        self.enabled = True
        for callback in self.on_enabled:
            callback()

    def disable(self):
        """Stop publishing events and release the producer."""
        if self.enabled:
            self.enabled = False
            self.close()
            for callback in self.on_disabled:
                callback()

    def publish(self, type, fields, producer, retry=False,
                retry_policy=None, blind=False, utcoffset=utcoffset,
                Event=Event):
        """Publish event using a custom :class:`~kombu.Producer`
        instance.

        :param type: Event type name, with group separated by dash (`-`).
        :param fields: Dictionary of event fields, must be json
            serializable.
        :param producer: :class:`~kombu.Producer` instance to use,
            only the ``publish`` method will be called.
        :keyword retry: Retry in the event of connection failure.
        :keyword retry_policy: Dict of custom retry policy, see
            :meth:`~kombu.Connection.ensure`.
        :keyword blind: Don't set logical clock value (also do not forward
            the internal logical clock).
        :keyword Event: Event type used to create event,
            defaults to :func:`Event`.
        :keyword utcoffset: Function returning the current utcoffset in
            hours.
        """
        with self.mutex:
            clock = None if blind else self.clock.forward()
            event = Event(type, hostname=self.hostname,
                          utcoffset=utcoffset(),
                          pid=self.pid, clock=clock, **fields)
            exchange = self.exchange
            producer.publish(
                event,
                routing_key=type.replace('-', '.'),
                exchange=exchange.name,
                retry=retry,
                retry_policy=retry_policy,
                declare=[exchange],
                serializer=self.serializer,
                headers=self.headers,
            )

    def send(self, type, blind=False, **fields):
        """Send event.

        :param type: Event type name, with group separated by dash (`-`).
        :keyword blind: Don't set logical clock value (also do not forward
            the internal logical clock).
        :keyword \*\*fields: Event fields, must be json serializable.

        This is a no-op when the dispatcher is disabled or when *type*'s
        group is filtered out by :attr:`groups`.  On publish failure the
        event is buffered for a later :meth:`flush` when
        ``buffer_while_offline`` is enabled, otherwise the error
        propagates to the caller.
        """
        if self.enabled:
            groups = self.groups
            if groups and group_from(type) not in groups:
                return
            try:
                # BUGFIX: ``blind`` was previously passed as the fourth
                # positional argument, which bound it to ``retry`` in
                # publish() and silently dropped the blind flag.
                self.publish(type, fields, self.producer, blind=blind)
            except Exception as exc:
                if not self.buffer_while_offline:
                    raise
                self._outbound_buffer.append((type, fields, exc))

    def flush(self):
        """Flushes the outbound buffer."""
        while self._outbound_buffer:
            try:
                type, fields, _ = self._outbound_buffer.popleft()
            except IndexError:
                # another thread may have drained the buffer concurrently
                return
            self.send(type, **fields)

    def extend_buffer(self, other):
        """Copies the outbound buffer of another instance."""
        self._outbound_buffer.extend(other._outbound_buffer)

    def close(self):
        """Close the event dispatcher."""
        # release the mutex if held, so a dispatcher closed mid-publish
        # does not leave the lock taken
        self.mutex.locked() and self.mutex.release()
        self.producer = None

    def _get_publisher(self):
        return self.producer

    def _set_publisher(self, producer):
        self.producer = producer
    publisher = property(_get_publisher, _set_publisher)  # XXX compat
class EventReceiver(ConsumerMixin):
    """Consume and dispatch event messages from the broker.

    :param connection: Connection to the broker.
    :keyword handlers: Mapping of event type name to handler callable.
        The special key ``"*"`` receives every event that has no
        dedicated handler.
    """
    app = None

    def __init__(self, channel, handlers=None, routing_key='#',
                 node_id=None, app=None, queue_prefix='celeryev',
                 accept=None):
        self.app = app_or_default(app or self.app)
        self.channel = maybe_channel(channel)
        self.handlers = {} if handlers is None else handlers
        self.routing_key = routing_key
        self.node_id = node_id or uuid()
        self.queue_prefix = queue_prefix
        self.exchange = get_exchange(self.connection or self.app.connection())
        # transient per-node queue, auto-deleted once this receiver is gone
        self.queue = Queue('.'.join([self.queue_prefix, self.node_id]),
                           exchange=self.exchange,
                           routing_key=self.routing_key,
                           auto_delete=True,
                           durable=False,
                           queue_arguments=self._get_queue_arguments())
        self.clock = self.app.clock
        self.adjust_clock = self.clock.adjust
        self.forward_clock = self.clock.forward
        if accept is None:
            accept = set([self.app.conf.CELERY_EVENT_SERIALIZER, 'json'])
        self.accept = accept

    def _get_queue_arguments(self):
        # dictfilter drops None values, so unset options are simply omitted
        conf = self.app.conf
        return dictfilter({
            'x-message-ttl': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_TTL),
            'x-expires': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_EXPIRES),
        })

    def process(self, type, event):
        """Dispatch *event* to the handler registered for *type*,
        falling back to the catch-all ``"*"`` handler."""
        handler = self.handlers.get(type) or self.handlers.get('*')
        if handler:
            handler(event)

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=[self.queue],
                         callbacks=[self._receive],
                         no_ack=True,
                         accept=self.accept)]

    def on_consume_ready(self, connection, channel, consumers,
                         wakeup=True, **kwargs):
        if wakeup:
            self.wakeup_workers(channel=channel)

    def itercapture(self, limit=None, timeout=None, wakeup=True):
        return self.consume(limit=limit, timeout=timeout, wakeup=wakeup)

    def capture(self, limit=None, timeout=None, wakeup=True):
        """Open up a consumer capturing events.

        This has to run in the main process, and it will never
        stop unless forced via :exc:`KeyboardInterrupt` or
        :exc:`SystemExit`.
        """
        return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))

    def wakeup_workers(self, channel=None):
        self.app.control.broadcast('heartbeat',
                                   connection=self.connection,
                                   channel=channel)

    def event_from_message(self, body, localize=True,
                           now=time.time, tzfields=_TZGETTER,
                           adjust_timestamp=adjust_timestamp,
                           CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):
        type = body['type']
        if type == 'task-sent':
            # clients never sync so cannot use their clock value
            _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW
            self.adjust_clock(_c)
        else:
            try:
                clock = body['clock']
            except KeyError:
                body['clock'] = self.forward_clock()
            else:
                self.adjust_clock(clock)

        if localize:
            try:
                offset, timestamp = tzfields(body)
            except KeyError:
                pass
            else:
                body['timestamp'] = adjust_timestamp(timestamp, offset)
        body['local_received'] = now()
        return type, body

    def _receive(self, body, message):
        self.process(*self.event_from_message(body))

    @property
    def connection(self):
        return self.channel.connection.client if self.channel else None
class Events(object):
    """Per-app namespace exposing lazily-created event classes."""

    def __init__(self, app=None):
        self.app = app

    @cached_property
    def Receiver(self):
        return self.app.subclass_with_self(
            EventReceiver, reverse='events.Receiver')

    @cached_property
    def Dispatcher(self):
        return self.app.subclass_with_self(
            EventDispatcher, reverse='events.Dispatcher')

    @cached_property
    def State(self):
        return self.app.subclass_with_self(
            'celery.events.state:State', reverse='events.State')

    @contextmanager
    def default_dispatcher(self, hostname=None, enabled=True,
                           buffer_while_offline=False):
        # borrow a producer from the app pool for the dispatcher lifetime
        with self.app.amqp.producer_pool.acquire(block=True) as prod:
            with self.Dispatcher(prod.connection, hostname, enabled,
                                 prod.channel, buffer_while_offline) as d:
                yield d
| gpl-3.0 |
sils1297/PyGithub | github/tests/Branch.py | 37 | 2201 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
class Branch(Framework.TestCase):
    """Replay-based tests for repository branch attributes."""

    def setUp(self):
        Framework.TestCase.setUp(self)
        repo = self.g.get_user().get_repo("PyGithub")
        self.branch = repo.get_branches()[0]

    def testAttributes(self):
        self.assertEqual(self.branch.name, "topic/RewriteWithGeneratedCode")
        self.assertEqual(self.branch.commit.sha,
                         "1292bf0e22c796e91cc3d6e24b544aece8c21f2a")
| gpl-3.0 |
vlas-sokolov/multicube | multicube/astro_toolbox.py | 1 | 5386 | import numpy as np
from astropy.io import fits
import os
# TODO: rewrite this to have multiple components generated here.
# Having a bundle of test filaments would be very nice.
def make_test_cube(shape=(30, 9, 9), outfile='test.fits',
                   sigma=None, seed=0, writeSN=False):
    """
    Generates a simple gaussian cube with noise of
    given shape and writes it as a fits file.

    Parameters
    ----------
    shape : (nspec, ny, nx) shape of the generated cube.
    outfile : path of the FITS file to write.
    sigma : optional (sigma_spectral, sigma_spatial) widths of the
        gaussian kernels; derived from `shape` when None.
    seed : numpy random seed, for reproducible noise.
    writeSN : when True, also write the noiseless signal cube and the
        pure noise cube to "<outfile>-signal.fits" / "<outfile>-noise.fits".
    """
    from astropy.convolution import Gaussian1DKernel, Gaussian2DKernel

    if sigma is None:
        sigma1d, sigma2d = shape[0] / 10., np.mean(shape[1:]) / 5.
    else:
        sigma1d, sigma2d = sigma

    gauss1d = Gaussian1DKernel(stddev=sigma1d, x_size=shape[0])
    gauss2d = Gaussian2DKernel(stddev=sigma2d, x_size=shape[1],
                               y_size=shape[2])
    # outer product of the spectral and spatial kernels, peak-normalized
    signal_cube = gauss1d.array[:, None, None] * gauss2d.array
    signal_cube = signal_cube / signal_cube.max()

    # adding noise:
    np.random.seed(seed)
    noise_cube = (np.random.random(signal_cube.shape) - .5) * \
        np.median(signal_cube.std(axis=0))
    test_cube = signal_cube + noise_cube
    true_rms = noise_cube.std()

    # making a simple header for the test cube:
    # the strange cdelt values are a workaround
    # for what seems to be a bug in wcslib:
    # https://github.com/astropy/astropy/issues/4555
    cdelt1, cdelt2, cdelt3 = -(4e-3 + 1e-8), 4e-3 + 1e-8, -0.1
    keylist = {'CTYPE1': 'RA---GLS', 'CTYPE2': 'DEC--GLS', 'CTYPE3': 'VRAD',
               'CDELT1': cdelt1, 'CDELT2': cdelt2, 'CDELT3': cdelt3,
               'CRVAL1': 0, 'CRVAL2': 0, 'CRVAL3': 5,
               'CRPIX1': 9, 'CRPIX2': 0, 'CRPIX3': 5,
               'CUNIT1': 'deg', 'CUNIT2': 'deg', 'CUNIT3': 'km s-1',
               'BUNIT': 'K', 'EQUINOX': 2000.0}
    # write out some values used to generate the cube:
    keylist['SIGMA'] = abs(sigma1d * cdelt3), 'in units of CUNIT3'
    keylist['RMSLVL'] = true_rms
    keylist['SEED'] = seed

    test_header = fits.Header()
    test_header.update(keylist)
    # BUGFIX: a header-less PrimaryHDU used to be constructed before the
    # header existed and was immediately discarded; only the HDU carrying
    # the header is needed.
    test_hdu = fits.PrimaryHDU(data=test_cube, header=test_header)
    # NOTE(review): `clobber` is deprecated in newer astropy in favour of
    # `overwrite` -- kept as-is for the astropy version this code targets.
    test_hdu.writeto(outfile, clobber=True, checksum=True)

    if writeSN:
        signal_hdu = fits.PrimaryHDU(data=signal_cube, header=test_header)
        noise_hdu = fits.PrimaryHDU(data=noise_cube, header=test_header)
        signame, noiname = [outfile.split('.fits')[0] + '-' + i + '.fits'
                            for i in ['signal', 'noise']]
        signal_hdu.writeto(signame, clobber=True, checksum=True)
        noise_hdu.writeto(noiname, clobber=True, checksum=True)
def download_test_cube(outfile='test.fits'):
    """
    Downloads a sample fits file from Dropbox (325kB).
    """
    from astropy.utils.data import download_file

    cached = download_file('https://db.tt/i0jWA7DU')
    try:
        os.rename(cached, outfile)
    except OSError:
        # os.rename doesn't like cross-device links
        import shutil
        shutil.move(cached, outfile)
def get_ncores():
    """
    Try to get the number of cpu cores
    """
    try:
        from multiprocessing import cpu_count
    except ImportError:
        # no multiprocessing available: assume a single core
        return 1
    return cpu_count()
def in_ipynb():
    """
    Taken from Adam Ginsburg's SO answer here:
    http://stackoverflow.com/a/24937408/4118756
    """
    try:
        cfg = get_ipython().config
    except NameError:
        # get_ipython only exists inside IPython sessions
        return False
    return cfg['IPKernelApp']['parent_appname'] == 'ipython-notebook'
def tinker_ring_parspace(parseed, xy_shape, parindices=[], paramps=[]):
    """
    An oscilating radial structure is intruduced to selected parameters.
    """
    par_maps = np.empty((len(parseed),) + xy_shape)
    par_maps[:] = np.array(parseed)[:, None, None]

    ygrid, xgrid = np.indices(xy_shape)
    center = (np.array(xy_shape) - 1) / 2.
    radius = (min(xy_shape) - 1) / 2.
    # distance from the map center, normalized to the shortest half-axis
    dist_norm = np.sqrt(((np.array([xgrid, ygrid])
                          - center[:, None, None])**2).sum(axis=0)) / radius

    # a pretty distort function
    scale = 1.5 * np.pi  # normalization constant for radial distance

    def distort(r):
        return np.sinc(r * scale)**2 + np.cos(r * scale)**2

    for par_idx, par_amp in zip(parindices, paramps):
        par_maps[par_idx] += (distort(dist_norm) - 1) * par_amp
    return par_maps
def write_skycoord_table(data, cube_ref, **kwargs):
    """
    Writes out a text file with flattened coordinates of the cube
    stacked with input array data. Additional arguments are passed
    to astropy's text writing function.

    TODO: add a useful `names` keyword?
    See astropy.io.ascii.write docstring for more info.

    Parameters
    ----------
    data : array-like structure of the same xy-grid as cube_ref.

    cube_ref : a cube file to get the coordinate grid from.
    """
    from astropy.table import Table
    from astropy.io import ascii
    from spectral_cube import SpectralCube

    cube = SpectralCube.read(cube_ref)
    # ra/dec columns, flattened to line up with the flattened data slices
    flat_coords = [cube.spatial_coordinate_map[axis].flatten()
                   for axis in [1, 0]]
    # TODO: finish this up for multiple components
    #n_repeat = np.prod(np.array(data).shape)%np.prod(cube.shape[1:])+1
    columns = flat_coords + [np.array(xy_slice).flatten()
                             for xy_slice in data]
    ascii.write(Table(np.vstack(columns).T), **kwargs)
| mit |
foss-transportationmodeling/rettina-server | flask/local/lib/python2.7/site-packages/whoosh/searching.py | 30 | 64692 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""This module contains classes and functions related to searching the index.
"""
from __future__ import division
import copy
import weakref
from math import ceil
from whoosh import classify, highlight, query, scoring
from whoosh.compat import iteritems, itervalues, iterkeys, xrange
from whoosh.idsets import DocIdSet, BitSet
from whoosh.reading import TermNotFound
from whoosh.util.cache import lru_cache
class NoTermsException(Exception):
    """Raised when matched terms are requested from a :class:`Results`
    object that was created without recording them.  To record which
    terms matched in which document, call :meth:`Searcher.search` with
    ``terms=True``.
    """

    message = "Results were created without recording terms"
class TimeLimit(Exception):
    """Raised by :class:`TimeLimitedCollector` when the time limit
    expires before the search completes.  Holders of a reference to the
    collector can still obtain partial results via
    :meth:`TimeLimitedCollector.results`.
    """

    pass
# Context class
class SearchContext(object):
    """Holds per-search state that collectors and query objects may
    consult to change how they operate.
    """

    def __init__(self, needs_current=False, weighting=None, top_query=None,
                 limit=0):
        """
        :param needs_current: when True the matcher tree must stay "valid"
            so information about the current match (value, weight, ...) is
            accessible; collectors then advance doc-by-doc instead of
            using shortcuts such as all_ids().
        :param weighting: the Weighting object used to score documents.
        :param top_query: reference to the top-level query object.
        :param limit: number of results requested by the user.
        """
        self.needs_current = needs_current
        self.weighting = weighting
        self.top_query = top_query
        self.limit = limit

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.__dict__)

    def set(self, **kwargs):
        # shallow-copy self and overlay the given attributes
        clone = copy.copy(self)
        clone.__dict__.update(kwargs)
        return clone
# Searcher class
class Searcher(object):
"""Wraps an :class:`~whoosh.reading.IndexReader` object and provides
methods for searching the index.
"""
def __init__(self, reader, weighting=scoring.BM25F, closereader=True,
fromindex=None, parent=None):
"""
:param reader: An :class:`~whoosh.reading.IndexReader` object for
the index to search.
:param weighting: A :class:`whoosh.scoring.Weighting` object to use to
score found documents.
:param closereader: Whether the underlying reader will be closed when
the searcher is closed.
:param fromindex: An optional reference to the index of the underlying
reader. This is required for :meth:`Searcher.up_to_date` and
:meth:`Searcher.refresh` to work.
"""
self.ixreader = reader
self.is_closed = False
self._closereader = closereader
self._ix = fromindex
self._doccount = self.ixreader.doc_count_all()
# Cache for PostingCategorizer objects (supports fields without columns)
self._field_caches = {}
if parent:
self.parent = weakref.ref(parent)
self.schema = parent.schema
self._idf_cache = parent._idf_cache
self._filter_cache = parent._filter_cache
else:
self.parent = None
self.schema = self.ixreader.schema
self._idf_cache = {}
self._filter_cache = {}
if type(weighting) is type:
self.weighting = weighting()
else:
self.weighting = weighting
self.leafreaders = None
self.subsearchers = None
if not self.ixreader.is_atomic():
self.leafreaders = self.ixreader.leaf_readers()
self.subsearchers = [(self._subsearcher(r), offset) for r, offset
in self.leafreaders]
# Copy attributes/methods from wrapped reader
for name in ("stored_fields", "all_stored_fields", "has_vector",
"vector", "vector_as", "lexicon", "field_terms",
"frequency", "doc_frequency", "term_info",
"doc_field_length", "corrector", "iter_docs"):
setattr(self, name, getattr(self.ixreader, name))
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def _subsearcher(self, reader):
return self.__class__(reader, fromindex=self._ix,
weighting=self.weighting, parent=self)
def _offset_for_subsearcher(self, subsearcher):
for ss, offset in self.subsearchers:
if ss is subsearcher:
return offset
def leaf_searchers(self):
if self.is_atomic():
return [(self, 0)]
else:
return self.subsearchers
def is_atomic(self):
return self.reader().is_atomic()
def has_parent(self):
return self.parent is not None
def get_parent(self):
    """Return the parent searcher when one exists (``has_parent()``),
    otherwise ``self``.
    """
    # self.parent is a weakref; calling it dereferences the searcher
    return self.parent() if self.has_parent() else self
def doc_count(self):
"""Returns the number of UNDELETED documents in the index.
"""
return self.ixreader.doc_count()
def doc_count_all(self):
"""Returns the total number of documents, DELETED OR UNDELETED, in
the index.
"""
return self._doccount
def field_length(self, fieldname):
if self.has_parent():
return self.get_parent().field_length(fieldname)
else:
return self.reader().field_length(fieldname)
def max_field_length(self, fieldname):
if self.has_parent():
return self.get_parent().max_field_length(fieldname)
else:
return self.reader().max_field_length(fieldname)
def up_to_date(self):
"""Returns True if this Searcher represents the latest version of the
index, for backends that support versioning.
"""
if not self._ix:
raise Exception("No reference to index")
return self._ix.latest_generation() == self.ixreader.generation()
def refresh(self):
"""Returns a fresh searcher for the latest version of the index::
my_searcher = my_searcher.refresh()
If the index has not changed since this searcher was created, this
searcher is simply returned.
This method may CLOSE underlying resources that are no longer needed
by the refreshed searcher, so you CANNOT continue to use the original
searcher after calling ``refresh()`` on it.
"""
if not self._ix:
raise Exception("No reference to index")
if self._ix.latest_generation() == self.reader().generation():
return self
# Get a new reader, re-using resources from the current reader if
# possible
self.is_closed = True
newreader = self._ix.reader(reuse=self.ixreader)
return self.__class__(newreader, fromindex=self._ix,
weighting=self.weighting)
def close(self):
if self._closereader:
self.ixreader.close()
self.is_closed = True
def avg_field_length(self, fieldname, default=None):
    """Return the average length of ``fieldname`` over all documents,
    or *default* when the field is not scorable.
    """
    if not self.schema[fieldname].scorable:
        return default
    # `or 1` guards against division by zero on an empty index
    return self.field_length(fieldname) / (self._doccount or 1)
def reader(self):
"""Returns the underlying :class:`~whoosh.reading.IndexReader`.
"""
return self.ixreader
def context(self, **kwargs):
"""Generates a :class:`SearchContext` for this searcher.
"""
if "weighting" not in kwargs:
kwargs["weighting"] = self.weighting
return SearchContext(**kwargs)
def boolean_context(self):
"""Shortcut returns a SearchContext set for unscored (boolean)
searching.
"""
return self.context(needs_current=False, weighting=None)
def postings(self, fieldname, text, weighting=None, qf=1):
"""Returns a :class:`whoosh.matching.Matcher` for the postings of the
given term. Unlike the :func:`whoosh.reading.IndexReader.postings`
method, this method automatically sets the scoring functions on the
matcher from the searcher's weighting object.
"""
weighting = weighting or self.weighting
globalscorer = weighting.scorer(self, fieldname, text, qf=qf)
if self.is_atomic():
return self.ixreader.postings(fieldname, text, scorer=globalscorer)
else:
from whoosh.matching import MultiMatcher
matchers = []
docoffsets = []
term = (fieldname, text)
for subsearcher, offset in self.subsearchers:
r = subsearcher.reader()
if term in r:
# Make a segment-specific scorer; the scorer should call
# searcher.parent() to get global stats
scorer = weighting.scorer(subsearcher, fieldname, text, qf=qf)
m = r.postings(fieldname, text, scorer=scorer)
matchers.append(m)
docoffsets.append(offset)
if not matchers:
raise TermNotFound(fieldname, text)
return MultiMatcher(matchers, docoffsets, globalscorer)
def idf(self, fieldname, text):
    """Calculates the Inverse Document Frequency of the current term
    (calls idf() on the searcher's Weighting object).

    The result is memoized per (fieldname, text), so Weighting objects
    should call *this* method, which will in turn call *their own*
    idf() methods.
    """
    cache = self._idf_cache
    key = (fieldname, text)
    try:
        return cache[key]
    except KeyError:
        value = cache[key] = self.weighting.idf(self, fieldname, text)
        return value
def document(self, **kw):
"""Convenience method returns the stored fields of a document
matching the given keyword arguments, where the keyword keys are
field names and the values are terms that must appear in the field.
This method is equivalent to::
searcher.stored_fields(searcher.document_number(<keyword args>))
Where Searcher.documents() returns a generator, this function returns
either a dictionary or None. Use it when you assume the given keyword
arguments either match zero or one documents (i.e. at least one of the
fields is a unique key).
>>> stored_fields = searcher.document(path=u"/a/b")
>>> if stored_fields:
... print(stored_fields['title'])
... else:
... print("There is no document with the path /a/b")
"""
for p in self.documents(**kw):
return p
def documents(self, **kw):
"""Convenience method returns the stored fields of a document
matching the given keyword arguments, where the keyword keys are field
names and the values are terms that must appear in the field.
Returns a generator of dictionaries containing the stored fields of any
documents matching the keyword arguments. If you do not specify any
arguments (``Searcher.documents()``), this method will yield **all**
documents.
>>> for stored_fields in searcher.documents(emailto=u"matt@whoosh.ca"):
... print("Email subject:", stored_fields['subject'])
"""
ixreader = self.ixreader
return (ixreader.stored_fields(docnum)
for docnum in self.document_numbers(**kw))
def _kw_to_text(self, kw):
for k, v in iteritems(kw):
field = self.schema[k]
kw[k] = field.to_bytes(v)
def _query_for_kw(self, kw):
subqueries = []
for key, value in iteritems(kw):
subqueries.append(query.Term(key, value))
if subqueries:
q = query.And(subqueries).normalize()
else:
q = query.Every()
return q
def document_number(self, **kw):
    """Returns the document number of the document matching the given
    keyword arguments, where the keyword keys are field names and the
    values are terms that must appear in the field.

    >>> docnum = searcher.document_number(path=u"/a/b")

    Where Searcher.document_numbers() returns a generator, this function
    returns either an int or None.  Use it when you assume the given
    keyword arguments match zero or one documents (i.e. at least one of
    the fields is a unique key).

    :rtype: int
    """
    self._kw_to_text(kw)
    if len(kw) != 1:
        # general case: build a boolean query and take the first match
        m = self._query_for_kw(kw).matcher(self, self.boolean_context())
        if m.is_active():
            return m.id()
    else:
        # common fast path: a single keyword can use first_id()
        # directly instead of building a query.
        fieldname, value = list(kw.items())[0]
        try:
            return self.reader().first_id(fieldname, value)
        except TermNotFound:
            return None
def document_numbers(self, **kw):
"""Returns a generator of the document numbers for documents matching
the given keyword arguments, where the keyword keys are field names and
the values are terms that must appear in the field. If you do not
specify any arguments (``Searcher.document_numbers()``), this method
will yield **all** document numbers.
>>> docnums = list(searcher.document_numbers(emailto="matt@whoosh.ca"))
"""
self._kw_to_text(kw)
return self.docs_for_query(self._query_for_kw(kw))
def _find_unique(self, uniques):
# uniques is a list of ("unique_field_name", "field_value") tuples
delset = set()
for name, value in uniques:
docnum = self.document_number(**{name: value})
if docnum is not None:
delset.add(docnum)
return delset
@lru_cache(20)
def _query_to_comb(self, fq):
return BitSet(self.docs_for_query(fq), size=self.doc_count_all())
def _filter_to_comb(self, obj):
    """Normalize *obj* (None, set, DocIdSet, Results, ResultsPage or
    Query) into a document-id collection usable as a filter, or None.
    """
    if obj is None:
        return None
    if isinstance(obj, (set, DocIdSet)):
        return obj
    if isinstance(obj, Results):
        return obj.docs()
    if isinstance(obj, ResultsPage):
        return obj.results.docs()
    if isinstance(obj, query.Query):
        return self._query_to_comb(obj)
    raise Exception("Don't know what to do with filter object %r" % obj)
def suggest(self, fieldname, text, limit=5, maxdist=2, prefix=0):
"""Returns a sorted list of suggested corrections for the given
mis-typed word ``text`` based on the contents of the given field::
>>> searcher.suggest("content", "specail")
["special"]
This is a convenience method. If you are planning to get suggestions
for multiple words in the same field, it is more efficient to get a
:class:`~whoosh.spelling.Corrector` object and use it directly::
corrector = searcher.corrector("fieldname")
for word in words:
print(corrector.suggest(word))
:param limit: only return up to this many suggestions. If there are not
enough terms in the field within ``maxdist`` of the given word, the
returned list will be shorter than this number.
:param maxdist: the largest edit distance from the given word to look
at. Numbers higher than 2 are not very effective or efficient.
:param prefix: require suggestions to share a prefix of this length
with the given word. This is often justifiable since most
misspellings do not involve the first letter of the word. Using a
prefix dramatically decreases the time it takes to generate the
list of words.
"""
c = self.reader().corrector(fieldname)
return c.suggest(text, limit=limit, maxdist=maxdist, prefix=prefix)
def key_terms(self, docnums, fieldname, numterms=5,
              model=classify.Bo1Model, normalize=True):
    """Return the ``numterms`` most important terms from the documents
    listed (by number) in ``docnums``. You can get document numbers for
    the documents you're interested in with the ``document_number()``
    and ``document_numbers()`` methods.

    "Most important" is generally defined as terms that occur frequently
    in the top hits but relatively infrequently in the collection as a
    whole.

        >>> docnum = searcher.document_number(path=u"/a/b")
        >>> keywords_and_scores = searcher.key_terms([docnum], "content")

    This method returns a list of ``("term", score)`` tuples. The score
    may be useful if you want to know the "strength" of the key terms;
    to get just the terms themselves you can do this:

        >>> kws = [kw for kw, score in searcher.key_terms([docnum], "content")]

    :param docnums: a sequence of document numbers specifying which
        documents to extract key terms from.
    :param fieldname: look at the terms in this field. This field must
        store vectors.
    :param numterms: return this number of important terms.
    :param model: the classify.ExpansionModel to use. See the classify
        module.
    :param normalize: normalize the scores.
    :returns: a list of ``("term", score)`` tuples.
    """
    exp = classify.Expander(self.ixreader, fieldname, model=model)
    for dn in docnums:
        exp.add_document(dn)
    return exp.expanded_terms(numterms, normalize=normalize)
def key_terms_from_text(self, fieldname, text, numterms=5,
                        model=classify.Bo1Model, normalize=True):
    """Return the ``numterms`` most important terms from the given text,
    scored against the contents of ``fieldname`` in the index.

    :param numterms: return this number of important terms.
    :param model: the classify.ExpansionModel to use. See the classify
        module.
    :param normalize: normalize the scores.
    """
    exp = classify.Expander(self.ixreader, fieldname, model=model)
    exp.add_text(text)
    return exp.expanded_terms(numterms, normalize=normalize)
def more_like(self, docnum, fieldname, text=None, top=10, numterms=5,
              model=classify.Bo1Model, normalize=False, filter=None):
    """Return a :class:`Results` object containing documents similar to
    the given document, based on "key terms" in the given field::

        # Get the ID for the document you're interested in
        docnum = search.document_number(path=u"/a/b/c")

        r = searcher.more_like(docnum)
        print("Documents like", searcher.stored_fields(docnum)["title"])
        for hit in r:
            print(hit["title"])

    :param fieldname: the name of the field to use to test similarity.
    :param text: by default, the method will attempt to load the contents
        of the field from the stored fields for the document, or from a
        term vector. If the field isn't stored or vectored in the index,
        but you have access to the text another way (for example, loading
        from a file or a database), you can supply it using the ``text``
        parameter.
    :param top: the number of results to return.
    :param numterms: the number of "key terms" to extract from the hit
        and search for. Using more terms is slower but gives potentially
        more and more accurate results.
    :param model: (expert) a :class:`whoosh.classify.ExpansionModel` to
        use to compute "key terms".
    :param normalize: whether to normalize term weights.
    :param filter: a query, Results object, or set of docnums. The
        results will only contain documents that are also in the filter
        object.
    """
    if text:
        keyterms = self.key_terms_from_text(fieldname, text,
                                            numterms=numterms,
                                            model=model,
                                            normalize=normalize)
    else:
        keyterms = self.key_terms([docnum], fieldname, numterms=numterms,
                                  model=model, normalize=normalize)

    # Build an Or query from the weighted key terms; mask out the source
    # document so it doesn't appear in its own "similar" results
    subqueries = [query.Term(fieldname, word, boost=weight)
                  for word, weight in keyterms]
    return self.search(query.Or(subqueries), limit=top, filter=filter,
                       mask=set([docnum]))
def search_page(self, query, pagenum, pagelen=10, **kwargs):
    """Like the :meth:`Searcher.search` method, but returns a
    :class:`ResultsPage` object instead. This is a convenience function
    for getting a certain "page" of the results for the given query,
    which is often useful in web search interfaces.

    For example::

        querystring = request.get("q")
        query = queryparser.parse("content", querystring)

        pagenum = int(request.get("page", 1))
        pagelen = int(request.get("perpage", 10))

        results = searcher.search_page(query, pagenum, pagelen=pagelen)
        print("Page %d of %d" % (results.pagenum, results.pagecount))
        for hit in results:
            print("%d: %s" % (hit.rank + 1, hit["title"]))

    (Note that ``results.pagelen`` might be less than the ``pagelen``
    argument if there aren't enough results to fill a page.)

    Any additional keyword arguments are passed through to
    :meth:`Searcher.search`, so you can get paged results of a sorted
    search, for example::

        results = searcher.search_page(q, 2, sortedby="date", reverse=True)

    Currently, searching for page 100 with pagelen of 10 takes the same
    amount of time as using :meth:`Searcher.search` to find the first
    1000 results; there is no special optimization for fetching a page
    from the middle of the full results list.

    This method will raise a ``ValueError`` if you ask for a page number
    higher than the number of pages in the resulting query.

    :param query: the :class:`whoosh.query.Query` object to match.
    :param pagenum: the page number to retrieve, starting at ``1`` for
        the first page.
    :param pagelen: the number of results per page.
    :returns: :class:`ResultsPage`
    """
    if pagenum < 1:
        raise ValueError("pagenum must be >= 1")

    # Score enough documents to cover all pages up to the requested one
    upper = pagenum * pagelen
    results = self.search(query, limit=upper, **kwargs)
    return ResultsPage(results, pagenum, pagelen)
def find(self, defaultfield, querystring, **kwargs):
    """Parse ``querystring`` using a default
    :class:`whoosh.qparser.QueryParser` (with ``defaultfield`` as the
    default field and this searcher's schema), run the resulting query,
    and return the :class:`Results`.

    Any additional keyword arguments are passed through to
    :meth:`Searcher.search`.
    """
    from whoosh.qparser import QueryParser
    qp = QueryParser(defaultfield, schema=self.ixreader.schema)
    q = qp.parse(querystring)
    return self.search(q, **kwargs)
def docs_for_query(self, q, for_deletion=False):
    """Yield the document numbers of documents matching the given
    :class:`whoosh.query.Query` object.

    :param for_deletion: if True, use the query's ``deletion_docs``
        method instead of ``docs``; this lets special queries (for
        example, nested queries) override which documents get deleted.
    """
    method = q.deletion_docs if for_deletion else q.docs

    if self.subsearchers:
        # Multi-segment searcher: translate each subsearcher's local
        # document numbers into the global numbering using its offset
        for subsearcher, offset in self.subsearchers:
            for docnum in method(subsearcher):
                yield docnum + offset
    else:
        for docnum in method(self):
            yield docnum
def collector(self, limit=10, sortedby=None, reverse=False, groupedby=None,
              collapse=None, collapse_limit=1, collapse_order=None,
              optimize=True, filter=None, mask=None, terms=False,
              maptype=None, scored=True):
    """Low-level method: returns a configured
    :class:`whoosh.collectors.Collector` object based on the given
    arguments. You can use this object with
    :meth:`Searcher.search_with_collector` to search.

    See the documentation for the :meth:`Searcher.search` method for a
    description of the parameters.

    This method may be useful to get a basic collector object and then wrap
    it with another collector from ``whoosh.collectors`` or with a custom
    collector of your own::

        # Equivalent of
        # results = mysearcher.search(myquery, limit=10)
        # but with a time limit...

        # Create a TopCollector
        c = mysearcher.collector(limit=10)

        # Wrap it with a TimeLimitCollector with a time limit of
        # 10.5 seconds
        from whoosh.collectors import TimeLimitCollector
        c = TimeLimitCollector(c, 10.5)

        # Search using the custom collector
        results = mysearcher.search_with_collector(myquery, c)
    """
    from whoosh import collectors

    if limit is not None and limit < 1:
        raise ValueError("limit must be >= 1")

    # Choose the base collector according to how the results need to be
    # ordered and how many of them are wanted
    if not scored and not sortedby:
        c = collectors.UnsortedCollector()
    elif sortedby:
        c = collectors.SortingCollector(sortedby, limit=limit,
                                        reverse=reverse)
    elif groupedby or reverse or not limit or limit >= self.doc_count():
        # A collector that gathers every matching document
        c = collectors.UnlimitedCollector(reverse=reverse)
    else:
        # A collector that uses block quality optimizations and a heap
        # queue to only collect the top N documents
        c = collectors.TopCollector(limit, usequality=optimize)

    # Wrap the base collector with optional feature collectors
    if groupedby:
        c = collectors.FacetCollector(c, groupedby, maptype=maptype)
    if terms:
        c = collectors.TermsCollector(c)
    if collapse:
        c = collectors.CollapseCollector(c, collapse, limit=collapse_limit,
                                         order=collapse_order)

    # Filtering wraps last so it sees the docs first
    if filter or mask:
        c = collectors.FilterCollector(c, filter, mask)

    return c
def search(self, q, **kwargs):
    """Runs a :class:`whoosh.query.Query` object on this searcher and
    returns a :class:`Results` object. See :doc:`/searching` for more
    information.

    This method takes many keyword arguments (documented below).

    See :doc:`/facets` for information on using ``sortedby`` and/or
    ``groupedby``. See :ref:`collapsing` for more information on using
    ``collapse``, ``collapse_limit``, and ``collapse_order``.

    :param q: a :class:`whoosh.query.Query` object to use to match
        documents.
    :param limit: the maximum number of documents to score. If you're only
        interested in the top N documents, you can set limit=N to limit the
        scoring for a faster search. Default is 10.
    :param scored: whether to score the results. Overridden by ``sortedby``.
        If both ``scored=False`` and ``sortedby=None``, the results will be
        in arbitrary order, but will usually be computed faster than
        scored or sorted results.
    :param sortedby: see :doc:`/facets`.
    :param reverse: Reverses the direction of the sort. Default is False.
    :param groupedby: see :doc:`/facets`.
    :param optimize: use optimizations to get faster results when possible.
        Default is True.
    :param filter: a query, Results object, or set of docnums. The results
        will only contain documents that are also in the filter object.
    :param mask: a query, Results object, or set of docnums. The results
        will not contain any documents that are in the mask object.
    :param terms: if True, record which terms were found in each matching
        document. See :doc:`/searching` for more information. Default is
        False.
    :param maptype: by default, the results of faceting with ``groupedby``
        is a dictionary mapping group names to ordered lists of document
        numbers in the group. You can pass a
        :class:`whoosh.sorting.FacetMap` subclass to this keyword argument
        to specify a different (usually faster) method for grouping. For
        example, ``maptype=sorting.Count`` would store only the count of
        documents in each group, instead of the full list of document IDs.
    :param collapse: a :doc:`facet </facets>` to use to collapse the
        results. See :ref:`collapsing` for more information.
    :param collapse_limit: the maximum number of documents to allow with
        the same collapse key. See :ref:`collapsing` for more information.
    :param collapse_order: an optional ordering :doc:`facet </facets>`
        to control which documents are kept when collapsing. The default
        (``collapse_order=None``) uses the results order (e.g. the highest
        scoring documents in a scored search).
    :rtype: :class:`Results`
    """
    # Call the collector() method to build a collector based on the
    # parameters passed to this method
    c = self.collector(**kwargs)

    # Call the lower-level method to run the collector
    self.search_with_collector(q, c)

    # Return the results object from the collector
    return c.results()
def search_with_collector(self, q, collector, context=None):
    """Low-level method: runs a :class:`whoosh.query.Query` object on
    this searcher, feeding the matches into the given
    :class:`whoosh.collectors.Collector` object::

        myquery = query.Term("content", "cabbage")

        uc = collectors.UnlimitedCollector()
        tc = TermsCollector(uc)

        mysearcher.search_with_collector(myquery, tc)
        print(tc.docterms)
        print(tc.results())

    Note that this method does not return a :class:`Results` object; you
    need to access the collector afterwards to get a results object or
    any other information the collector holds.

    :param q: a :class:`whoosh.query.Query` object to use to match
        documents.
    :param collector: a :class:`whoosh.collectors.Collector` object to
        feed the results into.
    """
    # Use the supplied search context if given (and truthy), otherwise
    # get one from the searcher
    ctx = context or self.context()

    # Let the collector set up from the top-level information, then run
    collector.prepare(self, q, ctx)
    collector.run()
def correct_query(self, q, qstring, correctors=None, terms=None, maxdist=2,
                  prefix=0, aliases=None):
    """
    Returns a corrected version of the given user query using a default
    :class:`whoosh.spelling.ReaderCorrector`.

    The default:

    * Corrects any words that don't appear in the index.

    * Takes suggestions from the words in the index. To make certain fields
      use custom correctors, use the ``correctors`` argument to pass a
      dictionary mapping field names to :class:`whoosh.spelling.Corrector`
      objects.

    * ONLY CORRECTS FIELDS THAT HAVE THE ``spelling`` ATTRIBUTE in the
      schema (or for which you pass a custom corrector). Spell checking
      fields without ``spelling`` is slower.

    Expert users who want more sophisticated correction behavior can create
    a custom :class:`whoosh.spelling.QueryCorrector` and use that instead
    of this method.

    Returns a :class:`whoosh.spelling.Correction` object with a ``query``
    attribute containing the corrected :class:`whoosh.query.Query` object
    and a ``string`` attribute containing the corrected query string.

    >>> from whoosh import qparser, highlight
    >>> qtext = 'mary "litle lamb"'
    >>> q = qparser.QueryParser("text", myindex.schema)
    >>> mysearcher = myindex.searcher()
    >>> correction = mysearcher().correct_query(q, qtext)
    >>> correction.query
    <query.And ...>
    >>> correction.string
    'mary "little lamb"'
    >>> mysearcher.close()

    You can use the ``Correction`` object's ``format_string`` method to
    format the corrected query string using a
    :class:`whoosh.highlight.Formatter` object. For example, you can format
    the corrected string as HTML, emphasizing the changed words.

    >>> hf = highlight.HtmlFormatter(classname="change")
    >>> correction.format_string(hf)
    'mary "<strong class="change term0">little</strong> lamb"'

    :param q: the :class:`whoosh.query.Query` object to correct.
    :param qstring: the original user query from which the query object was
        created. You can pass None instead of a string, in which the
        second item in the returned tuple will also be None.
    :param correctors: an optional dictionary mapping fieldnames to
        :class:`whoosh.spelling.Corrector` objects. By default, this method
        uses the contents of the index to spell check the terms in the
        query. You can use this argument to "override" some fields with a
        different corrector, for example a
        :class:`whoosh.spelling.GraphCorrector`.
    :param terms: a sequence of ``("fieldname", "text")`` tuples to correct
        in the query. By default, this method corrects terms that don't
        appear in the index. You can use this argument to override that
        behavior and explicitly specify the terms that should be corrected.
    :param maxdist: the maximum number of "edits" (insertions, deletions,
        substitutions, or transpositions of letters) allowed between the
        original word and any suggestion. Values higher than ``2`` may be
        slow.
    :param prefix: suggested replacement words must share this number of
        initial characters with the original word. Increasing this even to
        just ``1`` can dramatically speed up suggestions, and may be
        justifiable since spelling mistakes rarely involve the first
        letter of a word.
    :param aliases: an optional dictionary mapping field names in the query
        to different field names to use as the source of spelling
        suggestions. The mappings in ``correctors`` are applied after this.
    :rtype: :class:`whoosh.spelling.Correction`
    """
    reader = self.reader()

    # Dictionary of field name alias mappings
    if aliases is None:
        aliases = {}
    # Dictionary of custom per-field correctors
    if correctors is None:
        correctors = {}

    # Remap correctors dict according to aliases, so a custom corrector
    # supplied for a query field applies to the aliased (source) field
    d = {}
    for fieldname, corr in iteritems(correctors):
        fieldname = aliases.get(fieldname, fieldname)
        d[fieldname] = corr
    correctors = d

    # Fill in default corrector objects for fields that don't have a custom
    # one in the "correctors" dictionary
    fieldnames = self.schema.names()
    for fieldname in fieldnames:
        fieldname = aliases.get(fieldname, fieldname)
        if fieldname not in correctors:
            correctors[fieldname] = self.reader().corrector(fieldname)

    # Get any missing terms in the query in the fields we're correcting
    if terms is None:
        terms = []
        for token in q.all_tokens():
            aname = aliases.get(token.fieldname, token.fieldname)
            text = token.text
            if aname in correctors and (aname, text) not in reader:
                # Note that we use the original, not aliased fieldname here
                # so if we correct the query we know what it was
                terms.append((token.fieldname, token.text))

    # Make a query corrector and apply it
    from whoosh import spelling
    sqc = spelling.SimpleQueryCorrector(correctors, terms, aliases)
    return sqc.correct_query(q, qstring)
class Results(object):
    """This object is returned by a Searcher. This object represents the
    results of a search query. You can mostly use it as if it was a list of
    dictionaries, where each dictionary is the stored fields of the document at
    that position in the results.

    Note that a Results object keeps a reference to the Searcher that created
    it, so keeping a reference to a Results object keeps the Searcher alive and
    so keeps all files used by it open.
    """

    def __init__(self, searcher, q, top_n, docset=None, facetmaps=None,
                 runtime=0, highlighter=None):
        """
        :param searcher: the :class:`Searcher` object that produced these
            results.
        :param q: the original query that created these results.
        :param top_n: a list of (score, docnum) tuples representing the top
            N search results.
        :param docset: an optional set of all matching document numbers; if
            None it is computed lazily by :meth:`Results.docs`.
        :param facetmaps: an optional dict of facet name -> FacetMap built
            during the search.
        :param runtime: how long the search took.
        :param highlighter: the highlighter to use for hit snippets; a
            default Highlighter is created if not given.
        """
        self.searcher = searcher
        self.q = q
        self.top_n = top_n
        self.docset = docset
        self._facetmaps = facetmaps or {}
        self.runtime = runtime
        self.highlighter = highlighter or highlight.Highlighter()
        # The collector that produced these results; set externally after
        # construction (used for lazy counts and doc sets)
        self.collector = None
        # Cached exact total count of matching documents
        self._total = None
        # Per-document character-position cache (used by highlighting)
        self._char_cache = {}

    def __repr__(self):
        return "<Top %s Results for %r runtime=%s>" % (len(self.top_n),
                                                       self.q,
                                                       self.runtime)

    def __len__(self):
        """Returns the total number of documents that matched the query. Note
        this may be more than the number of scored documents, given the value
        of the ``limit`` keyword argument to :meth:`Searcher.search`.

        If this Results object was created by searching with a ``limit``
        keyword, then computing the exact length of the result set may be
        expensive for large indexes or large result sets. You may consider
        using :meth:`Results.has_exact_length`,
        :meth:`Results.estimated_length`, and
        :meth:`Results.estimated_min_length` to display an estimated size of
        the result set instead of an exact number.
        """
        if self._total is None:
            self._total = self.collector.count()
        return self._total

    def __getitem__(self, n):
        # Supports both integer indexing and slicing; either way the
        # result is wrapped in Hit object(s)
        if isinstance(n, slice):
            start, stop, step = n.indices(len(self.top_n))
            return [Hit(self, self.top_n[i][1], i, self.top_n[i][0])
                    for i in xrange(start, stop, step)]
        else:
            if n >= len(self.top_n):
                raise IndexError("results[%r]: Results only has %s hits"
                                 % (n, len(self.top_n)))
            return Hit(self, self.top_n[n][1], n, self.top_n[n][0])

    def __iter__(self):
        """Yields a :class:`Hit` object for each result in ranked order.
        """
        for i in xrange(len(self.top_n)):
            yield Hit(self, self.top_n[i][1], i, self.top_n[i][0])

    def __contains__(self, docnum):
        """Returns True if the given document number matched the query.
        """
        return docnum in self.docs()

    def __nonzero__(self):
        return not self.is_empty()

    # Python 3 truth-testing protocol
    __bool__ = __nonzero__

    def is_empty(self):
        """Returns True if no documents matched the query.
        """
        return self.scored_length() == 0

    def items(self):
        """Returns an iterator of (docnum, score) pairs for the scored
        documents in the results.
        """
        return ((docnum, score) for score, docnum in self.top_n)

    def fields(self, n):
        """Returns the stored fields for the document at the ``n`` th position
        in the results. Use :meth:`Results.docnum` if you want the raw
        document number instead of the stored fields.
        """
        return self.searcher.stored_fields(self.top_n[n][1])

    def facet_names(self):
        """Returns the available facet names, for use with the ``groups()``
        method.
        """
        return self._facetmaps.keys()

    def groups(self, name=None):
        """If you generated facet groupings for the results using the
        `groupedby` keyword argument to the ``search()`` method, you can use
        this method to retrieve the groups. You can use the ``facet_names()``
        method to get the list of available facet names.

        >>> results = searcher.search(my_query, groupedby=["tag", "price"])
        >>> results.facet_names()
        ["tag", "price"]
        >>> results.groups("tag")
        {"new": [12, 1, 4], "apple": [3, 10, 5], "search": [11]}

        If you only used one facet, you can call the method without a facet
        name to get the groups for the facet.

        >>> results = searcher.search(my_query, groupedby="tag")
        >>> results.groups()
        {"new": [12, 1, 4], "apple": [3, 10, 5, 0], "search": [11]}

        By default, this returns a dictionary mapping category names to a list
        of document numbers, in the same relative order as they appear in the
        results.

        >>> results = mysearcher.search(myquery, groupedby="tag")
        >>> docnums = results.groups()
        >>> docnums['new']
        [12, 1, 4]

        You can then use :meth:`Searcher.stored_fields` to get the stored
        fields associated with a document ID.

        If you specified a different ``maptype`` for the facet when you
        searched, the values in the dictionary depend on the
        :class:`whoosh.sorting.FacetMap`.

        >>> myfacet = sorting.FieldFacet("tag", maptype=sorting.Count)
        >>> results = mysearcher.search(myquery, groupedby=myfacet)
        >>> counts = results.groups()
        {"new": 3, "apple": 4, "search": 1}
        """
        if (name is None or name == "facet") and len(self._facetmaps) == 1:
            # If there's only one facet, just use it; convert keys() to list
            # for Python 3
            name = list(self._facetmaps.keys())[0]
        elif name not in self._facetmaps:
            raise KeyError("%r not in facet names %r"
                           % (name, self.facet_names()))
        return self._facetmaps[name].as_dict()

    def has_exact_length(self):
        """Returns True if this results object already knows the exact number
        of matching documents.
        """
        if self.collector:
            return self.collector.computes_count()
        else:
            return self._total is not None

    def estimated_length(self):
        """The estimated maximum number of matching documents, or the
        exact number of matching documents if it's known.
        """
        if self.has_exact_length():
            return len(self)
        else:
            return self.q.estimate_size(self.searcher.reader())

    def estimated_min_length(self):
        """The estimated minimum number of matching documents, or the
        exact number of matching documents if it's known.
        """
        if self.has_exact_length():
            return len(self)
        else:
            return self.q.estimate_min_size(self.searcher.reader())

    def scored_length(self):
        """Returns the number of scored documents in the results, equal to or
        less than the ``limit`` keyword argument to the search.

        >>> r = mysearcher.search(myquery, limit=20)
        >>> len(r)
        1246
        >>> r.scored_length()
        20

        This may be fewer than the total number of documents that match the
        query, which is what ``len(Results)`` returns.
        """
        return len(self.top_n)

    def docs(self):
        """Returns a set-like object containing the document numbers that
        matched the query.
        """
        # Computed lazily from the collector and cached
        if self.docset is None:
            self.docset = set(self.collector.all_ids())
        return self.docset

    def copy(self):
        """Returns a deep copy of this results object.
        """
        # Shallow copy self to get attributes
        r = copy.copy(self)
        # Deep copies of docset and top_n in case they're modified
        r.docset = copy.deepcopy(self.docset)
        r.top_n = copy.deepcopy(self.top_n)
        return r

    def score(self, n):
        """Returns the score for the document at the Nth position in the list
        of ranked documents. If the search was not scored, this may return
        None.
        """
        return self.top_n[n][0]

    def docnum(self, n):
        """Returns the document number of the result at position n in the list
        of ranked documents.
        """
        return self.top_n[n][1]

    def query_terms(self, expand=False, fieldname=None):
        # Delegates to the query's existing_terms; returns the terms from
        # the query that exist in the index
        return self.q.existing_terms(self.searcher.reader(),
                                     fieldname=fieldname, expand=expand)

    def has_matched_terms(self):
        """Returns True if the search recorded which terms matched in which
        documents.

        >>> r = searcher.search(myquery)
        >>> r.has_matched_terms()
        False
        >>>
        """
        # docterms/termdocs are set externally when the search was run
        # with terms=True
        return hasattr(self, "docterms") and hasattr(self, "termdocs")

    def matched_terms(self):
        """Returns the set of ``("fieldname", "text")`` tuples representing
        terms from the query that matched one or more of the TOP N documents
        (this does not report terms for documents that match the query but did
        not score high enough to make the top N results). You can compare this
        set to the terms from the original query to find terms which didn't
        occur in any matching documents.

        This is only valid if you used ``terms=True`` in the search call to
        record matching terms. Otherwise it will raise an exception.

        >>> q = myparser.parse("alfa OR bravo OR charlie")
        >>> results = searcher.search(q, terms=True)
        >>> results.terms()
        set([("content", "alfa"), ("content", "charlie")])
        >>> q.all_terms() - results.terms()
        set([("content", "bravo")])
        """
        if not self.has_matched_terms():
            raise NoTermsException
        return set(self.termdocs.keys())

    # The following properties delegate highlighting configuration to the
    # underlying Highlighter object, so you can write e.g.
    # ``results.fragmenter = ...`` instead of
    # ``results.highlighter.fragmenter = ...``

    def _get_fragmenter(self):
        return self.highlighter.fragmenter

    def _set_fragmenter(self, f):
        self.highlighter.fragmenter = f

    fragmenter = property(_get_fragmenter, _set_fragmenter)

    def _get_formatter(self):
        return self.highlighter.formatter

    def _set_formatter(self, f):
        self.highlighter.formatter = f

    formatter = property(_get_formatter, _set_formatter)

    def _get_scorer(self):
        return self.highlighter.scorer

    def _set_scorer(self, s):
        self.highlighter.scorer = s

    scorer = property(_get_scorer, _set_scorer)

    def _get_order(self):
        return self.highlighter.order

    def _set_order(self, o):
        self.highlighter.order = o

    order = property(_get_order, _set_order)

    def key_terms(self, fieldname, docs=10, numterms=5,
                  model=classify.Bo1Model, normalize=True):
        """Returns the 'numterms' most important terms from the top 'docs'
        documents in these results. "Most important" is generally defined as
        terms that occur frequently in the top hits but relatively infrequently
        in the collection as a whole.

        :param fieldname: Look at the terms in this field. This field must
            store vectors.
        :param docs: Look at this many of the top documents of the results.
        :param numterms: Return this number of important terms.
        :param model: The classify.ExpansionModel to use. See the classify
            module.
        :returns: list of unicode strings.
        """
        if not len(self):
            return []
        docs = min(docs, len(self))

        reader = self.searcher.reader()

        expander = classify.Expander(reader, fieldname, model=model)
        for _, docnum in self.top_n[:docs]:
            expander.add_document(docnum)

        return expander.expanded_terms(numterms, normalize=normalize)

    def extend(self, results):
        """Appends hits from 'results' (that are not already in this
        results object) to the end of these results.

        :param results: another results object.
        """
        docs = self.docs()
        for item in results.top_n:
            if item[1] not in docs:
                self.top_n.append(item)
        self.docset = docs | results.docs()

    def filter(self, results):
        """Removes any hits that are not also in the other results object.
        """
        if not len(results):
            return

        otherdocs = results.docs()
        items = [item for item in self.top_n if item[1] in otherdocs]
        self.docset = self.docs() & otherdocs
        self.top_n = items

    def upgrade(self, results, reverse=False):
        """Re-sorts the results so any hits that are also in 'results' appear
        before hits not in 'results', otherwise keeping their current relative
        positions. This does not add the documents in the other results object
        to this one.

        :param results: another results object.
        :param reverse: if True, lower the position of hits in the other
            results object instead of raising them.
        """
        if not len(results):
            return

        otherdocs = results.docs()
        arein = [item for item in self.top_n if item[1] in otherdocs]
        notin = [item for item in self.top_n if item[1] not in otherdocs]

        if reverse:
            items = notin + arein
        else:
            items = arein + notin

        self.top_n = items

    def upgrade_and_extend(self, results):
        """Combines the effects of extend() and upgrade(): hits that are also
        in 'results' are raised. Then any hits from the other results object
        that are not in this results object are appended to the end.

        :param results: another results object.
        """
        if not len(results):
            return

        docs = self.docs()
        otherdocs = results.docs()

        arein = [item for item in self.top_n if item[1] in otherdocs]
        notin = [item for item in self.top_n if item[1] not in otherdocs]
        other = [item for item in results.top_n if item[1] not in docs]

        self.docset = docs | otherdocs
        self.top_n = arein + notin + other
class Hit(object):
"""Represents a single search result ("hit") in a Results object.
This object acts like a dictionary of the matching document's stored
fields. If for some reason you need an actual ``dict`` object, use
``Hit.fields()`` to get one.
>>> r = searcher.search(query.Term("content", "render"))
>>> r[0]
< Hit {title = u"Rendering the scene"} >
>>> r[0].rank
0
>>> r[0].docnum == 4592
True
>>> r[0].score
2.52045682
>>> r[0]["title"]
"Rendering the scene"
>>> r[0].keys()
["title"]
"""
def __init__(self, results, docnum, pos=None, score=None):
"""
:param results: the Results object this hit belongs to.
:param pos: the position in the results list of this hit, for example
pos = 0 means this is the first (highest scoring) hit.
:param docnum: the document number of this hit.
:param score: the score of this hit.
"""
self.results = results
self.searcher = results.searcher
self.reader = self.searcher.reader()
self.pos = self.rank = pos
self.docnum = docnum
self.score = score
self._fields = None
def fields(self):
"""Returns a dictionary of the stored fields of the document this
object represents.
"""
if self._fields is None:
self._fields = self.searcher.stored_fields(self.docnum)
return self._fields
def matched_terms(self):
"""Returns the set of ``("fieldname", "text")`` tuples representing
terms from the query that matched in this document. You can
compare this set to the terms from the original query to find terms
which didn't occur in this document.
This is only valid if you used ``terms=True`` in the search call to
record matching terms. Otherwise it will raise an exception.
>>> q = myparser.parse("alfa OR bravo OR charlie")
>>> results = searcher.search(q, terms=True)
>>> for hit in results:
... print(hit["title"])
... print("Contains:", hit.matched_terms())
... print("Doesn't contain:", q.all_terms() - hit.matched_terms())
"""
if not self.results.has_matched_terms():
raise NoTermsException
return self.results.docterms.get(self.docnum, [])
def highlights(self, fieldname, text=None, top=3, minscore=1):
"""Returns highlighted snippets from the given field::
r = searcher.search(myquery)
for hit in r:
print(hit["title"])
print(hit.highlights("content"))
See :doc:`/highlight`.
To change the fragmeter, formatter, order, or scorer used in
highlighting, you can set attributes on the results object::
from whoosh import highlight
results = searcher.search(myquery, terms=True)
results.fragmenter = highlight.SentenceFragmenter()
...or use a custom :class:`whoosh.highlight.Highlighter` object::
hl = highlight.Highlighter(fragmenter=sf)
results.highlighter = hl
:param fieldname: the name of the field you want to highlight.
:param text: by default, the method will attempt to load the contents
of the field from the stored fields for the document. If the field
you want to highlight isn't stored in the index, but you have
access to the text another way (for example, loading from a file or
a database), you can supply it using the ``text`` parameter.
:param top: the maximum number of fragments to return.
:param minscore: the minimum score for fragments to appear in the
highlights.
"""
hliter = self.results.highlighter
return hliter.highlight_hit(self, fieldname, text=text, top=top,
minscore=minscore)
def more_like_this(self, fieldname, text=None, top=10, numterms=5,
model=classify.Bo1Model, normalize=True, filter=None):
"""Returns a new Results object containing documents similar to this
hit, based on "key terms" in the given field::
r = searcher.search(myquery)
for hit in r:
print(hit["title"])
print("Top 3 similar documents:")
for subhit in hit.more_like_this("content", top=3):
print(" ", subhit["title"])
:param fieldname: the name of the field to use to test similarity.
:param text: by default, the method will attempt to load the contents
of the field from the stored fields for the document, or from a
term vector. If the field isn't stored or vectored in the index,
but you have access to the text another way (for example, loading
from a file or a database), you can supply it using the ``text``
parameter.
:param top: the number of results to return.
:param numterms: the number of "key terms" to extract from the hit and
search for. Using more terms is slower but gives potentially more
and more accurate results.
:param model: (expert) a :class:`whoosh.classify.ExpansionModel` to use
to compute "key terms".
:param normalize: whether to normalize term weights.
"""
return self.searcher.more_like(self.docnum, fieldname, text=text,
top=top, numterms=numterms, model=model,
normalize=normalize, filter=filter)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.fields())
def __eq__(self, other):
if isinstance(other, Hit):
return self.fields() == other.fields()
elif isinstance(other, dict):
return self.fields() == other
else:
return False
    def __len__(self):
        # The length of a Hit is the number of its stored fields,
        # not the number of search results
        return len(self.fields())
    def __iter__(self):
        # Iterating a Hit yields its stored field names, like a dict
        # (``iterkeys`` here is the module-level py2/3 compat helper,
        # not the method below)
        return iterkeys(self.fields())
def __getitem__(self, fieldname):
if fieldname in self.fields():
return self._fields[fieldname]
reader = self.reader
if reader.has_column(fieldname):
cr = reader.column_reader(fieldname)
return cr[self.docnum]
raise KeyError(fieldname)
def __contains__(self, key):
return (key in self.fields()
or self.reader.has_column(key))
def items(self):
return list(self.fields().items())
def keys(self):
return list(self.fields().keys())
def values(self):
return list(self.fields().values())
    def iteritems(self):
        # Iterator over (fieldname, value) pairs; ``iteritems`` here is
        # the module-level py2/3 compat helper, not this method
        return iteritems(self.fields())
    def iterkeys(self):
        # Iterator over stored field names; ``iterkeys`` here is the
        # module-level py2/3 compat helper, not this method
        return iterkeys(self.fields())
    def itervalues(self):
        # Iterator over stored field values; ``itervalues`` here is the
        # module-level py2/3 compat helper, not this method
        return itervalues(self.fields())
def get(self, key, default=None):
return self.fields().get(key, default)
    def __setitem__(self, key, value):
        # Hit objects are read-only views onto the search results
        raise NotImplementedError("You cannot modify a search result")
def __delitem__(self, key, value):
raise NotImplementedError("You cannot modify a search result")
def clear(self):
raise NotImplementedError("You cannot modify a search result")
def update(self, dict=None, **kwargs):
raise NotImplementedError("You cannot modify a search result")
class ResultsPage(object):
    """One fixed-size page out of a longer list of results, as returned by
    :func:`whoosh.searching.Searcher.search_page`. Supports a subset of
    the :class:`~whoosh.searching.Results` interface: getting stored
    fields with square brackets, iterating, and the ``score()`` and
    ``docnum()`` methods.

    Attributes:

    * ``offset`` -- index (from 0) of the first hit on this page. With a
      page length of 10, the second page has ``offset == 10``.
    * ``pagecount`` -- number of pages available.
    * ``pagenum`` -- the actual page number, which may be lower than the
      page requested when the results contain fewer pages. E.g.
      ``ResultsPage(results, 5)`` on results with only 3 pages gives
      ``pagenum == 3``.
    * ``pagelen`` -- number of hits on this page (the last page may hold
      fewer than the requested page length).
    * ``total`` -- total number of hits in the results.

    >>> mysearcher = myindex.searcher()
    >>> pagenum = 2
    >>> page = mysearcher.find_page(pagenum, myquery)
    >>> print("Page %s of %s, results %s to %s of %s" %
    ...       (pagenum, page.pagecount, page.offset+1,
    ...        page.offset+page.pagelen, page.total))
    >>> for i, fields in enumerate(page):
    ...     print("%s. %r" % (page.offset + i + 1, fields))
    >>> mysearcher.close()

    Highlighter attributes (for example ``formatter``) are set on the
    underlying :class:`Results` object::

        page.results.formatter = highlight.UppercaseFormatter()
    """

    def __init__(self, results, pagenum, pagelen=10):
        """
        :param results: a :class:`~whoosh.searching.Results` object.
        :param pagenum: which page of the results to use, numbered from ``1``.
        :param pagelen: the number of hits per page.
        """
        self.results = results
        self.total = len(results)
        if pagenum < 1:
            raise ValueError("pagenum must be >= 1")
        self.pagecount = int(ceil(self.total / pagelen))
        # Clamp to the last available page (0 when the results are empty)
        self.pagenum = min(self.pagecount, pagenum)
        first = (self.pagenum - 1) * pagelen
        # The final page may contain fewer than ``pagelen`` hits
        remaining = self.total - first
        if remaining < pagelen:
            pagelen = remaining
        self.offset = first
        self.pagelen = pagelen

    def __getitem__(self, n):
        # Translate page-relative positions into results positions
        base = self.offset
        if not isinstance(n, slice):
            return self.results[n + base]
        start, stop, step = n.indices(self.pagelen)
        return self.results[start + base:stop + base:step]

    def __iter__(self):
        page_hits = self.results[self.offset:self.offset + self.pagelen]
        return iter(page_hits)

    def __len__(self):
        # Note: length of the whole result set, not of this page
        return self.total

    def scored_length(self):
        return self.results.scored_length()

    def score(self, n):
        """Return the score of the hit at the nth position on this page."""
        return self.results.score(self.offset + n)

    def docnum(self, n):
        """Return the document number of the hit at the nth position on
        this page.
        """
        return self.results.docnum(self.offset + n)

    def is_last_page(self):
        """Return True if this object represents the last page of results
        (or there are no pages at all).
        """
        return self.pagecount in (0, self.pagenum)
| apache-2.0 |
mitodl/micromasters | micromasters/settings.py | 1 | 21620 | """
Django settings for MicroMasters.
"""
import logging
import os
import platform
from urllib.parse import urljoin
import dj_database_url
from celery.schedules import crontab
from django.core.exceptions import ImproperlyConfigured
from micromasters.envs import (
get_any,
get_bool,
get_int,
get_list_of_str,
get_string,
)
from micromasters.sentry import init_sentry
VERSION = "0.199.4"
# initialize Sentry before doing anything else so we capture any config errors
ENVIRONMENT = get_string('MICROMASTERS_ENVIRONMENT', 'dev')
SENTRY_DSN = get_string("SENTRY_DSN", "")
SENTRY_LOG_LEVEL = get_string("SENTRY_LOG_LEVEL", "ERROR")
init_sentry(
dsn=SENTRY_DSN, environment=ENVIRONMENT, version=VERSION, log_level=SENTRY_LOG_LEVEL
)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_string(
'SECRET_KEY',
'36boam8miiz0c22il@3&gputb=wrqr2plah=0#0a_bknw9(2^r'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = get_bool('DEBUG', False)
if DEBUG:
# Disabling the protection added in 1.10.3 against a DNS rebinding vulnerability:
# https://docs.djangoproject.com/en/1.10/releases/1.10.3/#dns-rebinding-vulnerability-when-debug-true
# Because we never debug against production data, we are not vulnerable
# to this problem.
ALLOWED_HOSTS = ['*']
else:
ALLOWED_HOSTS = get_list_of_str('ALLOWED_HOSTS', [])
SECURE_SSL_REDIRECT = get_bool('MICROMASTERS_SECURE_SSL_REDIRECT', True)
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'TIMEOUT': None,
'IGNORE': [
r'.+\.hot-update\.+',
r'.+\.js\.map'
]
}
}
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'server_status',
'social_django',
# WAGTAIL
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.contrib.table_block',
'wagtail.contrib.legacy.richtext',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'modelcluster',
'taggit',
# Hijack
'hijack',
'compat',
'hijack_admin',
# other third party APPS
'rolepermissions',
'corsheaders',
# Our INSTALLED_APPS
'backends',
'cms',
'courses',
'dashboard',
'discussions',
'ecommerce',
'exams',
'financialaid',
'grades',
'mail',
'profiles',
'roles',
'search',
'ui',
'seed_data',
'selenium_tests',
)
DISABLE_WEBPACK_LOADER_STATS = get_bool("DISABLE_WEBPACK_LOADER_STATS", False)
if not DISABLE_WEBPACK_LOADER_STATS:
INSTALLED_APPS += ('webpack_loader',)
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
# enable the nplusone profiler only in debug mode
if DEBUG:
INSTALLED_APPS += (
'nplusone.ext.django',
)
MIDDLEWARE += (
'nplusone.ext.django.NPlusOneMiddleware',
)
AUTHENTICATION_BACKENDS = (
'backends.edxorg.EdxOrgOAuth2',
# the following needs to stay here to allow login of local users
'django.contrib.auth.backends.ModelBackend',
)
SESSION_ENGINE = get_string('SESSION_ENGINE', 'django.contrib.sessions.backends.signed_cookies')
SESSION_COOKIE_NAME = get_string('SESSION_COOKIE_NAME', 'sessionid')
EDXORG_BASE_URL = get_string('EDXORG_BASE_URL', 'https://courses.edx.org/')
SOCIAL_AUTH_EDXORG_KEY = get_string('EDXORG_CLIENT_ID', '')
SOCIAL_AUTH_EDXORG_SECRET = get_string('EDXORG_CLIENT_SECRET', '')
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'backends.pipeline_api.check_edx_verified_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
# the following custom pipeline func goes before load_extra_data
'backends.pipeline_api.set_last_update',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
'backends.pipeline_api.update_profile_from_edx',
)
SOCIAL_AUTH_EDXORG_AUTH_EXTRA_ARGUMENTS = {
'access_type': 'offline',
'approval_prompt': 'auto'
}
SOCIAL_AUTH_EDXORG_EXTRA_DATA = ['updated_at']
LOGIN_REDIRECT_URL = '/dashboard'
LOGOUT_REDIRECT_URL = '/'
LOGIN_URL = '/'
LOGIN_ERROR_URL = '/'
OAUTH_MAINTENANCE_MODE = get_bool('OAUTH_MAINTENANCE_MODE', False)
ROOT_URLCONF = 'micromasters.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR + '/templates/'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
'ui.context_processors.api_keys',
'ui.context_processors.do_not_track',
],
},
},
]
WSGI_APPLICATION = 'micromasters.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Uses DATABASE_URL to configure with sqlite default:
# For URL structure:
# https://github.com/kennethreitz/dj-database-url
DEFAULT_DATABASE_CONFIG = dj_database_url.parse(
get_string(
'DATABASE_URL',
'sqlite:///{0}'.format(os.path.join(BASE_DIR, 'db.sqlite3'))
)
)
DEFAULT_DATABASE_CONFIG['CONN_MAX_AGE'] = get_int('MICROMASTERS_DB_CONN_MAX_AGE', 0)
# If True, disables server-side database cursors to prevent invalid cursor errors when using pgbouncer
DEFAULT_DATABASE_CONFIG["DISABLE_SERVER_SIDE_CURSORS"] = get_bool(
"MICROMASTERS_DB_DISABLE_SS_CURSORS", True
)
if get_bool('MICROMASTERS_DB_DISABLE_SSL', False):
DEFAULT_DATABASE_CONFIG['OPTIONS'] = {}
else:
DEFAULT_DATABASE_CONFIG['OPTIONS'] = {'sslmode': 'require'}
DATABASES = {
'default': DEFAULT_DATABASE_CONFIG
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# Serve static files with dj-static
STATIC_URL = '/static/'
CLOUDFRONT_DIST = get_string('CLOUDFRONT_DIST', None)
if CLOUDFRONT_DIST:
STATIC_URL = urljoin('https://{dist}.cloudfront.net'.format(dist=CLOUDFRONT_DIST), STATIC_URL)
# Configure Django Storages to use Cloudfront distribution for S3 assets
AWS_S3_CUSTOM_DOMAIN = '{dist}.cloudfront.net'.format(dist=CLOUDFRONT_DIST)
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
],
'EXCEPTION_HANDLER': 'micromasters.utils.custom_exception_handler'
}
# Request files from the webpack dev server
USE_WEBPACK_DEV_SERVER = get_bool('MICROMASTERS_USE_WEBPACK_DEV_SERVER', False)
WEBPACK_DEV_SERVER_HOST = get_string('WEBPACK_DEV_SERVER_HOST', '')
WEBPACK_DEV_SERVER_PORT = get_int('WEBPACK_DEV_SERVER_PORT', 8078)
# Important to define this so DEBUG works properly
INTERNAL_IPS = (get_string('HOST_IP', '127.0.0.1'), )
# Configure e-mail settings
EMAIL_BACKEND = get_string('MICROMASTERS_EMAIL_BACKEND', 'django.core.mail.backends.smtp.EmailBackend')
EMAIL_HOST = get_string('MICROMASTERS_EMAIL_HOST', 'localhost')
EMAIL_PORT = get_int('MICROMASTERS_EMAIL_PORT', 25)
EMAIL_HOST_USER = get_string('MICROMASTERS_EMAIL_USER', '')
EMAIL_HOST_PASSWORD = get_string('MICROMASTERS_EMAIL_PASSWORD', '')
EMAIL_USE_TLS = get_bool('MICROMASTERS_EMAIL_TLS', False)
EMAIL_SUPPORT = get_string('MICROMASTERS_SUPPORT_EMAIL', 'support@example.com')
DEFAULT_FROM_EMAIL = get_string('MICROMASTERS_FROM_EMAIL', 'webmaster@localhost')
ECOMMERCE_EMAIL = get_string('MICROMASTERS_ECOMMERCE_EMAIL', 'support@example.com')
MAILGUN_URL = get_string('MAILGUN_URL', None)
if not MAILGUN_URL:
raise ImproperlyConfigured("MAILGUN_URL not set")
MAILGUN_KEY = get_string('MAILGUN_KEY', None)
if not MAILGUN_KEY:
raise ImproperlyConfigured("MAILGUN_KEY not set")
MAILGUN_BATCH_CHUNK_SIZE = get_int('MAILGUN_BATCH_CHUNK_SIZE', 1000)
MAILGUN_RECIPIENT_OVERRIDE = get_string('MAILGUN_RECIPIENT_OVERRIDE', None)
MAILGUN_FROM_EMAIL = get_string('MAILGUN_FROM_EMAIL', 'no-reply@micromasters.mit.edu')
MAILGUN_BCC_TO_EMAIL = get_string('MAILGUN_BCC_TO_EMAIL', 'no-reply@micromasters.mit.edu')
# e-mail configurable admins
ADMIN_EMAIL = get_string('MICROMASTERS_ADMIN_EMAIL', '')
if ADMIN_EMAIL != '':
ADMINS = (('Admins', ADMIN_EMAIL),)
else:
ADMINS = ()
# Logging configuration
LOG_LEVEL = get_string('MICROMASTERS_LOG_LEVEL', 'INFO')
DJANGO_LOG_LEVEL = get_string('DJANGO_LOG_LEVEL', 'INFO')
ES_LOG_LEVEL = get_string('ES_LOG_LEVEL', 'INFO')
# For logging to a remote syslog host
LOG_HOST = get_string('MICROMASTERS_LOG_HOST', 'localhost')
LOG_HOST_PORT = get_int('MICROMASTERS_LOG_HOST_PORT', 514)
HOSTNAME = platform.node().split('.')[0]
# nplusone profiler logger configuration
NPLUSONE_LOGGER = logging.getLogger('nplusone')
NPLUSONE_LOG_LEVEL = logging.ERROR
# paramiko logger configuration
# default log level to critical to silence everything
PARAMIKO_LOG_LEVEL = get_string('PARAMIKO_LOG_LEVEL', 'CRITICAL')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'formatters': {
'verbose': {
'format': (
'[%(asctime)s] %(levelname)s %(process)d [%(name)s] '
'%(filename)s:%(lineno)d - '
'[{hostname}] - %(message)s'
).format(hostname=HOSTNAME),
'datefmt': '%Y-%m-%d %H:%M:%S'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'syslog': {
'level': LOG_LEVEL,
'class': 'logging.handlers.SysLogHandler',
'facility': 'local7',
'formatter': 'verbose',
'address': (LOG_HOST, LOG_HOST_PORT)
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
'django': {
'propagate': True,
'level': DJANGO_LOG_LEVEL,
'handlers': ['console', 'syslog'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': DJANGO_LOG_LEVEL,
'propagate': True,
},
'urllib3': {
'level': ES_LOG_LEVEL,
},
'elasticsearch': {
'level': ES_LOG_LEVEL,
},
'nplusone': {
'handlers': ['console'],
'level': 'ERROR',
},
'paramiko': {
'level': PARAMIKO_LOG_LEVEL,
},
},
'root': {
'handlers': ['console', 'syslog'],
'level': LOG_LEVEL,
},
}
# CORS
CORS_ORIGIN_WHITELIST = get_list_of_str("MICROMASTERS_CORS_ORIGIN_WHITELIST", [])
CORS_ALLOW_CREDENTIALS = True
# server-status
STATUS_TOKEN = get_string("STATUS_TOKEN", "")
HEALTH_CHECK = ['CELERY', 'REDIS', 'POSTGRES', 'ELASTIC_SEARCH']
ADWORDS_CONVERSION_ID = get_string("ADWORDS_CONVERSION_ID", "")
GA_TRACKING_ID = get_string("GA_TRACKING_ID", "")
GOOGLE_API_KEY = get_string("GOOGLE_API_KEY", "")
GTM_CONTAINER_ID = get_string("GTM_CONTAINER_ID", "")
SL_TRACKING_ID = get_string("SL_TRACKING_ID", "")
REACT_GA_DEBUG = get_bool("REACT_GA_DEBUG", False)
# Hijack
HIJACK_ALLOW_GET_REQUESTS = True
HIJACK_LOGOUT_REDIRECT_URL = '/admin/auth/user'
# Wagtail
WAGTAIL_SITE_NAME = "MIT MicroMasters"
# Maximum image upload size accepted by Wagtail, in bytes.
# The previous literal 20971620 was a typo (not a round byte size) and its
# trailing comment claimed "25 MB"; 20971520 == 20 * 1024 * 1024 (20 MiB).
WAGTAILIMAGES_MAX_UPLOAD_SIZE = get_int('WAGTAILIMAGES_MAX_UPLOAD_SIZE', 20971520)  # default 20 MB
MEDIA_ROOT = get_string('MEDIA_ROOT', '/var/media/')
MEDIA_URL = '/media/'
# S3 / django-storages configuration (validated just below)
MICROMASTERS_USE_S3 = get_bool('MICROMASTERS_USE_S3', False)
AWS_ACCESS_KEY_ID = get_string('AWS_ACCESS_KEY_ID', False)
AWS_SECRET_ACCESS_KEY = get_string('AWS_SECRET_ACCESS_KEY', False)
AWS_STORAGE_BUCKET_NAME = get_string('AWS_STORAGE_BUCKET_NAME', False)
AWS_S3_FILE_OVERWRITE = get_bool('AWS_S3_FILE_OVERWRITE', False)
# AWS_QUERYSTRING_AUTH is a boolean flag in django-storages; read it with
# get_bool so an environment value such as "False" is not treated as a
# truthy string (previously read with get_string)
AWS_QUERYSTRING_AUTH = get_bool('AWS_QUERYSTRING_AUTH', False)
# Provide nice validation of the configuration
if (
MICROMASTERS_USE_S3 and
(not AWS_ACCESS_KEY_ID or
not AWS_SECRET_ACCESS_KEY or
not AWS_STORAGE_BUCKET_NAME)
):
raise ImproperlyConfigured(
'You have enabled S3 support, but are missing one of '
'AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, or '
'AWS_STORAGE_BUCKET_NAME'
)
if MICROMASTERS_USE_S3:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
else:
# by default use django.core.files.storage.FileSystemStorage with
# overwrite feature
DEFAULT_FILE_STORAGE = 'storages.backends.overwrite.OverwriteStorage'
# Celery
USE_CELERY = True
# for the following variables keep backward compatibility for the environment variables
# the part after "or" can be removed after we replace the environment variables in production
CELERY_BROKER_URL = get_string(
"CELERY_BROKER_URL", get_string("REDISCLOUD_URL", None)
) or get_string("BROKER_URL", get_string("REDISCLOUD_URL", None))
CELERY_RESULT_BACKEND = get_string(
"CELERY_RESULT_BACKEND", get_string("REDISCLOUD_URL", None)
)
CELERY_TASK_ALWAYS_EAGER = get_bool("CELERY_TASK_ALWAYS_EAGER", False) or get_bool("CELERY_ALWAYS_EAGER", False)
CELERY_TASK_EAGER_PROPAGATES = (get_bool("CELERY_TASK_EAGER_PROPAGATES", True) or
get_bool("CELERY_EAGER_PROPAGATES_EXCEPTIONS", True))
CELERY_BEAT_SCHEDULE = {
'batch-update-user-data-every-6-hrs': {
'task': 'dashboard.tasks.batch_update_user_data',
'schedule': crontab(minute=0, hour='*/6')
},
'update-currency-exchange-rates-every-24-hrs': {
'task': 'financialaid.tasks.sync_currency_exchange_rates',
'schedule': crontab(minute=0, hour='3')
},
'authorize_exam_runs-every-1-hrs': {
'task': 'exams.tasks.authorize_exam_runs',
'schedule': crontab(minute=0, hour='*')
},
'generate-mm-course-certificates-every-1-hrs': {
'task': 'grades.tasks.generate_course_certificates_for_fa_students',
'schedule': crontab(minute=0, hour='*')
},
'discussions-sync-memberships-every-minute': {
'task': 'discussions.tasks.sync_channel_memberships',
'schedule': crontab(minute='*', hour='*')
},
'freeze-final-grades-every-24-hrs-few-times': {
'task': 'grades.tasks.find_course_runs_and_freeze_grades',
'schedule': crontab(minute='*/15', hour='16')
},
'create-combined-final-grade-every-1-hrs': {
'task': 'grades.tasks.create_combined_final_grades',
'schedule': crontab(minute=40, hour='*')
},
}
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'UTC'
# Celery parallel rate limit for batch_update_user_data
# This is the number of tasks per minute, each task updates data for 20 users
BATCH_UPDATE_RATE_LIMIT = get_string('BATCH_UPDATE_RATE_LIMIT', '5/m')
# django cache back-ends
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'local-in-memory-cache',
},
'redis': {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": CELERY_BROKER_URL,
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"COMPRESSOR": "django_redis.compressors.zlib.ZlibCompressor",
},
},
}
# Elasticsearch
ELASTICSEARCH_DEFAULT_PAGE_SIZE = get_int('ELASTICSEARCH_DEFAULT_PAGE_SIZE', 50)
ELASTICSEARCH_URL = get_string("ELASTICSEARCH_URL", None)
if get_string("HEROKU_PARENT_APP_NAME", None) is not None:
ELASTICSEARCH_INDEX = get_string('HEROKU_APP_NAME', None)
else:
ELASTICSEARCH_INDEX = get_string('ELASTICSEARCH_INDEX', None)
if not ELASTICSEARCH_INDEX:
raise ImproperlyConfigured("Missing ELASTICSEARCH_INDEX")
ELASTICSEARCH_HTTP_AUTH = get_string("ELASTICSEARCH_HTTP_AUTH", None)
ELASTICSEARCH_SHARD_COUNT = get_int('ELASTICSEARCH_SHARD_COUNT', 5)
# django-role-permissions
ROLEPERMISSIONS_MODULE = 'roles.roles'
# edx
EDX_BATCH_UPDATES_ENABLED = get_bool("EDX_BATCH_UPDATES_ENABLED", True)
# Cybersource
CYBERSOURCE_ACCESS_KEY = get_string("CYBERSOURCE_ACCESS_KEY", None)
CYBERSOURCE_SECURITY_KEY = get_string("CYBERSOURCE_SECURITY_KEY", None)
CYBERSOURCE_SECURE_ACCEPTANCE_URL = get_string("CYBERSOURCE_SECURE_ACCEPTANCE_URL", None)
CYBERSOURCE_PROFILE_ID = get_string("CYBERSOURCE_PROFILE_ID", None)
CYBERSOURCE_REFERENCE_PREFIX = get_string("CYBERSOURCE_REFERENCE_PREFIX", None)
# Open Exchange Rates
OPEN_EXCHANGE_RATES_URL = get_string("OPEN_EXCHANGE_RATES_URL", "https://openexchangerates.org/api/")
OPEN_EXCHANGE_RATES_APP_ID = get_string("OPEN_EXCHANGE_RATES_APP_ID", "")
# Open Discussions
OPEN_DISCUSSIONS_API_USERNAME = get_string('OPEN_DISCUSSIONS_API_USERNAME', None)
OPEN_DISCUSSIONS_BASE_URL = get_string('OPEN_DISCUSSIONS_BASE_URL', None)
OPEN_DISCUSSIONS_COOKIE_DOMAIN = get_string('OPEN_DISCUSSIONS_COOKIE_DOMAIN', None)
OPEN_DISCUSSIONS_JWT_EXPIRES_DELTA = get_int('OPEN_DISCUSSIONS_JWT_EXPIRES_DELTA', 60*60)
OPEN_DISCUSSIONS_COOKIE_NAME = get_string('OPEN_DISCUSSIONS_COOKIE_NAME', None)
OPEN_DISCUSSIONS_JWT_SECRET = get_string('OPEN_DISCUSSIONS_JWT_SECRET', None)
OPEN_DISCUSSIONS_REDIRECT_URL = get_string('OPEN_DISCUSSIONS_REDIRECT_URL', None)
OPEN_DISCUSSIONS_REDIRECT_COMPLETE_URL = get_string('OPEN_DISCUSSIONS_REDIRECT_COMPLETE_URL', '/')
OPEN_DISCUSSIONS_SITE_KEY = get_string('OPEN_DISCUSSIONS_SITE_KEY', None)
if not OPEN_DISCUSSIONS_SITE_KEY:
raise ImproperlyConfigured("OPEN_DISCUSSIONS_SITE_KEY must be specified")
# features flags
def get_all_config_keys():
    """Return every configuration key currently set in the environment"""
    return [key for key in os.environ]
MM_FEATURES_PREFIX = get_string('MM_FEATURES_PREFIX', 'FEATURE_')
FEATURES = {
key[len(MM_FEATURES_PREFIX):]: get_any(key, None) for key
in get_all_config_keys() if key.startswith(MM_FEATURES_PREFIX)
}
MIDDLEWARE_FEATURE_FLAG_QS_PREFIX = get_string("MIDDLEWARE_FEATURE_FLAG_QS_PREFIX", None)
MIDDLEWARE_FEATURE_FLAG_COOKIE_NAME = get_string('MIDDLEWARE_FEATURE_FLAG_COOKIE_NAME', 'MM_FEATURE_FLAGS')
MIDDLEWARE_FEATURE_FLAG_COOKIE_MAX_AGE_SECONDS = get_int('MIDDLEWARE_FEATURE_FLAG_COOKIE_MAX_AGE_SECONDS', 60 * 60)
if MIDDLEWARE_FEATURE_FLAG_QS_PREFIX:
MIDDLEWARE = MIDDLEWARE + (
'ui.middleware.QueryStringFeatureFlagMiddleware',
'ui.middleware.CookieFeatureFlagMiddleware',
)
# django debug toolbar only in debug mode
if DEBUG:
INSTALLED_APPS += ('debug_toolbar', )
# it needs to be enabled before other middlewares
MIDDLEWARE = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
) + MIDDLEWARE
def show_toolbar(request):
"""
Custom function needed because of bug in wagtail.
Theoretically this bug has been fixed in django 1.10 and wagtail 1.6.3
so if we upgrade we should be able to change this function to just
return True.
"""
request.META["wsgi.multithread"] = True
request.META["wsgi.multiprocess"] = True
excluded_urls = ['/pages/preview/', '/pages/preview_loading/', '/edit/preview/']
excluded = any(request.path.endswith(url) for url in excluded_urls)
return not excluded
DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": show_toolbar, }
# Travis
IS_CI_ENV = get_bool('CI', False)
HUBSPOT_CONFIG = {
"HUBSPOT_ORGANIZATIONS_FORM_GUID": get_string(
name="HUBSPOT_ORGANIZATIONS_FORM_GUID",
default="1b63db1a-eb3a-45d6-82f1-c4b8f01835dc",
),
"HUBSPOT_PORTAL_ID": get_string(
name="HUBSPOT_PORTAL_ID", default="8677455"
),
}
| bsd-3-clause |
flavour/tldrmp | modules/s3db/survey.py | 1 | 135539 | # -*- coding: utf-8 -*-
""" Sahana Eden Survey Tool
@copyright: 2011-2013 (c) Sahana Software Foundation
@license: MIT
ADAT - Assessment Data Analysis Tool
For more details see the blueprint at:
http://eden.sahanafoundation.org/wiki/BluePrint/SurveyTool/ADAT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3SurveyTemplateModel",
"S3SurveyQuestionModel",
"S3SurveyFormatterModel",
"S3SurveySeriesModel",
"S3SurveyCompleteModel",
"S3SurveyTranslateModel",
"survey_template_represent",
"survey_series_represent",
"survey_answer_list_represent",
"survey_template_rheader",
"survey_series_rheader",
"survey_getAllSectionsForTemplate",
"survey_getAllQuestionsForTemplate",
"survey_buildQuestionnaireFromTemplate",
"survey_buildQuestionnaireFromSeries",
"survey_getTemplateFromSeries",
"survey_getAllTemplates",
"survey_getAllWidgetsForTemplate",
"survey_getWidgetFromQuestion",
"survey_getAllSectionsForSeries",
"survey_getAllSectionsForTemplate",
"survey_getQuestionFromCode",
"survey_getAllQuestionsForTemplate",
"survey_getAllQuestionsForSeries",
"survey_getAllQuestionsForComplete",
"survey_save_answers_for_series",
"survey_updateMetaData",
"survey_getAllAnswersForQuestionInSeries",
"survey_getQstnLayoutRules",
"survey_getSeries",
"survey_getSeriesName",
"survey_getAllSeries",
"survey_getAllTranslationsForTemplate",
"survey_getAllTranslationsForSeries",
"survey_build_template_summary",
"survey_serieslist_dataTable_post",
"survey_answerlist_dataTable_pre",
"survey_answerlist_dataTable_post",
"survey_json2py",
"survey_json2list",
]
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
from s3chart import S3Chart
from s3survey import survey_question_type, \
survey_analysis_type, \
_debug
# =============================================================================
def json2py(jsonstr):
    """
    Convert a JSON string into the equivalent python structure.

    Non-string values, and strings that still fail to parse after the
    quote normalisation below, are returned unchanged.
    """
    from xml.sax.saxutils import unescape

    if not isinstance(jsonstr, str):
        return jsonstr
    try:
        # Normalise legacy repr()-style quoting before parsing
        jsonstr = unescape(jsonstr, {"u'": '"'})
        jsonstr = unescape(jsonstr, {"'": '"'})
        parsed = json.loads(jsonstr)
    except:
        _debug("ERROR: attempting to convert %s using modules/s3db/survey/json2py.py" % (jsonstr))
        return jsonstr
    return parsed

survey_json2py = json2py
# =============================================================================
def json2list(jsonstr):
    """
    Convert a JSON (or comma separated) string into a python list.
    """
    if jsonstr == "":
        return []
    # Bracketed input is treated as JSON, anything else as CSV
    if jsonstr[0] == "[":
        values = json2py(jsonstr)
    else:
        values = jsonstr.split(",")
    if isinstance(values, list):
        return values
    return [values]

survey_json2list = json2list
# =============================================================================
class S3SurveyTemplateModel(S3Model):
"""
Template model
The template model is a container for the question model
"""
names = ["survey_template",
"survey_template_id",
"survey_section",
"survey_template_status",
]
    def model(self):
        """
        Define the survey_template and survey_section tables.

        survey_template is the root container for the questions used in
        a survey; survey_section groups those questions and orders them
        (via ``posn``) within a template.

        Returns a Storage passing back to global scope (s3.*):
            survey_template_id: reusable foreign-key field for templates
            survey_template_status: {status code: localised label} map
        """
        T = current.T
        db = current.db
        # Workflow states for a template (stored as an integer code)
        template_status = {
            1: T("Pending"),
            2: T("Active"),
            3: T("Closed"),
            4: T("Master")
            }
        # Local aliases for frequently used model helpers
        add_component = self.add_component
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        # ---------------------------------------------------------------------
        # survey_template
        #
        # The template is the root table and acts as a container for
        # the questions that will be used in a survey.
        tablename = "survey_template"
        table = define_table(tablename,
                             Field("name", "string", length=120,
                                   notnull=True, unique=True,
                                   label = T("Template Name"),
                                   default="",
                                   ),
                             Field("description", "text", length=500,
                                   label = T("Description"),
                                   default=""),
                             Field("status", "integer",
                                   label = T("Status"),
                                   requires = IS_IN_SET(template_status,
                                                        zero=None),
                                   default=1,
                                   represent = lambda index: \
                                       template_status[index],
                                   readable=True,
                                   writable=False),
                             # Standard questions which may belong to all template
                             # competion_qstn: who completed the assessment
                             Field("competion_qstn", "string", length=200,
                                   label = T("Completion Question"),
                                   ),
                             # date_qstn: when it was completed (date)
                             Field("date_qstn", "string", length=200,
                                   label = T("Date Question"),
                                   ),
                             # time_qstn: when it was completed (time)
                             Field("time_qstn", "string", length=200,
                                   label = T("Time Question"),
                                   ),
                             # location_detail: json of the location question
                             # May consist of any of the following:
                             # L0, L1, L2, L3, L4, Lat, Lon
                             Field("location_detail", "string", length=200,
                                   label = T("Location Detail"),
                                   ),
                             # The priority question is the default question used
                             # to determine the priority of each point on the map.
                             # The data is stored as the question code.
                             Field("priority_qstn", "string", length=16,
                                   label = T("Default map question"),
                                   ),
                             *s3_meta_fields())
        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_create = T("Add Assessment Template"),
            title_display = T("Assessment Template Details"),
            title_list = T("Assessment Templates"),
            title_analysis_summary = T("Template Summary"),
            title_update = T("Edit Assessment Template"),
            title_question_details = T("Details of each question in the Template"),
            subtitle_create = T("Add a new Assessment Template"),
            subtitle_analysis_summary = T("Summary by Question Type - (The fewer text questions the better the analysis can be)"),
            label_list_button = T("List Assessment Templates"),
            label_create_button = T("Add a new Assessment Template"),
            label_delete_button = T("Delete this Assessment Template"),
            msg_record_created = T("Assessment Template added"),
            msg_record_modified = T("Assessment Template updated"),
            msg_record_deleted = T("Assessment Template deleted"),
            msg_list_empty = T("No Assessment Templates"))
        # Reusable foreign-key field pointing at survey_template
        template_id = S3ReusableField("template_id", table,
                                      sortby="name",
                                      label=T("Template"),
                                      requires = IS_ONE_OF(db,
                                                           "survey_template.id",
                                                           self.survey_template_represent,
                                                           ),
                                      represent = self.survey_template_represent,
                                      ondelete = "CASCADE")
        # Components
        add_component("survey_series", survey_template="template_id")
        add_component("survey_translate", survey_template = "template_id")
        configure(tablename,
                  onvalidation = self.template_onvalidate,
                  onaccept = self.template_onaccept,
                  deduplicate = self.survey_template_duplicate,
                  )
        # ---------------------------------------------------------------------
        # survey_sections
        #
        # The questions can be grouped into sections this provides
        # the description of the section and
        # the position of the section within the template
        tablename = "survey_section"
        table = define_table(tablename,
                             Field("name", "string", length=120,
                                   notnull=True,
                                   default="",
                                   ),
                             Field("description", "text", length=500,
                                   default="",
                                   ),
                             Field("posn", "integer",
                                   ),
                             Field("cloned_section_id", "integer",
                                   readable=False,
                                   writable=False,
                                   ),
                             template_id(),
                             *s3_meta_fields())
        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_create = T("Add Template Section"),
            title_display = T("Template Section Details"),
            title_list = T("Template Sections"),
            title_update = T("Edit Template Section"),
            subtitle_create = T("Add a new Template Section"),
            label_list_button = T("List Template Sections"),
            label_create_button = T("Add a new Template Section"),
            label_delete_button = T("Delete this Template Section"),
            msg_record_created = T("Template Section added"),
            msg_record_modified = T("Template Section updated"),
            msg_record_deleted = T("Template Section deleted"),
            msg_list_empty = T("No Template Sections"))
        configure(tablename, orderby = tablename+".posn",
                  deduplicate=self.survey_section_duplicate
                  )
        # Pass names back to global scope (s3.*)
        return Storage(
            survey_template_id = template_id,
            survey_template_status = template_status,
        )
# -------------------------------------------------------------------------
@staticmethod
def template_onvalidate(form):
"""
It is not valid to re-import a template that already has a
status of Active or higher
"""
template_id = form.vars.id
table = current.s3db.survey_template
row = current.db(table.id == template_id).select(table.status,
limitby=(0, 1)
).first()
if row is not None and row.status > 1:
return False
return True
# -------------------------------------------------------------------------
@staticmethod
def addQuestion(template_id, name, code, notes, type, posn, metadata={}):
"""
"""
db = current.db
s3db = current.s3db
# Add the question to the database if it's not already there
qstntable = s3db.survey_question
query = (qstntable.name == name) & \
(qstntable.code == code)
record = db(query).select(qstntable.id, limitby=(0, 1)).first()
if record:
qstn_id = record.id
else:
qstn_id = qstntable.insert(name = name,
code = code,
notes = notes,
type = type
)
qstn_metadata_table = s3db.survey_question_metadata
for (descriptor, value) in metadata.items():
qstn_metadata_table.insert(question_id = qstn_id,
descriptor = descriptor,
value = value
)
# Add these questions to the section: "Background Information"
sectable = s3db.survey_section
section_name = "Background Information"
query = (sectable.name == section_name) & \
(sectable.template_id == template_id)
record = db(query).select(sectable.id, limitby=(0, 1)).first()
if record:
section_id = record.id
else:
section_id = sectable.insert(name = section_name,
template_id = template_id,
posn = 0 # special section with no position
)
# Add the question to the list of questions in the template
qstn_list_table = s3db.survey_question_list
query = (qstn_list_table.question_id == qstn_id) & \
(qstn_list_table.template_id == template_id)
record = db(query).select(qstntable.id, limitby=(0, 1)).first()
if not record:
qstn_list_table.insert(question_id = qstn_id,
template_id = template_id,
section_id = section_id,
posn = posn
)
# -------------------------------------------------------------------------
@staticmethod
def template_onaccept(form):
"""
All of the standard questions will now be generated
competion_qstn: who completed the assessment
date_qstn: when it was completed (date)
time_qstn: when it was completed (time)
location_detail: json of the location question
May consist of any of the following:
L0, L1, L2, L3, L4, Lat, Lon
for json entry a question will be generated
The code for each question will start with "STD-" followed by
the type of question.
"""
vars = form.vars
if vars.id:
template_id = vars.id
else:
return
addQuestion = S3SurveyTemplateModel.addQuestion
if vars.competion_qstn != None:
name = vars.competion_qstn
code = "STD-WHO"
notes = "Who completed the assessment"
type = "String"
posn = -10 # negative used to force these question to appear first
addQuestion(template_id, name, code, notes, type, posn)
if vars.date_qstn != None:
name = vars.date_qstn
code = "STD-DATE"
notes = "Date the assessment was completed"
type = "Date"
posn += 1
addQuestion(template_id, name, code, notes, type, posn)
if vars.time_qstn != None:
name = vars.time_qstn
code = "STD-TIME"
notes = "Time the assessment was completed"
type = "Time"
posn += 1
addQuestion(template_id, name, code, notes, type, posn)
if vars.location_detail != None:
locationList = json2py(vars.location_detail)
if len(locationList) > 0:
name = "The location P-code"
code = "STD-P-Code"
type = "String"
posn += 1
addQuestion(template_id, name, code, None, type, posn)
for loc in locationList:
if loc == "Lat":
name = "Latitude"
elif loc == "Lon":
name = "Longitude"
else:
name = loc
code = "STD-%s" % loc
if loc == "Lat" or loc == "Lon":
type = "Numeric"
metadata = {"Format": "nnn.nnnnnn"}
else:
type = "Location"
metadata = {}
posn += 1
addQuestion(template_id, name, code, "", type, posn, metadata)
# -------------------------------------------------------------------------
@staticmethod
def survey_template_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with a similar name, ignoring case
"""
if job.tablename == "survey_template":
table = job.table
data = job.data
name = "name" in data and data.name
query = table.name.lower().like('%%%s%%' % name.lower())
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def survey_section_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same name
- the same template
- and the same position within the template
- however if their is a record with position of zero then that record should be updated
"""
if job.tablename == "survey_section":
table = job.table
data = job.data
name = "name" in data and data.name
template = "template_id" in data and data.template_id
query = (table.name == name) & \
(table.template_id == template)
return duplicator(job, query)
# =============================================================================
def survey_template_represent(id, row=None):
    """
    Display the template name rather than the id

    @param id: survey_template record id
    @param row: optional Row already carrying the name (skips the lookup)
    """
    if row:
        return row.name
    elif not id:
        return current.messages["NONE"]

    table = current.s3db.survey_template
    record = current.db(table.id == id).select(table.name,
                                               limitby=(0, 1)).first()
    # Fix: explicit missing-record check instead of a bare except,
    # which hid genuine errors
    if record:
        return record.name
    return current.messages.UNKNOWN_OPT
# =============================================================================
def survey_template_rheader(r, tabs=[]):
    """
    The template rheader: tab bar plus a summary of the sections
    that make up the template.

    @param r: the S3Request
    @param tabs: ignored - the tab list is rebuilt internally
    """
    if r.representation == "html":
        tablename, record = s3_rheader_resource(r)
        if tablename == "survey_template" and record:
            T = current.T
            s3db = current.s3db

            # Tabs
            tabs = [(T("Basic Details"), "read"),
                    (T("Question Details"),"templateRead/"),
                    (T("Question Summary"),"templateSummary/"),
                    #(T("Sections"), "section"),
                    ]
            if current.auth.s3_has_permission("create", "survey_translate"):
                tabs.append((T("Translate"),"translate"))
            rheader_tabs = s3_rheader_tabs(r, tabs)

            sectionTable = s3db.survey_section
            qlistTable = s3db.survey_question_list
            # When viewing via another resource the template id arrives as
            # "tablename.id" in the "viewing" request variable
            viewing = current.request.get_vars.get("viewing", None)
            if viewing:
                dummy, template_id = viewing.split(".")
            else:
                template_id = r.id

            query = (qlistTable.template_id == template_id) & \
                    (qlistTable.section_id == sectionTable.id)
            rows = current.db(query).select(sectionTable.id,
                                            sectionTable.name,
                                            orderby = qlistTable.posn)
            tsection = TABLE(_class="survey-section-list")
            lblSection = SPAN(T("Sections that are part of this template"),
                              _style="font-weight:bold;")
            # Fix: use len() rather than calling __len__ directly
            if len(rows) == 0:
                rsection = SPAN(T("As of yet, no sections have been added to this template."))
            else:
                rsection = TR()
                count = 0
                lastSection = ""
                for section in rows:
                    if section.name == lastSection:
                        continue
                    rsection.append(TD(section.name))
                    # Comment out the following until templates can be built online
                    #rsection.append(TD(A(section.name,
                    #                     _href=URL(c="survey",
                    #                               f="section",
                    #                               args="%s" % section.id))))
                    lastSection = section.name
                    count += 1
                    # Lay the section names out four to a row
                    if count % 4 == 0:
                        tsection.append(rsection)
                        rsection = TR()
            tsection.append(rsection)

            rheader = DIV(TABLE(
                          TR(
                             TH("%s: " % T("Name")),
                             record.name,
                             TH("%s: " % T("Status")),
                             s3db.survey_template_status[record.status],
                             ),
                          ),
                          lblSection,
                          tsection,
                          rheader_tabs)
            return rheader
    return None
# =============================================================================
def survey_getTemplateFromSeries(series_id):
    """
    Return the template record that the given series is based on
    @ToDo: Remove wrapper
    """
    s3db = current.s3db
    series_table = s3db.survey_series
    template_table = s3db.survey_template
    query = (series_table.id == series_id) & \
            (template_table.id == series_table.template_id)
    return current.db(query).select(template_table.ALL,
                                    limitby=(0, 1)).first()
# =============================================================================
def survey_getAllTemplates():
    """
    Return all the survey templates held in the database
    @ToDo: Remove wrapper
    """
    template_table = current.s3db.survey_template
    return current.db(template_table).select()
# =============================================================================
def survey_getAllWidgetsForTemplate(template_id):
    """
    Function to return the widgets for each question for the given
    template. The widgets are returned in a dict with the key being
    the question code.

    @param template_id: id of the survey_template
    @return: dict mapping question code -> widget object
    """
    s3db = current.s3db
    q_ltable = s3db.survey_question_list
    qsntable = s3db.survey_question
    query = (q_ltable.template_id == template_id) & \
            (q_ltable.question_id == qsntable.id)
    rows = current.db(query).select(qsntable.id,
                                    qsntable.code,
                                    qsntable.type,
                                    q_ltable.posn,
                                    )
    widgets = {}
    for row in rows:
        sqrow = row.survey_question
        qstnType = sqrow.type
        qstn_id = sqrow.id
        qstn_code = sqrow.code
        qstn_posn = row.survey_question_list.posn
        # Look up the widget class for this question type and record the
        # question's position on the widget itself
        widgetObj = survey_question_type[qstnType](qstn_id)
        widgets[qstn_code] = widgetObj
        widgetObj.question["posn"] = qstn_posn
        # Fix: removed the dead "question = {}" local that was rebound on
        # every iteration and never read
    return widgets
# =============================================================================
def survey_getAllSectionsForSeries(series_id):
    """
    Return the list of sections for the given series, ordered by
    their position in the underlying template.
    Each section is a dict with keys:
        section_id, name, template_id, posn
    """
    series = survey_getSeries(series_id)
    return survey_getAllSectionsForTemplate(series.template_id)
# =============================================================================
def survey_buildQuestionnaireFromTemplate(template_id):
    """
    Build a read-only form displaying all the questions for the
    given template
    @ToDo: Remove wrapper
    """
    qstns = survey_getAllQuestionsForTemplate(template_id)
    return buildQuestionsForm(qstns, readOnly=True)
# =============================================================================
def survey_getAllSectionsForTemplate(template_id):
    """
    Return the list of sections for the given template, ordered by
    their position in the template.
    Each section is a dict with keys:
        section_id, name, template_id, posn
    """
    sectable = current.s3db.survey_section
    rows = current.db(sectable.template_id == template_id).select(
                            sectable.id,
                            sectable.name,
                            sectable.template_id,
                            sectable.posn,
                            orderby = sectable.posn)
    return [{"section_id": row.id,
             "name": row.name,
             "template_id": row.template_id,
             "posn": row.posn,
             } for row in rows]
# =============================================================================
def survey_getWidgetFromQuestion(question_id):
    """
    Look up the question's type and return the matching widget object
    """
    qtable = current.s3db.survey_question
    record = current.db(qtable.id == question_id).select(qtable.type,
                                                         limitby=(0, 1)
                                                         ).first()
    return survey_question_type[record.type](question_id)
# =============================================================================
def buildQuestionsForm(questions, complete_id=None, readOnly=False):
    """
    Create the form, hard-coded table layout :(

    @param questions: list of question dicts (section, code, name, qstn_id)
    @param complete_id: id of a survey_complete record whose answers
                        should pre-populate the widgets
    @param readOnly: render a static summary table instead of input widgets
    """
    form = FORM()
    table = None
    sectionTitle = ""
    for question in questions:
        if sectionTitle != question["section"]:
            # Start a new section: separator, scrollable div, fresh table
            if sectionTitle != "":
                form.append(P())
                form.append(HR(_width="90%"))
                form.append(P())
            div = DIV(_class="survey_scrollable")
            table = TABLE()
            div.append(table)
            form.append(div)
            table.append(TR(TH(question["section"],
                               _colspan="2"),
                            _class="survey_section"))
            sectionTitle = question["section"]
        widgetObj = survey_getWidgetFromQuestion(question["qstn_id"])
        if readOnly:
            table.append(TR(TD(question["code"]),
                            TD(widgetObj.type_represent()),
                            TD(question["name"])
                            )
                         )
        else:
            # Fix: identity comparisons with None (PEP 8) instead of !=
            if complete_id is not None:
                # Pre-load the previously saved answer
                widgetObj.loadAnswer(complete_id, question["qstn_id"])
            widget = widgetObj.display(question_id = question["qstn_id"])
            if widget is not None:
                if isinstance(widget, TABLE):
                    table.append(TR(TD(widget, _colspan=2)))
                else:
                    table.append(widget)
    if not readOnly:
        button = INPUT(_type="submit", _name="Save", _value=current.T("Save"))
        form.append(button)
    return form
# =============================================================================
def survey_build_template_summary(template_id):
    """
    Build an HTML dataTable summarising, per section of the template,
    how many questions of each type it contains, with per-type and
    per-section totals in the final column/footer row.
    """
    from s3.s3data import S3DataTable
    T = current.T

    table = TABLE(_id="template_summary",
                  _class="dataTable display")
    hr = TR(TH(T("Position")), TH(T("Section")))
    # Map each question type to its column position in the table header
    qstnTypeList = {}
    posn = 1
    for (key, type) in survey_question_type.items():
        if key == "Grid" or key == "GridChild":
            # Grid questions are counted via their real child type below
            continue
        hr.append(TH(type().type_represent()))
        qstnTypeList[key] = posn
        posn += 1
    hr.append(TH(T("Total")))
    header = THEAD(hr)

    numOfQstnTypes = len(survey_question_type) - 1 # exclude the grid questions
    questions = survey_getAllQuestionsForTemplate(template_id)
    sectionTitle = ""
    line = []
    body = TBODY()
    section = 0
    # Grand totals row: blank position cell, "Total" label, one zero per type
    total = ["", T("Total")] + [0]*numOfQstnTypes
    for question in questions:
        if sectionTitle != question["section"]:
            # Flush the counters of the previous section into the body
            if line != []:
                br = TR()
                for cell in line:
                    br.append(cell)
                body.append(br)
            section += 1
            sectionTitle = question["section"]
            line = [section, sectionTitle] + [0]*numOfQstnTypes
        if question["type"] == "Grid":
            continue
        if question["type"] == "GridChild":
            # get the real grid question type
            widgetObj = survey_getWidgetFromQuestion(question["qstn_id"])
            question["type"] = widgetObj.typeDescription
        # +1 offsets past the position/section cells at the start of the row
        line[qstnTypeList[question["type"]]+1] += 1
        line[numOfQstnTypes+1] += 1
        total[qstnTypeList[question["type"]]+1] += 1
        total[numOfQstnTypes+1] += 1
    # Add the trailing row (counters of the last section)
    br = TR()
    for cell in line:
        br.append(cell)
    body.append(br)
    # Add the footer to the table
    foot = TFOOT()
    tr = TR()
    for cell in total:
        tr.append(TD(B(cell))) # don't use TH() otherwise dataTables will fail
    foot.append(tr)

    table.append(header)
    table.append(body)
    table.append(foot)
    # Turn off server side pagination
    s3 = current.response.s3
    s3.no_sspag = True
    s3.no_formats = True
    s3.dataTableID = None
    attr = S3DataTable.getConfigData()
    form = S3DataTable.htmlConfig(table,
                                  "template_summary",
                                  [[0, 'asc']], # order by
                                  "", # the filter string
                                  None, # the rfields
                                  dt_action_col = -1,
                                  **attr
                                  )
    return form
# =============================================================================
class S3SurveyQuestionModel(S3Model):
    """
    Question Model

    Defines the tables holding the individual questions
    (survey_question), their per-question metadata
    (survey_question_metadata) and the mapping of questions into
    template sections (survey_question_list).
    """

    # Names exported to the global s3.* scope by model()
    names = ["survey_question",
             "survey_question_metadata",
             "survey_question_list",
             "survey_qstn_name_represent"
             ]
    def model(self):
        """
        Define the question-related tables and their CRUD strings;
        returns the names exported to the global s3.* scope.
        """

        T = current.T
        s3 = current.response.s3

        configure = self.configure
        crud_strings = s3.crud_strings
        define_table = self.define_table

        # ---------------------------------------------------------------------
        # survey_question
        # Defines a question that will appear within a section, and thus belong
        # to the template.
        #
        # This holds the actual question and
        # A string code (unique within the template) is used to identify the question.
        #
        # It will have a type from the questionType dictionary.
        # This type will determine the options that can be associated with it.
        # A question can belong to many different sections.
        # The notes are to help the enumerator and will typically appear as a
        # footnote in the printed form.
        tablename = "survey_question"
        table = define_table(tablename,
                             Field("name", "string", length=200,
                                   notnull=True,
                                   # Location codes (L0-L4) are localised
                                   represent = self.qstn_name_represent,
                                   ),
                             Field("code", "string", length=16,
                                   notnull=True,
                                   ),
                             Field("notes", "string", length=400
                                   ),
                             Field("type", "string", length=40,
                                   notnull=True,
                                   ),
                             # JSON blob, expanded into
                             # survey_question_metadata rows on accept
                             Field("metadata", "text",
                                   ),
                             *s3_meta_fields()
                             )

        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_create = T("Add an Assessment Question"),
            title_display = T("Assessment Question Details"),
            title_list = T("Assessment Questions"),
            title_update = T("Edit Assessment Question"),
            subtitle_create = T("Add a new Assessment Question"),
            label_list_button = T("List Assessment Questions"),
            label_create_button = T("Add a new Assessment Question"),
            label_delete_button = T("Delete this Assessment Question"),
            msg_record_created = T("Assessment Question added"),
            msg_record_modified = T("Assessment Question updated"),
            msg_record_deleted = T("Assessment Question deleted"),
            msg_list_empty = T("No Assessment Questions"))

        configure(tablename,
                  onvalidation = self.question_onvalidate,
                  onaccept = self.question_onaccept,
                  deduplicate = self.survey_question_duplicate,
                  )

        # ---------------------------------------------------------------------
        # survey_question_metadata
        # referenced by
        #    the survey_question table and is used to manage
        #    the metadata that will be associated with a question type.
        #    For example: if the question type is option, then valid metadata
        #    might be:
        #    count: the number of options that will be presented: 3
        #    1 : the first option : Female
        #    2 : the second option : Male
        #    3 : the third option : Not Specified
        #    So in the above case a question record will be associated with four
        #    question_metadata records.
        tablename = "survey_question_metadata"
        table = define_table(tablename,
                             Field("question_id",
                                   "reference survey_question",
                                   readable=False,
                                   writable=False
                                   ),
                             Field("descriptor",
                                   "string",
                                   length=20,
                                   notnull=True,
                                   ),
                             Field("value",
                                   "text",
                                   notnull=True,
                                   ),
                             *s3_meta_fields()
                             )

        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_create = T("Add Question Meta-Data"),
            title_display = T("Question Meta-Data Details"),
            title_list = T("Question Meta-Data"),
            title_update = T("Edit Question Meta-Data"),
            subtitle_create = T("Add new Question Meta-Data"),
            label_list_button = T("List Question Meta-Data"),
            label_create_button = T("Add new Question Meta-Data"),
            label_delete_button = T("Delete this Question Meta-Data"),
            msg_record_created = T("Question Meta-Data added"),
            msg_record_modified = T("Question Meta-Data updated"),
            msg_record_deleted = T("Question Meta-Data deleted"),
            msg_list_empty = T("No Question Meta-Data"),
            title_upload = T("Upload a Question List import file")
            )

        configure(tablename,
                  deduplicate = self.survey_question_metadata_duplicate
                  )

        # -------------------------------------------------------------------------
        # The survey_question_list table is a resolver between
        #    the survey_question and the survey_section tables.
        #
        #    Along with ids mapping back to these tables
        #    it will have a code that can be used to reference the question
        #    it will have the position that the question will appear in the template
        tablename = "survey_question_list"
        table = define_table(tablename,
                             Field("posn",
                                   "integer",
                                   notnull=True,
                                   ),
                             self.survey_template_id(),
                             Field("question_id",
                                   "reference survey_question",
                                   readable=False,
                                   writable=False
                                   ),
                             Field("section_id",
                                   "reference survey_section",
                                   readable=False,
                                   writable=False
                                   ),
                             *s3_meta_fields()
                             )

        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_upload = T("Upload an Assessment Template import file")
            )

        configure(tablename,
                  onaccept = self.question_list_onaccept,
                  deduplicate = self.survey_question_list_duplicate,
                  )

        # Pass names back to global scope (s3.*)
        # ---------------------------------------------------------------------
        return Storage(
                survey_qstn_name_represent = self.qstn_name_represent
            )
# -------------------------------------------------------------------------
@staticmethod
def qstn_name_represent(value):
"""
Return the question name, for locations in the gis hierarchy
the localised name will be returned
"""
if value == "L0" or value == "L1" or \
value == "L2" or value == "L3" or value == "L4":
return current.gis.get_location_hierarchy(value)
else:
return value
# -------------------------------------------------------------------------
@staticmethod
def question_onvalidate(form):
"""
Any text with the metadata that is imported will be held in
single quotes, rather than double quotes and so these need
to be escaped to double quotes to make it valid JSON
"""
from xml.sax.saxutils import unescape
metadata = form.vars.metadata
if metadata != None:
metadata = unescape(metadata, {"'":'"'})
return True
# -------------------------------------------------------------------------
@staticmethod
def question_onaccept(form):
"""
All of the question metadata will be stored in the metadata
field in a JSON format.
They will then be inserted into the survey_question_metadata
table pair will be a record on that table.
"""
vars = form.vars
if vars.metadata is None:
return
if vars.id:
record = current.s3db.survey_question[vars.id]
else:
return
if vars.metadata and \
vars.metadata != "":
survey_updateMetaData(record,
vars.type,
vars.metadata
)
# -------------------------------------------------------------------------
@staticmethod
def survey_question_duplicate(job):
"""
Rules for finding a duplicate:
- Look for the question code
"""
if job.tablename == "survey_question":
table = job.table
data = job.data
code = "code" in data and data.code
query = (table.code == code)
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def survey_question_metadata_duplicate(job):
"""
Rules for finding a duplicate:
- Look for the question_id and descriptor
"""
if job.tablename == "survey_question_metadata":
table = job.table
data = job.data
question = "question_id" in data and data.question_id
descriptor = "descriptor" in data and data.descriptor
query = (table.descriptor == descriptor) & \
(table.question_id == question)
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def question_list_onaccept(form):
"""
If a grid question is added to the the list then all of the
grid children will need to be added as well
"""
qstntable = current.s3db.survey_question
try:
vars = form.vars
question_id = vars.question_id
template_id = vars.template_id
section_id = vars.section_id
posn = vars.posn
except:
return
record = qstntable[question_id]
try:
type = record.type
except:
_debug("survey question missing type: %s" % record)
return
if type == "Grid":
widgetObj = survey_question_type["Grid"]()
widgetObj.insertChildrenToList(question_id,
template_id,
section_id,
posn,
)
if type == "Location":
widgetObj = survey_question_type["Location"]()
widgetObj.insertChildrenToList(question_id,
template_id,
section_id,
posn,
)
# -------------------------------------------------------------------------
@staticmethod
def survey_question_list_duplicate(job):
"""
Rules for finding a duplicate:
- The template_id, question_id and section_id are the same
"""
if job.tablename == "survey_question_list":
table = job.table
data = job.data
tid = "template_id" in data and data.template_id
qid = "question_id" in data and data.question_id
sid = "section_id" in data and data.section_id
query = (table.template_id == tid) & \
(table.question_id == qid) & \
(table.section_id == sid)
return duplicator(job, query)
# =============================================================================
def survey_getQuestionFromCode(code, series_id=None):
    """
    Return the question whose code matches the one passed in,
    restricted to the given series when series_id is provided.
    Result is a dict with keys qstn_id, code, name, type, posn
    (empty when no match is found).
    """
    s3db = current.s3db
    sertable = s3db.survey_series
    q_ltable = s3db.survey_question_list
    qsntable = s3db.survey_question
    query = (q_ltable.template_id == sertable.template_id) & \
            (q_ltable.question_id == qsntable.id) & \
            (qsntable.code == code)
    if series_id is not None:
        query = (sertable.id == series_id) & query
    record = current.db(query).select(qsntable.id,
                                      qsntable.code,
                                      qsntable.name,
                                      qsntable.type,
                                      q_ltable.posn,
                                      limitby=(0, 1)).first()
    question = {}
    if record is not None:
        qstn = record.survey_question
        question["qstn_id"] = qstn.id
        question["code"] = qstn.code
        question["name"] = qstn.name
        question["type"] = qstn.type
        question["posn"] = record.survey_question_list.posn
    return question
# =============================================================================
def survey_getAllQuestionsForTemplate(template_id):
    """
    Return the list of questions for the given template, ordered by
    their position in the template.
    Each question is a dict with keys:
        qstn_id, code, name, type, posn, section
    """
    s3db = current.s3db
    sectable = s3db.survey_section
    q_ltable = s3db.survey_question_list
    qsntable = s3db.survey_question
    query = (q_ltable.template_id == template_id) & \
            (q_ltable.section_id == sectable.id) & \
            (q_ltable.question_id == qsntable.id)
    rows = current.db(query).select(qsntable.id,
                                    qsntable.code,
                                    qsntable.name,
                                    qsntable.type,
                                    sectable.name,
                                    q_ltable.posn,
                                    orderby=(q_ltable.posn))
    name_represent = s3db.survey_qstn_name_represent
    questions = []
    for row in rows:
        qstn = row.survey_question
        questions.append({"qstn_id": qstn.id,
                          "code": qstn.code,
                          # Localise location-hierarchy names (L0-L4)
                          "name": name_represent(qstn.name),
                          "type": qstn.type,
                          "posn": row.survey_question_list.posn,
                          "section": row.survey_section.name,
                          })
    return questions
# =============================================================================
def survey_getAllQuestionsForSeries(series_id):
    """
    Return the list of questions for the given series, in template
    order. Each question dict holds:
        qstn_id, code, name, type, posn, section
    """
    stable = current.s3db.survey_series
    series = current.db(stable.id == series_id).select(stable.template_id,
                                                       limitby=(0, 1)
                                                       ).first()
    return survey_getAllQuestionsForTemplate(series.template_id)
# =============================================================================
def survey_getAllQuestionsForComplete(complete_id):
    """
    Return a tuple (questions, series_id) for the given completed
    response. The questions are in template order, each a dict with:
        qstn_id, code, name, type, posn, section
    """
    ctable = current.s3db.survey_complete
    complete = current.db(ctable.id == complete_id).select(ctable.series_id,
                                                           limitby=(0, 1)
                                                           ).first()
    series_id = complete.series_id
    return (survey_getAllQuestionsForSeries(series_id), series_id)
# =============================================================================
def survey_get_series_questions_of_type(questionList, type):
    """
    Filter questionList down to the questions whose type is one of
    those requested.

    @param questionList: list of question dicts
    @param type: a type name, or a list/tuple of type names
    Link and GridChild questions match when their parent question's
    type is one of the requested types; their name is then replaced
    by the widget's full name.
    """
    if isinstance(type, (list, tuple)):
        types = type
    else:
        # Fix: "(type)" is not a tuple - the missing comma meant that
        # "in types" performed a substring test on the type string
        types = (type,)
    questions = []
    for question in questionList:
        if question["type"] in types:
            questions.append(question)
        elif question["type"] == "Link" or \
             question["type"] == "GridChild":
            widgetObj = survey_getWidgetFromQuestion(question["qstn_id"])
            if widgetObj.getParentType() in types:
                question["name"] = widgetObj.fullName()
                questions.append(question)
    return questions
# =============================================================================
def survey_getQuestionFromName(name, series_id):
    """
    Return the question for the given series with the name that
    matches the one passed in, or None if no match can be found.
    """
    s3db = current.s3db
    sertable = s3db.survey_series
    q_ltable = s3db.survey_question_list
    qsntable = s3db.survey_question
    query = (sertable.id == series_id) & \
            (q_ltable.template_id == sertable.template_id) & \
            (q_ltable.question_id == qsntable.id) & \
            (qsntable.name == name)
    record = current.db(query).select(qsntable.id,
                                      qsntable.code,
                                      qsntable.name,
                                      qsntable.type,
                                      q_ltable.posn,
                                      limitby=(0, 1)).first()
    if record is None:
        # Unable to get the record from the question name
        # It could be because the question is a location
        # So get the location names and then check
        locList = current.gis.get_all_current_levels()
        for row in locList.items():
            if row[1] == name:
                return survey_getQuestionFromName(row[0], series_id)
        # Fix: previously fell through with record == None and raised
        # AttributeError below; return None explicitly instead
        return None

    question = {}
    sq = record.survey_question
    question["qstn_id"] = sq.id
    question["code"] = sq.code
    question["name"] = sq.name
    question["type"] = sq.type
    question["posn"] = record.survey_question_list.posn
    return question
# =============================================================================
def survey_updateMetaData(record, type, metadata):
    """
    Store a question's metadata as individual descriptor/value records
    in survey_question_metadata. Grid questions additionally get their
    child questions inserted.
    """
    metatable = current.s3db.survey_question_metadata
    question_id = record.id
    # The metadata can either be passed in as a JSON string
    # or as an already-parsed map
    if isinstance(metadata, str):
        metadataList = json2py(metadata)
    else:
        metadataList = metadata
    for (descriptor, value) in metadataList.items():
        if not isinstance(value, str):
            # web2py stomps all over a list so convert back to a string
            # before inserting it on the database
            value = json.dumps(value)
        metatable.insert(question_id = question_id,
                         descriptor = descriptor.strip(),
                         value = value.strip()
                         )
    if type == "Grid":
        survey_question_type["Grid"]().insertChildren(record, metadataList)
# =============================================================================
class S3SurveyFormatterModel(S3Model):
    """
    The survey_formatter table defines the order in which the questions
    will be laid out when a formatted presentation is used.

    The idea is to be able to present the questions in a format that
    best uses the available space and is familiar to those using the
    tool.

    Examples of formatted presentation are the spreadsheet and the web
    form. This may be extended to PDF documents.

    The rules are held as a JSON record and describe where each question
    within the section should appear in terms of rows and columns. Each
    question is referenced by the question code.

    For example assume a section with the following eight questions:
    QSTN_1, QSTN_2, QSTN_3, QSTN_4, QSTN_5, QSTN_6, QSTN_7, QSTN_8
    Then to display them in three rows:
    [[QSTN_1, QSTN_2, QSTN_3], [QSTN_4, QSTN_5, QSTN_6], [QSTN_7, QSTN_8]]
    would present it as follows:
    QSTN_1, QSTN_2, QSTN_3,
    QSTN_4, QSTN_5, QSTN_6,
    QSTN_7, QSTN_8
    The order of the questions does not need to be preserved, thus:
    [[QSTN_1, QSTN_2], [QSTN_4, QSTN_5, QSTN_3], [QSTN_7, QSTN_8, QSTN_6]]
    would be valid, and give:
    QSTN_1, QSTN_2,
    QSTN_4, QSTN_5, QSTN_3,
    QSTN_7, QSTN_8, QSTN_6,

    ***NOTE***
    When importing this record with a CSV file the question code will be
    single quoted, rather than double quoted which JSON requires.
    This is because the whole rule needs to be double quoted. Code that
    extracts the records from the table will then need to change all
    single quotes to double quotes. This can be done as follows:

    rowList = json2py(rules)
    """

    # Names exported to the global s3.* scope by model()
    names = ["survey_formatter"]
def model(self):
T = current.T
survey_formatter_methods = {
1: T("Default"),
2: T("Web Form"),
3: T("Spreadsheet"),
4: T("PDF"),
}
# ---------------------------------------------------------------------
tablename = "survey_formatter"
table = self.define_table(tablename,
self.survey_template_id(),
Field("section_id", "reference survey_section",
readable=False,
writable=False
),
Field("method", "integer",
requires = IS_IN_SET(survey_formatter_methods,
zero=None),
default=1,
represent = lambda index: \
survey_formatter_methods[index],
readable=True,
writable=False),
Field("rules", "text", default=""),
*s3_meta_fields()
)
self.configure(tablename,
onaccept = self.formatter_onaccept,
deduplicate=self.survey_formatter_duplicate
)
# ---------------------------------------------------------------------
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def formatter_onaccept(form):
"""
If this is the formatter rules for the Background Information
section then add the standard questions to the layout
"""
s3db = current.s3db
section_id = form.vars.section_id
sectionTbl = s3db.survey_section
section_name = sectionTbl[section_id].name
if section_name == "Background Information":
col1 = []
# Add the default layout
templateTbl = s3db.survey_template
template = templateTbl[form.vars.template_id]
if template.competion_qstn != "":
col1.append("STD-WHO")
if template.date_qstn != "":
col1.append("STD-DATE")
if template.time_qstn != "":
col1.append("STD-TIME")
if "location_detail" in template:
col2 = ["STD-P-Code"]
locationList = json2py(template.location_detail)
for loc in locationList:
col2.append("STD-%s" % loc)
col = [col1, col2]
rule = [{"columns":col}]
ruleList = json2py(form.vars.rules)
ruleList[:0]=rule
rules = json.dumps(ruleList)
db = current.db
ftable = db.survey_formatter
db(ftable.id == form.vars.id).update(rules = rules)
# -------------------------------------------------------------------------
@staticmethod
def survey_formatter_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same template_id and section_id
"""
if job.tablename == "survey_formatter":
table = job.table
data = job.data
tid = "template_id" in data and data.template_id
sid = "section_id" in data and data.section_id
query = (table.template_id == tid) & \
(table.section_id == sid)
return duplicator(job, query)
# =============================================================================
def survey_getQstnLayoutRules(template_id,
                              section_id,
                              method = 1
                              ):
    """
        This will return the rules for laying out the questions for
        the given section within the template.
        This is used when generating a formatted layout.

        First it will look for a survey_formatter record that matches
        the method given. Failing that it will look for a default
        survey_formatter record. If no appropriate survey_formatter
        record exists for the section then it will use the posn
        field found in the survey_question_list record.

        @param template_id: the survey_template record id
        @param section_id: the survey_section record id
        @param method: the presentation method (1 = Default)
        @return: a list of rows, each row being a list of question codes
    """
    db = current.db
    s3db = current.s3db
    # Search for layout rules on the survey_formatter table
    fmttable = s3db.survey_formatter
    query = (fmttable.template_id == template_id) & \
            (fmttable.section_id == section_id)
    rows = db(query).select(fmttable.method,
                            fmttable.rules)
    rules = None
    drules = None # default rules
    for row in rows:
        if row.method == method:
            # Exact match on the requested method wins
            rules = row.rules
            break
        elif row.method == 1:
            drules = row.rules
    # Fall back to the default rules if no method-specific rules found
    # (use identity comparison with None, per PEP 8)
    if rules is None and drules is not None:
        rules = drules
    rowList = []
    if rules is None or rules == "":
        # No formatter record: derive the layout from question positions
        q_ltable = s3db.survey_question_list
        qsntable = s3db.survey_question
        query = (q_ltable.template_id == template_id) & \
                (q_ltable.section_id == section_id) & \
                (q_ltable.question_id == qsntable.id)
        rows = db(query).select(qsntable.code,
                                q_ltable.posn,
                                orderby=(q_ltable.posn))
        append = rowList.append
        for qstn in rows:
            # One question per row, ordered by position
            append([qstn.survey_question.code])
    else:
        # Convert the JSON rules to python
        rowList = json2py(rules)
    return rowList
# =============================================================================
class S3SurveySeriesModel(S3Model):
    """
        Series Model

        A series is a single deployment of a template (e.g. for one event)
        and is the container for all completed responses of that deployment.
    """
    names = ["survey_series",
             "survey_series_status",
             ]
    def model(self):
        """
            Define the survey_series table, its CRUD strings, components
            and custom REST methods (summary/graph/map/chart download).
        """
        T = current.T
        person_id = self.pr_person_id
        pr_person_comment = self.pr_person_comment
        organisation_id = self.org_organisation_id
        s3_date_represent = S3DateTime.date_represent
        s3_date_format = current.deployment_settings.get_L10n_date_format()
        crud_strings = current.response.s3.crud_strings
        set_method = self.set_method
        if current.deployment_settings.get_org_autocomplete():
            org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
        else:
            org_widget = None
        # ---------------------------------------------------------------------
        # The survey_series table is used to hold all uses of a template
        #
        # When a series is first created the template status will change from
        # Pending to Active and at the stage no further changes to the
        # template can be made.
        #
        # Typically a series will be created for an event, which may be a
        # response to a natural disaster, an exercise,
        # or regular data collection activity.
        #
        # The series is a container for all the responses for the event
        series_status = {
            1: T("Active"),
            2: T("Closed"),
        }
        tablename = "survey_series"
        table = self.define_table(tablename,
                                  Field("name", "string", length=120,
                                        default="",
                                        requires = IS_NOT_EMPTY()),
                                  Field("description", "text", default="", length=500),
                                  Field("status", "integer",
                                        requires = IS_IN_SET(series_status,
                                                             zero=None),
                                        default=1,
                                        represent = lambda index: series_status[index],
                                        readable=True,
                                        writable=False),
                                  self.survey_template_id(empty=False,
                                                          ondelete="RESTRICT"),
                                  person_id(),
                                  organisation_id(widget = org_widget),
                                  Field("logo", "string", default="", length=512),
                                  Field("language", "string", default="en", length=8),
                                  Field("start_date", "date",
                                        requires = IS_EMPTY_OR(IS_DATE(format = s3_date_format)),
                                        represent = s3_date_represent,
                                        widget = S3DateWidget(),
                                        default=None),
                                  Field("end_date", "date",
                                        requires = IS_EMPTY_OR(IS_DATE(format = s3_date_format)),
                                        represent = s3_date_represent,
                                        widget = S3DateWidget(),
                                        default=None),
                                  #self.super_link("source_id", "doc_source_entity"),
                                  *s3_meta_fields())
        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_create = T("Conduct a Disaster Assessment"),
            title_display = T("Details of Disaster Assessment"),
            title_list = T("Disaster Assessments"),
            title_update = T("Edit this Disaster Assessment"),
            title_analysis_summary = T("Disaster Assessment Summary"),
            title_analysis_chart = T("Disaster Assessment Chart"),
            title_map = T("Disaster Assessment Map"),
            subtitle_create = T("Add a new Disaster Assessment"),
            subtitle_analysis_summary = T("Summary of Completed Assessment Forms"),
            help_analysis_summary = T("Click on questions below to select them, then click 'Display Selected Questions' button to view the selected questions for all Completed Assessment Forms"),
            subtitle_analysis_chart = T("Select a label question and at least one numeric question to display the chart."),
            subtitle_map = T("Disaster Assessment Map"),
            label_list_button = T("List Disaster Assessments"),
            label_create_button = T("Add a new Disaster Assessment"),
            label_delete_button = T("Delete this Disaster Assessment"),
            msg_record_created = T("Disaster Assessment added"),
            msg_record_modified = T("Disaster Assessment updated"),
            msg_record_deleted = T("Disaster Assessment deleted"),
            msg_list_empty = T("No Disaster Assessments"))
        self.configure(tablename,
                       create_next = URL(f="newAssessment",
                                         vars={"viewing":"survey_series.[id]"}),
                       onaccept = self.series_onaccept,
                       deduplicate = self.survey_series_duplicate,
                       )
        # Components
        self.add_component("survey_complete", survey_series="series_id")
        # Custom Methods
        set_method("survey", "series", method="summary", action=self.seriesSummary)
        set_method("survey", "series", method="graph", action=self.seriesGraph)
        set_method("survey", "series", method="map", action=self.seriesMap)
        set_method("survey", "series",
                   method="series_chart_download",
                   action=self.seriesChartDownload
                   )
        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        return Storage(
            survey_series_status = series_status,
        )
    # -------------------------------------------------------------------------
    @staticmethod
    def series_onaccept(form):
        """
            Ensure that the template status is set to Active

            Once a series uses a template the template may no longer be
            edited, so its status is advanced.
        """
        if form.vars.template_id:
            template_id = form.vars.template_id
        else:
            # No template on the form: nothing to update
            return
        table = current.s3db.survey_template
        # status=2 is presumably "Active" in the template status options
        # — TODO confirm against the survey_template model
        current.db(table.id == template_id).update(status = 2)
    # -------------------------------------------------------------------------
    @staticmethod
    def survey_series_duplicate(job):
        """
            Rules for finding a duplicate:
                - Look for a record with a similar name, ignoring case
        """
        if job.tablename == "survey_series":
            table = job.table
            data = job.data
            name = "name" in data and data.name
            # Case-insensitive substring match on the series name
            query = table.name.lower().like('%%%s%%' % name.lower())
            return duplicator(job, query)
    # -------------------------------------------------------------------------
    @staticmethod
    def seriesSummary(r, **attr):
        """
            Custom REST method: summary of a series.

            GET renders a selectable list of the series' questions;
            POST (or ?mode=...) renders the answers of the selected
            questions across all completed responses, with PDF/XLS
            export links.
        """
        db = current.db
        s3db = current.s3db
        request = current.request
        s3 = current.response.s3
        # Offset added to question positions so all checkbox ids are positive
        posn_offset = 11
        # Retain the rheader
        rheader = attr.get("rheader", None)
        if rheader:
            rheader = rheader(r)
            output = dict(rheader=rheader)
        else:
            output = dict()
        if request.env.request_method == "POST" \
           or "mode" in request.vars:
            # This means that the user has selected the questions and
            # Wants to display the details of the selected questions
            crud_strings = s3.crud_strings["survey_complete"]
            question_ids = []
            vars = request.vars
            if "mode" in vars:
                mode = vars["mode"]
            # NOTE(review): if this branch is reached via POST without a
            # "mode" var, `mode` is unbound below — potential NameError
            series_id = r.id
            if "selected" in vars:
                selected = vars["selected"].split(",")
            else:
                selected = []
            q_ltable = s3db.survey_question_list
            sertable = s3db.survey_series
            query = (sertable.id == series_id) & \
                    (sertable.template_id == q_ltable.template_id)
            questions = db(query).select(q_ltable.posn,
                                         q_ltable.question_id,
                                         orderby = q_ltable.posn)
            for question in questions:
                qstn_posn = question.posn + posn_offset
                if mode == "Inclusive":
                    if str(qstn_posn) in selected:
                        question_ids.append(str(question.question_id))
                elif mode == "Exclusive":
                    if str(qstn_posn) not in selected:
                        question_ids.append(str(question.question_id))
            items = buildCompletedList(series_id, question_ids)
            if r.representation == "xls":
                from ..s3.codecs.xls import S3XLS
                exporter = S3XLS()
                return exporter.encode(items,
                                       title=crud_strings.title_selected,
                                       use_colour=False
                                       )
            if r.representation == "html":
                table = buildTableFromCompletedList(items)
                #exporter = S3Exporter()
                #table = exporter.html(items)
                output["items"] = table
                output["sortby"] = [[0, "asc"]]
                url_pdf = URL(c="survey", f="series",
                              args=[series_id, "summary.pdf"],
                              vars = {"mode": mode,
                                      "selected": vars["selected"]}
                              )
                url_xls = URL(c="survey", f="series",
                              args=[series_id, "summary.xls"],
                              vars = {"mode": mode,
                                      "selected": vars["selected"]}
                              )
                s3.formats["pdf"] = url_pdf
                s3.formats["xls"] = url_xls
            else:
                output["items"] = None
            output["title"] = crud_strings.title_selected
            output["subtitle"] = crud_strings.subtitle_selected
            output["help"] = ""
        else:
            # Initial GET: show the question-selection form
            crud_strings = s3.crud_strings["survey_series"]
            viewing = request.get_vars.get("viewing", None)
            if viewing:
                # "viewing" is of the form "<tablename>.<id>"
                dummy, series_id = viewing.split(".")
            else:
                series_id = request.get_vars.get("series", None)
                if not series_id:
                    series_id = r.id
            form = buildSeriesSummary(series_id, posn_offset)
            output["items"] = form
            output["sortby"] = [[0, "asc"]]
            output["title"] = crud_strings.title_analysis_summary
            output["subtitle"] = crud_strings.subtitle_analysis_summary
            output["help"] = crud_strings.help_analysis_summary
            s3.dataTableBulkActionPosn = "top"
            s3.actions = None
        current.response.view = "survey/series_summary.html"
        return output
    # -------------------------------------------------------------------------
    @staticmethod
    def getChartName():
        """
            Create a Name for a Chart

            The selected questions are hashed so the cache filename stays
            short and filesystem-safe regardless of question codes.
        """
        import hashlib
        vars = current.request.vars
        end_part = "%s_%s" % (vars.numericQuestion,
                              vars.labelQuestion)
        h = hashlib.sha256()
        # NOTE(review): update() with a str works on Python 2 only;
        # Python 3 would require bytes (end_part.encode())
        h.update(end_part)
        encoded_part = h.hexdigest()
        chartName = "survey_series_%s_%s" % (vars.series, encoded_part)
        return chartName
    # -------------------------------------------------------------------------
    @staticmethod
    def seriesChartDownload(r, **attr):
        """
            Custom REST method: download the series chart as a PNG
            attachment, regenerating it if it is not cached.
        """
        from gluon.contenttype import contenttype
        series_id = r.id
        seriesName = survey_getSeriesName(series_id)
        filename = "%s_chart.png" % seriesName
        response = current.response
        response.headers["Content-Type"] = contenttype(".png")
        response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
        chartFile = S3SurveySeriesModel.getChartName()
        cached = S3Chart.getCachedFile(chartFile)
        if cached:
            return cached
        # The cached version doesn't exist so regenerate it
        output = dict()
        vars = current.request.get_vars
        # NOTE(review): labelQuestion/numQstnList are only assigned when
        # present in vars — the comparison below raises NameError if the
        # request omits either parameter
        if "labelQuestion" in vars:
            labelQuestion = vars.labelQuestion
        if "numericQuestion" in vars:
            numQstnList = vars.numericQuestion
            if not isinstance(numQstnList, (list, tuple)):
                numQstnList = [numQstnList]
        if (numQstnList != None) and (labelQuestion != None):
            S3SurveySeriesModel.drawChart(output, series_id, numQstnList,
                                          labelQuestion, outputFormat="png")
        return output["chart"]
    # -------------------------------------------------------------------------
    @staticmethod
    def seriesGraph(r, **attr):
        """
            Allows the user to select one string question and multiple numeric
            questions. The string question is used to group the numeric data,
            with the result displayed as a bar chart.
            For example:
                The string question can be Geographic area, and the numeric
                questions could be people injured and families displaced.
                Then the results will be grouped by each geographical area.
        """
        T = current.T
        request = current.request
        s3 = current.response.s3
        output = dict()
        # Draw the chart
        vars = request.vars
        if "viewing" in vars:
            # "viewing" is of the form "<tablename>.<id>"
            dummy, series_id = vars.viewing.split(".")
        elif "series" in vars:
            series_id = vars.series
        else:
            series_id = r.id
        chartFile = S3SurveySeriesModel.getChartName()
        cachePath = S3Chart.getCachedPath(chartFile)
        if cachePath and request.ajax:
            # Serve the cached image directly for ajax refreshes
            return IMG(_src=cachePath)
        else:
            numQstnList = None
            labelQuestion = None
            post_vars = request.post_vars
            if post_vars is not None:
                if "labelQuestion" in post_vars:
                    labelQuestion = post_vars.labelQuestion
                if "numericQuestion" in post_vars:
                    numQstnList = post_vars.numericQuestion
                    if not isinstance(numQstnList, (list, tuple)):
                        numQstnList = [numQstnList]
                if (numQstnList != None) and (labelQuestion != None):
                    S3SurveySeriesModel.drawChart(output, series_id, numQstnList,
                                                  labelQuestion)
            if request.ajax == True and "chart" in output:
                return output["chart"]
        # retain the rheader
        rheader = attr.get("rheader", None)
        if rheader:
            rheader = rheader(r)
            output["rheader"] = rheader
        # ---------------------------------------------------------------------
        def addQstnChkboxToTR(numQstnList, qstn):
            """
                Build one table row holding a checkbox for the question,
                pre-checked if the question is in numQstnList
            """
            tr = TR()
            if numQstnList != None and qstn["code"] in numQstnList:
                tr.append(INPUT(_type="checkbox",
                                _name="numericQuestion",
                                _value=qstn["code"],
                                value=True,
                                )
                          )
            else:
                tr.append(INPUT(_type="checkbox",
                                _name="numericQuestion",
                                _value=qstn["code"],
                                )
                          )
            tr.append(LABEL(qstn["name"]))
            return tr
        if series_id == None:
            return output
        allQuestions = survey_getAllQuestionsForSeries(series_id)
        labelTypeList = ("String",
                         "Option",
                         "YesNo",
                         "YesNoDontKnow",
                         "Location",
                         )
        labelQuestions = survey_get_series_questions_of_type (allQuestions, labelTypeList)
        lblQstns = []
        for question in labelQuestions:
            lblQstns.append(question["name"])
        # NOTE(review): ("Numeric") is a str, not a 1-tuple — presumably
        # survey_get_series_questions_of_type tolerates this; confirm
        numericTypeList = ("Numeric")
        form = FORM(_id="mapGraphForm")
        table = TABLE()
        labelQstn = SELECT(lblQstns, _name="labelQuestion", value=labelQuestion)
        table.append(TR(TH("%s:" % T("Select Label Question")), _class="survey_question"))
        table.append(labelQstn)
        table.append(TR(TH(T("Select Numeric Questions (one or more):")), _class="survey_question"))
        # First add the special questions
        specialQuestions = [{"code":"Count", "name" : T("Number of Completed Assessment Forms")}]
        innerTable = TABLE()
        for qstn in specialQuestions:
            tr = addQstnChkboxToTR(numQstnList, qstn)
            innerTable.append(tr)
        table.append(innerTable)
        # Now add the numeric questions
        numericQuestions = survey_get_series_questions_of_type (allQuestions, numericTypeList)
        innerTable = TABLE()
        for qstn in numericQuestions:
            tr = addQstnChkboxToTR(numQstnList, qstn)
            innerTable.append(tr)
        table.append(innerTable)
        form.append(table)
        series = INPUT(_type="hidden",
                       _id="selectSeriesID",
                       _name="series",
                       _value="%s" % series_id
                       )
        button = INPUT(_type="button", _id="chart_btn", _name="Chart", _value=T("Display Chart"))
        form.append(series)
        form.append(button)
        # Set up the javascript code for ajax interaction
        jurl = URL(r=request, c=r.prefix, f=r.function, args=request.args)
        s3.jquery_ready.append('''
$('#chart_btn').click(function(){
 var data=$('#mapGraphForm').serialize()
 var url='<a class="action-btn" href=series_chart_download?' + data + '>Download Chart</a>'
 $.post('%s',data,function(data){
  $('#survey_chart').empty();
  $('#survey_chart').append(data);
  $('#survey_chart_download').empty();
  $('#survey_chart_download').append(url);
  });
 });
''' % jurl)
        output["showForm"] = P(T("Click on the chart to show/hide the form."))
        output["form"] = form
        output["title"] = s3.crud_strings["survey_series"].title_analysis_chart
        current.response.view = "survey/series_analysis.html"
        return output
    # -------------------------------------------------------------------------
    @staticmethod
    def drawChart(output, series_id, numQstnList, labelQuestion, outputFormat=None):
        """
            Render a bar chart of the selected numeric questions grouped
            by the label question, storing the image in output["chart"]
            (and a download link in output["chartDownload"]).
        """
        T = current.T
        getAnswers = survey_getAllAnswersForQuestionInSeries
        gqstn = survey_getQuestionFromName(labelQuestion, series_id)
        gqstn_id = gqstn["qstn_id"]
        ganswers = getAnswers(gqstn_id, series_id)
        dataList = []
        legendLabels = []
        for numericQuestion in numQstnList:
            if numericQuestion == "Count":
                # get the count of replies for the label question
                gqstn_type = gqstn["type"]
                analysisTool = survey_analysis_type[gqstn_type](gqstn_id, ganswers)
                # NOTE(review): "map" shadows the builtin here
                map = analysisTool.uniqueCount()
                label = map.keys()
                data = map.values()
                legendLabels.append(T("Count of Question"))
            else:
                qstn = survey_getQuestionFromCode(numericQuestion, series_id)
                qstn_id = qstn["qstn_id"]
                qstn_type = qstn["type"]
                answers = getAnswers(qstn_id, series_id)
                analysisTool = survey_analysis_type[qstn_type](qstn_id, answers)
                # Truncate long question names for the legend
                label = analysisTool.qstnWidget.fullName()
                if len(label) > 20:
                    label = "%s..." % label[0:20]
                legendLabels.append(label)
                grouped = analysisTool.groupData(ganswers)
                aggregate = "Sum"
                filtered = analysisTool.filter(aggregate, grouped)
                (label, data) = analysisTool.splitGroupedData(filtered)
            if data != []:
                dataList.append(data)
        if dataList == []:
            output["chart"] = H4(T("There is insufficient data to draw a chart from the questions selected"))
        else:
            chartFile = S3SurveySeriesModel.getChartName()
            chart = S3Chart(path=chartFile, width=7.2)
            chart.asInt = True
            chart.survey_bar(labelQuestion,
                             dataList,
                             label,
                             legendLabels)
            if outputFormat == None:
                image = chart.draw()
            else:
                image = chart.draw(output=outputFormat)
            output["chart"] = image
            request = current.request
            chartLink = A(T("Download"),
                          _href=URL(c="survey",
                                    f="series",
                                    args=request.args,
                                    vars=request.vars
                                    )
                          )
            output["chartDownload"] = chartLink
    # -------------------------------------------------------------------------
    @staticmethod
    def seriesMap(r, **attr):
        """
            Custom REST method: map of assessment locations for one series
            (or all series), coloured by the selected priority question.
        """
        import math
        from s3survey import S3AnalysisPriority
        T = current.T
        response = current.response
        s3 = response.s3
        request = current.request
        gis = current.gis
        # retain the rheader
        rheader = attr.get("rheader", None)
        if rheader:
            rheader = rheader(r)
            output = dict(rheader=rheader)
        else:
            output = dict()
        crud_strings = s3.crud_strings["survey_series"]
        viewing = request.get_vars.get("viewing", None)
        if viewing:
            # "viewing" is of the form "<tablename>.<id>"
            dummy, series_id = viewing.split(".")
        else:
            series_id = request.get_vars.get("series", None)
            if not series_id:
                series_id = r.id
        if series_id == None:
            # No specific series: show all of them
            seriesList = []
            append = seriesList.append
            records = survey_getAllSeries()
            for row in records:
                append(row.id)
        else:
            seriesList = [series_id]
        pqstn = {}
        pqstn_name = request.post_vars.get("pqstn_name", None)
        if pqstn_name is None:
            pqstn = survey_getPriorityQuestionForSeries(series_id)
            if "name" in pqstn:
                pqstn_name = pqstn["name"]
        feature_queries = []
        bounds = {}
        # Build the drop down list of priority questions
        allQuestions = survey_getAllQuestionsForSeries(series_id)
        # NOTE(review): ("Numeric") is a str, not a 1-tuple — see seriesGraph
        numericTypeList = ("Numeric")
        numericQuestions = survey_get_series_questions_of_type(allQuestions,
                                                               numericTypeList)
        numQstns = []
        for question in numericQuestions:
            numQstns.append(question["name"])
        form = FORM(_id="mapQstnForm")
        table = TABLE()
        if pqstn:
            priorityQstn = SELECT(numQstns, _name="pqstn_name",
                                  value=pqstn_name)
        else:
            priorityQstn = None
        # Set up the legend
        priorityObj = S3AnalysisPriority(range=[-.66, .66],
                                         colour={-1:"#888888", # grey
                                                 0:"#008000", # green
                                                 1:"#FFFF00", # yellow
                                                 2:"#FF0000", # red
                                                 },
                                         # Make Higher-priority show up more clearly
                                         opacity={-1:0.5,
                                                  0:0.6,
                                                  1:0.7,
                                                  2:0.8,
                                                  },
                                         image={-1:"grey",
                                                0:"green",
                                                1:"yellow",
                                                2:"red",
                                                },
                                         desc={-1:"No Data",
                                               0:"Low",
                                               1:"Average",
                                               2:"High",
                                               },
                                         zero = True)
        for series_id in seriesList:
            series_name = survey_getSeriesName(series_id)
            response_locations = getLocationList(series_id)
            if pqstn == {} and pqstn_name:
                # Resolve the posted question name to a question dict
                for question in numericQuestions:
                    if pqstn_name == question["name"]:
                        pqstn = question
            if pqstn != {}:
                pqstn_id = pqstn["qstn_id"]
                answers = survey_getAllAnswersForQuestionInSeries(pqstn_id,
                                                                  series_id)
                analysisTool = survey_analysis_type["Numeric"](pqstn_id,
                                                               answers)
                analysisTool.advancedResults()
            else:
                analysisTool = None
            if analysisTool != None and not math.isnan(analysisTool.mean):
                # Only show a legend when there is usable numeric data
                pBand = analysisTool.priorityBand(priorityObj)
                legend = TABLE(
                           TR (TH(T("Marker Levels"), _colspan=3),
                               _class= "survey_question"),
                           )
                for key in priorityObj.image.keys():
                    tr = TR(TD(priorityObj.imageURL(request.application,
                                                    key)),
                            TD(priorityObj.desc(key)),
                            TD(priorityObj.rangeText(key, pBand)),
                            )
                    legend.append(tr)
                output["legend"] = legend
            if len(response_locations) > 0:
                for i in range( 0 , len( response_locations) ):
                    location = response_locations[i]
                    complete_id = location.complete_id
                    # Insert how we want this to appear on the map
                    url = URL(c="survey",
                              f="series",
                              args=[series_id,
                                    "complete",
                                    complete_id,
                                    "read"
                                    ]
                              )
                    location.shape = "circle"
                    location.size = 5
                    if analysisTool is None:
                        priority = -1
                    else:
                        priority = analysisTool.priority(complete_id,
                                                         priorityObj)
                    location.colour = priorityObj.colour[priority]
                    location.opacity = priorityObj.opacity[priority]
                    location.popup_url = url
                    location.popup_label = response_locations[i].name
                feature_queries.append({ "name": "%s: Assessments" % series_name,
                                         "query": response_locations,
                                         "active": True })
                if bounds == {}:
                    bounds = (gis.get_bounds(features=response_locations))
                else:
                    new_bounds = gis.get_bounds(features=response_locations)
                    # Where is merge_bounds defined!?
                    # NOTE(review): merge_bounds is not defined in this module
                    # — this branch would raise NameError if reached
                    bounds = merge_bounds([bounds, new_bounds])
        if bounds == {}:
            bounds = gis.get_bounds()
        # NOTE(review): "map" shadows the builtin here
        map = gis.show_map(feature_queries = feature_queries,
                           #height = 600,
                           #width = 720,
                           bbox = bounds,
                           #collapsed = True,
                           catalogue_layers = True,
                           )
        series = INPUT(_type="hidden",
                       _id="selectSeriesID",
                       _name="series",
                       _value="%s" % series_id
                       )
        table.append(TR(TH("%s:" % T("Display Question on Map")),
                        _class="survey_question"))
        table.append(priorityQstn)
        table.append(series)
        form.append(table)
        button = INPUT(_type="submit", _name="Chart",
                       _value=T("Update Map"))
        # REMOVED until we have dynamic loading of maps.
        #button = INPUT(_type="button", _id="map_btn", _name="Map_Btn", _value=T("Select the Question"))
        #jurl = URL(r=request, c=r.prefix, f=r.function, args=request.args)
        #s3.jquery_ready.append('''
        #$('#map_btn').click(function(){
        # $.post('%s',$('#mapQstnForm').serialize(),function(data){
        #  obj = jQuery.parseJSON(data);
        #  $('#survey_map-legend').empty();
        #  $('#survey_map-legend').append(obj.legend);
        #  $('#survey_map-container').empty();
        #  $('#survey_map-container').append(obj.map);
        #  });
        #});
        #''' % jurl)
        form.append(button)
        output["title"] = crud_strings.title_map
        output["subtitle"] = crud_strings.subtitle_map
        output["instructions"] = T("Click on a marker to see the Completed Assessment Form")
        output["form"] = form
        output["map"] = map
        response.view = "survey/series_map.html"
        return output
# =============================================================================
def survey_serieslist_dataTable_post(r):
    """
        Replace the Action Buttons of the series list dataTable with a
        single "update" button linking to the series summary
    """
    #S3CRUD.action_buttons(r)
    summary_url = URL(c="survey", f="series",
                      args=["[id]", "summary"])
    update_action = {"label": current.messages.UPDATE,
                     "_class": "action-btn edit",
                     "url": summary_url,
                     }
    current.response.s3.actions = [update_action]
# =============================================================================
def survey_series_represent(value):
    """
        This will display the series name, rather than the id

        @param value: a survey_series record id
        @return: the series name, or "" if no matching record exists
    """
    table = current.s3db.survey_series
    row = current.db(table.id == value).select(table.name,
                                               limitby=(0, 1)).first()
    # Guard against a missing record (stale or None id) instead of
    # raising AttributeError — consistent with survey_getSeriesName()
    if row is None:
        return ""
    return row.name
# =============================================================================
def survey_series_rheader(r):
    """
        The series rheader

        Builds the resource header shown above series views: tabs,
        a completed-form count, export buttons (spreadsheet, and Word
        if PyRTF is installed) and the series summary table.
        Returns None for non-HTML representations or unknown records.
    """
    if r.representation == "html":
        tablename, record = s3_rheader_resource(r)
        if not record:
            # Fall back to the ?series= request var
            series_id = current.request.vars.series
            record = survey_getSeries(series_id)
        if record != None:
            T = current.T
            s3db = current.s3db
            # Tabs
            tabs = [(T("Details"), None),
                    (T("Completed Assessments"), "complete"),
                    (T("Summary"), "summary"),
                    (T("Chart"), "graph"),
                    (T("Map"), "map"),
                    ]
            if current.auth.s3_has_permission("create", "survey_complete"):
                tabs.insert(1, (T("Enter Completed Assessment"), "newAssessment/"))
            rheader_tabs = s3_rheader_tabs(r, tabs)
            # Count of completed responses for this series
            completeTable = s3db.survey_complete
            qty = current.db(completeTable.series_id == record.id).count()
            tsection = TABLE(_class="survey-complete-list")
            lblSection = T("Number of Completed Assessment Forms")
            rsection = TR(TH(lblSection), TD(qty))
            tsection.append(rsection)
            urlexport = URL(c="survey", f="series_export_formatted",
                            args=[record.id])
            tranForm = FORM(_action=urlexport)
            translationList = survey_getAllTranslationsForSeries(record.id)
            if len(translationList) > 0:
                # Radio buttons to pick the export language
                tranTable = TABLE()
                tr = TR(INPUT(_type='radio',
                              _name='translationLanguage',
                              _value="Default",
                              _checked=True,
                              ),
                        LABEL("Default"))
                colCnt = 1
                for translation in translationList:
                    # include a maximum of 4 translation languages per row
                    if colCnt == 4:
                        tranTable.append(tr)
                        tr = TR()
                        colCnt = 0
                    tr.append(INPUT(_type="radio",
                                    _name="translationLanguage",
                                    _value=translation["code"],
                                    ))
                    tr.append(LABEL(translation["language"]))
                    colCnt += 1
                if colCnt != 0:
                    # Flush the final (partial) row
                    tranTable.append(tr)
                tranForm.append(tranTable)
            export_xls_btn = INPUT(_type="submit",
                                   _id="export_xls_btn",
                                   _name="Export_Spreadsheet",
                                   _value=T("Download Assessment Form Spreadsheet"),
                                   _class="action-btn"
                                   )
            tranForm.append(export_xls_btn)
            try:
                # only add the Export to Word button up if PyRTF is installed
                # NOTE(review): bare except — any import-time error is
                # treated as "PyRTF not installed"
                from PyRTF import Document
                export_rtf_btn = INPUT(_type="submit",
                                       _id="export_rtf_btn",
                                       _name="Export_Word",
                                       _value=T("Download Assessment Form Document"),
                                       _class="action-btn"
                                       )
                tranForm.append(export_rtf_btn)
            except:
                pass
            urlimport = URL(c="survey",
                            f="export_all_responses",
                            args=[record.id],
                            )
            # NOTE(review): element id "All_resposnes" is misspelt; left
            # unchanged in case CSS/JS references it — confirm before fixing
            buttons = DIV(A(T("Export all Completed Assessment Data"),
                            _href=urlimport,
                            _id="All_resposnes",
                            _class="action-btn"
                            ),
                          )
            rheader = DIV(TABLE(
                          TR(TH("%s: " % T("Template")),
                             survey_template_represent(record.template_id),
                             TH("%s: " % T("Name")),
                             record.name,
                             TH("%s: " % T("Status")),
                             s3db.survey_series_status[record.status],
                             ),
                          ),
                          tsection,
                          tranForm,
                          buttons,
                          rheader_tabs)
            return rheader
    return None
# =============================================================================
def survey_getSeries(series_id):
    """
        Look up and return the survey_series record for the given id
        (None if no such record exists)
    """
    db = current.db
    stable = current.s3db.survey_series
    query = (stable.id == series_id)
    record = db(query).select(limitby=(0, 1)).first()
    return record
# =============================================================================
def survey_getSeriesName(series_id):
    """
        function to return the Series Name from the id

        @param series_id: the survey_series record id
        @return: the series name, or "" if the series cannot be found
    """
    table = current.s3db.survey_series
    row = current.db(table.id == series_id).select(table.name,
                                                   limitby=(0, 1)).first()
    # .first() returns None when no record matched; check explicitly
    # rather than using a bare except that could mask unrelated errors
    if row is not None:
        return row.name
    return ""
# =============================================================================
def survey_getAllSeries():
    """
        Return every survey_series record held in the database
    """
    stable = current.s3db.survey_series
    all_series = current.db(stable.id > 0).select()
    return all_series
# =============================================================================
def survey_buildQuestionnaireFromSeries(series_id, complete_id=None):
    """
        Build a form displaying all the questions for a given series_id.
        If complete_id is also provided then the responses of that
        completed questionnaire are shown against each question.
    """
    question_list = survey_getAllQuestionsForSeries(series_id)
    return buildQuestionsForm(question_list, complete_id)
# =============================================================================
def survey_save_answers_for_series(series_id, complete_id, vars):
    """
        Save the submitted answers (in vars) for a completed series
        response, delegating the work to saveAnswers()
    """
    question_list = survey_getAllQuestionsForSeries(series_id)
    return saveAnswers(question_list, series_id, complete_id, vars)
# =============================================================================
def saveAnswers(questions, series_id, complete_id, vars):
    """
        Serialise the submitted answers and store them on a
        survey_complete record.

        @param questions: list of question dicts (each with a "code" key)
        @param series_id: the survey_series this response belongs to
        @param complete_id: id of an existing survey_complete record to
                            update, or None to insert a new record
        @param vars: the request vars holding the submitted answers,
                     keyed by question code
        @return: the id of the inserted or updated survey_complete record
    """
    # Build the answer_list text with join() rather than repeated +=
    lines = []
    table = current.s3db.survey_complete
    for question in questions:
        code = question["code"]
        if (code in vars) and vars[code] != "":
            # One CSV-style '"code","answer"' line per answered question
            lines.append('"%s","%s"\n' % (code, vars[code]))
    text = "".join(lines)
    if complete_id is None:
        # Insert into database
        record_id = table.insert(series_id = series_id, answer_list = text)
        S3SurveyCompleteModel.completeOnAccept(record_id)
        return record_id
    else:
        # Update the complete_id record
        current.db(table.id == complete_id).update(answer_list = text)
        S3SurveyCompleteModel.completeOnAccept(complete_id)
        return complete_id
# =============================================================================
def survey_getPriorityQuestionForSeries(series_id):
    """
        Return the priority question (as a question dict) configured on
        the template of the given series.

        @param series_id: the survey_series record id
        @return: the question dict, or None if the series has no template
    """
    templateRec = survey_getTemplateFromSeries(series_id)
    # Identity comparison with None, per PEP 8
    if templateRec is not None:
        priorityQstnCode = templateRec["priority_qstn"]
        question = survey_getQuestionFromCode(priorityQstnCode, series_id)
        return question
    else:
        return None
# =============================================================================
def buildSeriesSummary(series_id, posn_offset):
    """
        Build the series summary dataTable: one row per (non-Grid)
        question with a bulk-select checkbox, position, name, type
        (with optional chart button) and an answer summary.

        @param series_id: the survey_series record id
        @param posn_offset: offset added to question positions so that
                            all generated checkbox ids are positive
        @return: the dataTable FORM with a hidden series id appended
    """
    from s3.s3data import S3DataTable
    T = current.T
    table = TABLE(_id="series_summary",
                  _class="dataTable display")
    hr = TR(TH(""), # Bulk action column
            TH(T("Position")),
            TH(T("Question")),
            TH(T("Type")),
            TH(T("Summary"))
            )
    header = THEAD(hr)
    questions = survey_getAllQuestionsForSeries(series_id)
    # NOTE(review): `line` is never used below
    line = []
    body = TBODY()
    for question in questions:
        # Grid containers are not summarised themselves
        if question["type"] == "Grid":
            continue
        question_id = question["qstn_id"]
        widgetObj = survey_getWidgetFromQuestion(question_id)
        br = TR()
        posn = int(question["posn"])+posn_offset
        br.append(TD(INPUT(_id="select%s" % posn,
                           _type="checkbox",
                           _class="bulkcheckbox",
                           )))
        br.append(posn) # add an offset to make all id's +ve
        br.append(widgetObj.fullName())
        #br.append(question["name"])
        # NOTE(review): `type` shadows the builtin here
        type = widgetObj.type_represent()
        answers = survey_getAllAnswersForQuestionInSeries(question_id,
                                                          series_id)
        analysisTool = survey_analysis_type[question["type"]](question_id,
                                                              answers)
        chart = analysisTool.chartButton(series_id)
        cell = TD()
        cell.append(type)
        if chart:
            cell.append(chart)
        br.append(cell)
        analysisTool.count()
        br.append(analysisTool.summary())
        body.append(br)
    table.append(header)
    table.append(body)
    s3 = current.response.s3
    # Turn off server side pagination
    s3.no_sspag = True
    # Turn multi-select on
    s3.dataTableBulkActions = [current.T("Display Selected Questions")]
    attr = S3DataTable.getConfigData()
    form = S3DataTable.htmlConfig(table,
                                  "series_summary",
                                  [[0, 'asc']], # order by
                                  "", # the filter string
                                  None, # the rfields
                                  **attr
                                  )
    # Hidden field so the POST identifies the series being summarised
    series = INPUT(_type="hidden", _id="selectSeriesID", _name="series",
                   _value="%s" % series_id)
    form.append(series)
    return form
# =============================================================================
class S3SurveyCompleteModel(S3Model):
    """
        Completed Surveys Model

        Defines the survey_complete table (one record per completed
        response, holding all answers as text) and the survey_answer
        table (one record per individual answer), plus the callbacks
        that split the former into the latter.
    """

    names = ["survey_complete",
             "survey_answer",
             ]

    def model(self):
        """
            Define the survey_complete and survey_answer tables,
            their CRUD strings, callbacks and components
        """

        T = current.T
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        # ---------------------------------------------------------------------
        # The survey_complete table holds all of the answers for a completed
        # response. It has a link back to the series this response belongs to.
        #
        # Whilst this table holds all of the answers in a text field during
        # the onaccept each answer is extracted and then stored in the
        # survey_answer table. This process of moving the answers to a
        # separate table makes it easier to analyse the answers
        # for a given question across all responses.

        tablename = "survey_complete"
        define_table(tablename,
                     Field("series_id", "reference survey_series",
                           represent = survey_series_represent,
                           label = T("Series"),
                           readable=False,
                           writable=False
                           ),
                     # Raw '"code","answer"' lines, split out in onaccept
                     Field("answer_list", "text",
                           represent = survey_answer_list_represent
                           ),
                     # Denormalised location repr, set in completeOnAccept
                     Field("location", "text",
                           readable=False,
                           writable=False
                           ),
                     *s3_meta_fields())

        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_create = T("Enter Completed Assessment Form"),
            title_display = T("Completed Assessment Form Details"),
            title_list = T("Completed Assessment Forms"),
            title_update = T("Edit Completed Assessment Form"),
            title_selected = T("Selected Questions for all Completed Assessment Forms"),
            subtitle_create = T("Enter Completed Assessment Form"),
            subtitle_selected = T("Selected Questions for all Completed Assessment Forms"),
            label_list_button = T("List Completed Assessment Forms"),
            label_create_button = T("Add a new Completed Assessment Form"),
            label_delete_button = T("Delete this Completed Assessment Form"),
            msg_record_created = T("Completed Assessment Form entered"),
            msg_record_modified = T("Completed Assessment Form updated"),
            msg_record_deleted = T("Completed Assessment Form deleted"),
            msg_list_empty = T("No Completed Assessment Forms"),
            title_upload = T("Upload the Completed Assessment Form")
            )

        configure(tablename,
                  onvalidation = self.complete_onvalidate,
                  onaccept = self.complete_onaccept,
                  deduplicate=self.survey_complete_duplicate,
                  )

        self.add_component("survey_complete",
                           survey_series = dict(joinby="series_id",
                                                multiple=True)
                           )

        # ---------------------------------------------------------------------
        # The survey_answer table holds the answer for a single response
        # of a given question.

        tablename = "survey_answer"
        define_table(tablename,
                     Field("complete_id", "reference survey_complete",
                           readable=False,
                           writable=False
                           ),
                     Field("question_id", "reference survey_question",
                           readable=True,
                           writable=False
                           ),
                     Field("value", "text",
                           readable=True,
                           writable=True
                           ),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            title_create = T("Add Assessment Answer"),
            title_display = T("Assessment Answer Details"),
            title_list = T("Assessment Answers"),
            title_update = T("Edit Assessment Answer"),
            subtitle_create = T("Add a new Assessment Answer"),
            label_list_button = T("List Assessment Answers"),
            label_create_button = T("Add a new Assessment Answer"),
            label_delete_button = T("Delete this Assessment Answer"),
            msg_record_created = T("Assessment Answer added"),
            msg_record_modified = T("Assessment Answer updated"),
            msg_record_deleted = T("Assessment Answer deleted"),
            msg_list_empty = T("No Assessment Answers"))

        configure(tablename,
                  onaccept = self.answer_onaccept,
                  deduplicate = self.survey_answer_duplicate
                  )
        # ---------------------------------------------------------------------
        return Storage()

    # -------------------------------------------------------------------------
    @staticmethod
    def extractAnswerFromAnswerList(answerList, qstnCode):
        """
            function to extract the answer for the question code
            passed in from the list of answers. This is in a CSV
            format created by the XSL stylesheet or by the function
            saveAnswers()

            @param answerList: text of '"code","answer"' lines
            @param qstnCode: the question code to look for
            @return: the answer string, or None if the code is absent
        """
        start = answerList.find(qstnCode)
        if start == -1:
            return None
        # Skip past the code itself plus the 3-character '","' separator
        start = start + len(qstnCode) + 3
        end = answerList.find('"', start)
        answer = answerList[start:end]
        return answer

    # -------------------------------------------------------------------------
    @staticmethod
    def complete_onvalidate(form):
        """
            Validate a completed response: the series and answer list
            must be present, and every answered question code must
            belong to the series.
        """
        T = current.T
        form_vars = form.vars
        if "series_id" not in form_vars or form_vars.series_id == None:
            form.errors.series_id = T("Series details missing")
            return False
        if "answer_list" not in form_vars or form_vars.answer_list == None:
            form.errors.answer_list = T("The answers are missing")
            return False
        series_id = form_vars.series_id
        answer_list = form_vars.answer_list
        qstn_list = survey_getAllQuestionsForSeries(series_id)
        qstns = []
        for qstn in qstn_list:
            qstns.append(qstn["code"])
        answerList = answer_list.splitlines(True)
        for answer in answerList:
            qstn_code = answer[1:answer.find('","')]
            if qstn_code not in qstns:
                msg = "%s: %s" % (T("Unknown question code"), qstn_code)
                # Bug fix: test for the *key* "answer_list" in form.errors
                # (previously the answer text itself was tested, so any
                # earlier message was always overwritten)
                if "answer_list" not in form.errors:
                    form.errors.answer_list = msg
                else:
                    form.errors.answer_list += msg
        return True

    # -------------------------------------------------------------------------
    @staticmethod
    def complete_onaccept(form):
        """
            All of the answers will be stored in the answer_list in the
            format "code","answer"
            They will then be inserted into the survey_answer table
            each item will be a record on that table.

            This will also extract the default location question as
            defined by the template and store this in the location field
        """
        if form.vars.id:
            S3SurveyCompleteModel.completeOnAccept(form.vars.id)

    # -------------------------------------------------------------------------
    @staticmethod
    def completeOnAccept(complete_id):
        """
            Post-process a completed response: split the answer_list
            into survey_answer records, purge any cached charts for
            the series, and store the default location answer in the
            survey_complete.location field.

            @param complete_id: the id of the survey_complete record
        """
        # Get the basic data that is needed
        s3db = current.s3db
        rtable = s3db.survey_complete
        atable = s3db.survey_answer
        record = rtable[complete_id]
        series_id = record.series_id
        purgePrefix = "survey_series_%s" % series_id
        S3Chart.purgeCache(purgePrefix)
        if series_id == None:
            return
        # Save all the answers from answerList in the survey_answer table
        answerList = record.answer_list
        S3SurveyCompleteModel.importAnswers(complete_id, answerList)
        # Extract the default template location question and save the
        # answer in the location field
        templateRec = survey_getTemplateFromSeries(series_id)
        locDetails = templateRec["location_detail"]
        if not locDetails:
            return
        widgetObj = get_default_location(complete_id)
        if widgetObj:
            current.db(rtable.id == complete_id).update(location = widgetObj.repr())
        locations = get_location_details(complete_id)
        S3SurveyCompleteModel.importLocations(locations)

    # -------------------------------------------------------------------------
    @staticmethod
    def importAnswers(id, list):
        """
            private function used to save the answer_list stored in
            survey_complete into answer records held in survey_answer

            @param id: the id of the survey_complete record
            @param list: the answer_list text ('"code","answer"' lines)

            NOTE: the parameter names shadow builtins; they are kept for
            backwards compatibility and aliased to local names below.
        """
        import csv
        import os
        try:
            from cStringIO import StringIO # Faster, where available
        except ImportError:
            from StringIO import StringIO

        complete_id = id
        answer_text = list

        strio = StringIO()
        strio.write(answer_text)
        strio.seek(0)

        answer = []
        append = answer.append
        reader = csv.reader(strio)
        for row in reader:
            if row != None:
                # Prefix each answer with the complete_id it belongs to
                row.insert(0, complete_id)
                append(row)

        from tempfile import TemporaryFile
        csvfile = TemporaryFile()
        writer = csv.writer(csvfile)
        writerow = writer.writerow
        writerow(["complete_id", "question_code", "value"])
        for row in answer:
            writerow(row)
        csvfile.seek(0)
        # Import via the standard S3CSV pipeline using the answer.xsl
        # stylesheet so that question codes are resolved to question ids
        xsl = os.path.join("applications",
                           current.request.application,
                           "static",
                           "formats",
                           "s3csv",
                           "survey",
                           "answer.xsl")
        resource = current.s3db.resource("survey_answer")
        resource.import_xml(csvfile, stylesheet = xsl, format="csv",)

    # -------------------------------------------------------------------------
    @staticmethod
    def importLocations(location_dict):
        """
            private function used to save the locations to gis.location

            @param location_dict: dict of location question code ->
                                  answered widget, as returned by
                                  get_location_details()
        """
        import csv
        import os

        lastLocWidget = None
        codeList = ["STD-L0","STD-L1","STD-L2","STD-L3","STD-L4"]
        headingList = ["Country",
                       "ADM1_NAME",
                       "ADM2_NAME",
                       "ADM3_NAME",
                       "ADM4_NAME"
                       ]
        cnt = 0
        answer = []
        headings = []
        aappend = answer.append
        happend = headings.append
        # Collect the answered Lx levels (in hierarchy order) and the
        # matching CSV column headings
        for loc in codeList:
            if loc in location_dict:
                aappend(location_dict[loc].repr())
                lastLocWidget = location_dict[loc]
                happend(headingList[cnt])
            cnt += 1
        # Check that we have at least one location question answered
        if lastLocWidget == None:
            return
        codeList = ["STD-P-Code","STD-Lat","STD-Lon"]
        for loc in codeList:
            if loc in location_dict:
                aappend(location_dict[loc].repr())
            else:
                aappend("")

        from tempfile import TemporaryFile
        csvfile = TemporaryFile()
        writer = csv.writer(csvfile)
        headings += ["Code2", "Lat", "Lon"]
        writer.writerow(headings)
        writer.writerow(answer)
        csvfile.seek(0)
        # Import via the standard S3CSV pipeline for gis_location
        xsl = os.path.join("applications",
                           current.request.application,
                           "static",
                           "formats",
                           "s3csv",
                           "gis",
                           "location.xsl")
        resource = current.s3db.resource("gis_location")
        resource.import_xml(csvfile, stylesheet = xsl, format="csv",)

    # -------------------------------------------------------------------------
    @staticmethod
    def survey_complete_duplicate(job):
        """
            Rules for finding a duplicate:
                - Look for a record with the same answer_list
        """
        if job.tablename == "survey_complete":
            table = job.table
            data = job.data
            answers = "answer_list" in data and data.answer_list
            query = (table.answer_list == answers)
            try:
                return duplicator(job, query)
            except:
                # if this is part of an import then the select will throw an error
                # if the question code doesn't exist.
                # This can happen during an import if the wrong file is used.
                return

    # -------------------------------------------------------------------------
    @staticmethod
    def answer_onaccept(form):
        """
            Some question types may require additional processing:
            give the question's widget a chance to transform the
            stored value after the record has been accepted.
        """
        form_vars = form.vars
        if form_vars.complete_id and form_vars.question_id:
            atable = current.s3db.survey_answer
            complete_id = form_vars.complete_id
            question_id = form_vars.question_id
            value = form_vars.value
            widgetObj = survey_getWidgetFromQuestion(question_id)
            newValue = widgetObj.onaccept(value)
            if newValue != value:
                query = (atable.question_id == question_id) & \
                        (atable.complete_id == complete_id)
                current.db(query).update(value = newValue)

    # -------------------------------------------------------------------------
    @staticmethod
    def survey_answer_duplicate(job):
        """
            Rules for finding a duplicate:
                - Look for a record with the same complete_id and question_id
        """
        if job.tablename == "survey_answer":
            table = job.table
            data = job.data
            qid = "question_id" in data and data.question_id
            rid = "complete_id" in data and data.complete_id
            query = (table.question_id == qid) & \
                    (table.complete_id == rid)
            return duplicator(job, query)
# =============================================================================
def survey_answerlist_dataTable_pre():
    """
        The answer list has been removed for the moment. Currently it
        displays all answers for a summary it would be better to
        be able to display just a few select answers
    """
    current.s3db.configure("survey_complete",
                           list_fields = ["created_on",
                                          "series_id",
                                          "location",
                                          "modified_by",
                                          ])
# =============================================================================
def survey_answerlist_dataTable_post(r):
    """
        Replace Action Buttons
    """
    #S3CRUD.action_buttons(r)
    update_url = URL(c="survey", f="series",
                     args=[r.id, "complete", "[id]", "update"])
    current.response.s3.actions = [dict(label=current.messages["UPDATE"],
                                        _class="action-btn edit",
                                        url=update_url,
                                        ),
                                   ]
# =============================================================================
def survey_answer_list_represent(value):
    """
        Display the answer list in a formatted table.
        Displaying the full question (rather than the code)
        and the answer.

        @param value: the raw answer_list text, one '"code","answer"'
                      line per answer
        @return: a TABLE of (question name, answer) rows; lines whose
                 question code cannot be resolved are skipped
    """
    db = current.db
    qtable = current.s3db.survey_question
    # renamed from "list" to avoid shadowing the builtin
    lines = value.splitlines()
    result = TABLE()
    questions = {} # cache of code -> question name lookups
    xml_decode = S3Codec.xml_decode
    for line in lines:
        line = xml_decode(line)
        (question, answer) = line.split(",",1)
        question = question.strip("\" ")
        if question in questions:
            question = questions[question]
        else:
            query = (qtable.code == question)
            qstn = db(query).select(qtable.name,
                                    limitby=(0, 1)).first()
            if not qstn:
                # Unknown question code: skip this line
                continue
            questions[question] = qstn.name
            question = qstn.name
        answer = answer.strip("\" ")
        result.append(TR(TD(B(question)), TD(answer)))
    return result
# =============================================================================
def get_location_details(complete_id):
    """
        It will return a dict of values for all of the standard location
        questions that have been answered
    """
    db = current.db
    s3db = current.s3db

    comtable = s3db.survey_complete
    qsntable = s3db.survey_question
    answtable = s3db.survey_answer

    base_query = (answtable.question_id == qsntable.id) & \
                 (answtable.complete_id == comtable.id)

    locations = {}
    # All of the standard location question codes
    for code in ("STD-P-Code",
                 "STD-L0", "STD-L1", "STD-L2", "STD-L3", "STD-L4",
                 "STD-Lat", "STD-Lon"):
        row = db(base_query & (qsntable.code == code)).select(qsntable.id,
                                                              limitby=(0, 1)
                                                              ).first()
        if row:
            widget = survey_getWidgetFromQuestion(row.id)
            widget.loadAnswer(complete_id, row.id)
            locations[code] = widget
    return locations
# =============================================================================
def get_default_location(complete_id):
    """
        It will check each standard location question in
        the hierarchy until either one is found or none are found
    """
    db = current.db
    s3db = current.s3db

    comtable = s3db.survey_complete
    qsntable = s3db.survey_question
    answtable = s3db.survey_answer

    base_query = (answtable.question_id == qsntable.id) & \
                 (answtable.complete_id == comtable.id)

    # Walk from the most specific level (L4) up to the country (L0)
    for code in ("STD-L4", "STD-L3", "STD-L2", "STD-L1", "STD-L0"):
        row = db(base_query & (qsntable.code == code)).select(qsntable.id,
                                                              limitby=(0, 1)
                                                              ).first()
        if row:
            widget = survey_getWidgetFromQuestion(row.id)
            widget.loadAnswer(complete_id, row.id)
            return widget
    # No standard location question was answered
    return None
# =============================================================================
def survey_getAllAnswersForQuestionInSeries(question_id, series_id):
    """
        function to return all the answers for a given question
        from with a specified series
    """
    s3db = current.s3db
    ctable = s3db.survey_complete
    atable = s3db.survey_answer
    query = (atable.question_id == question_id) & \
            (atable.complete_id == ctable.id) & \
            (ctable.series_id == series_id)
    rows = current.db(query).select(atable.id,
                                    atable.value,
                                    atable.complete_id)
    # One dict per answer record
    return [{"answer_id": row.id,
             "value": row.value,
             "complete_id": row.complete_id,
             } for row in rows]
# =============================================================================
def buildTableFromCompletedList(dataSource):
    """
        Render a completed-list data structure (header row, type row,
        then data rows) as an S3DataTable form
    """
    # Row 0 holds the column labels; row 1 (the column types) is skipped
    headers = dataSource[0]
    items = dataSource[2:]

    hr = TR(*[TH(label) for label in headers])
    body = TBODY(*[TR(*[TD(cell) for cell in row]) for row in items])

    table = TABLE(THEAD(hr),
                  body,
                  _id="completed_list",
                  _class="dataTable display")

    # Turn off server side pagination
    current.response.s3.no_sspag = True

    attr = S3DataTable.getConfigData()
    form = S3DataTable.htmlConfig(table,
                                  "completed_list",
                                  [[0, 'asc']], # order by
                                  "", # the filter string
                                  None, # the rfields
                                  **attr
                                  )
    return form
# =============================================================================
def buildCompletedList(series_id, question_id_list):
    """
        build a list of completed items for the series including
        just the questions in the list passed in

        The list will come in three parts.
        1) The first row is the header (list of field labels)
        2) The seconds row is the type of each column
        3) The remaining rows are the data

        @param series_id: The id of the series
        @param question_id_list: The list of questions to display
    """
    db = current.db
    qtable = current.s3db.survey_question

    headers = []
    types = []
    items = []
    rowLen = len(question_id_list)
    complete_lookup = {} # complete_id -> row index into items

    for col, question_id in enumerate(question_id_list):
        answers = survey_getAllAnswersForQuestionInSeries(question_id,
                                                          series_id)
        widgetObj = survey_getWidgetFromQuestion(question_id)
        question = db(qtable.id == question_id).select(qtable.name,
                                                       limitby=(0, 1)
                                                       ).first()
        headers.append(question.name)
        types.append(widgetObj.db_type())

        for answer in answers:
            complete_id = answer["complete_id"]
            if complete_id not in complete_lookup:
                # First answer for this response: allocate a blank row
                complete_lookup[complete_id] = len(complete_lookup)
                items.append([''] * rowLen)
            row = complete_lookup[complete_id]
            items[row][col] = widgetObj.repr(answer["value"])

    return [headers] + [types] + items
# =============================================================================
def getLocationList(series_id):
    """
        Get a list of the LatLons for each Response in a Series

        Parses each response's raw answer_list text, picking out the
        STD-Lat/STD-Lon answers and the lowest answered Lx level for
        the display name. Responses without usable coordinates fall
        back to the default location widget.

        @param series_id: the id of the series
        @return: list of Row-like objects with lat, lon, name and
                 complete_id attributes
    """
    response_locations = []
    rappend = response_locations.append
    codeList = ["STD-L4", "STD-L3", "STD-L2", "STD-L1", "STD-L0"]

    table = current.s3db.survey_complete
    rows = current.db(table.series_id == series_id).select(table.id,
                                                           table.answer_list)
    for row in rows:
        lat = None
        lon = None
        name = None
        # answer_list is one '"code","answer"' line per answer
        answer_list = row.answer_list.splitlines()
        answer_dict = {}
        for line in answer_list:
            (question, answer) = line.split(",", 1)
            question = question.strip('"')
            if question in codeList:
                # Store to get the name
                answer_dict[question] = answer.strip('"')
            elif question == "STD-Lat":
                try:
                    lat = float(answer.strip('"'))
                except:
                    pass
                else:
                    # Parsed OK: discard values outside the valid range
                    if lat < -90.0 or lat > 90.0:
                        lat = None
            elif question == "STD-Lon":
                try:
                    lon = float(answer.strip('"'))
                except:
                    pass
                else:
                    # Parsed OK: discard values outside the valid range
                    if lon < -180.0 or lon > 180.0:
                        lon = None
            else:
                # Not relevant here
                continue

        for locCode in codeList:
            # Retrieve the name of the lowest Lx
            if locCode in answer_dict:
                name = answer_dict[locCode]
                break

        if lat and lon:
            # We have sufficient data to display on the map
            location = Row()
            location.lat = lat
            location.lon = lon
            location.name = name
            location.complete_id = row.id
            rappend(location)
        else:
            # The lat & lon were not added to the assessment so try and get one
            locWidget = get_default_location(row.id)
            if locWidget:
                complete_id = locWidget.question["complete_id"]
                if "answer" not in locWidget.question:
                    continue
                answer = locWidget.question["answer"]
                if locWidget != None:
                    record = locWidget.getLocationRecord(complete_id, answer)
                    if len(record.records) == 1:
                        location = record.records[0].gis_location
                        location.complete_id = complete_id
                        rappend(location)

    return response_locations
# =============================================================================
class S3SurveyTranslateModel(S3Model):
    """
        Translations Model

        Holds, per template, the languages a template has been
        translated into, together with the uploaded translation
        spreadsheet. On accept the spreadsheet is merged into the
        language file for that code.
    """

    from gluon.languages import read_dict, write_dict

    names = ["survey_translate"]

    def model(self):
        """
            Define the survey_translate table
        """

        T = current.T

        # ---------------------------------------------------------------------
        # The survey_translate table holds the details of the language
        # for which the template has been translated into.

        LANG_HELP = T("This is the full name of the language and will be displayed to the user when selecting the template language.")
        CODE_HELP = T("This is the short code of the language and will be used as the name of the file. This should be the ISO 639 code.")
        tablename = "survey_translate"
        table = self.define_table(tablename,
                                  self.survey_template_id(),
                                  Field("language",
                                        readable=True,
                                        writable=True,
                                        comment = DIV(_class="tooltip",
                                                      _title="%s|%s" % (T("Language"),
                                                                        LANG_HELP))
                                        ),
                                  Field("code",
                                        readable=True,
                                        writable=True,
                                        comment = DIV(_class="tooltip",
                                                      _title="%s|%s" % (T("Language Code"),
                                                                        CODE_HELP))
                                        ),
                                  # The uploaded translation spreadsheet
                                  Field("file", "upload",
                                        autodelete=True),
                                  Field("filename",
                                        readable=False,
                                        writable=False),
                                  *s3_meta_fields())

        current.response.s3.crud_strings[tablename] = Storage(
            title_create = T("Add new translation language"),
        )

        self.configure(tablename,
                       onaccept = self.translate_onaccept,
                       )
        # ---------------------------------------------------------------------
        return Storage()

    # -------------------------------------------------------------------------
    @staticmethod
    def translate_onaccept(form):
        """
            If the translation spreadsheet has been uploaded then
            it needs to be processed.

            The translation strings need to be extracted from
            the spreadsheet and inserted into the language file.

            Expects a worksheet named after the language, with the
            original string in column 0 and its translation in
            column 1 (one header row).
        """
        if "file" in form.vars:
            try:
                import xlrd
            except ImportError:
                # NOTE: Python 2 print syntax; also assumes sys is
                # imported at module level - TODO confirm
                print >> sys.stderr, "ERROR: xlrd & xlwt modules are needed for importing spreadsheets"
                return None

            from gluon.languages import read_dict, write_dict

            T = current.T
            request =  current.request
            response = current.response

            msgNone = T("No translations exist in spreadsheet")
            upload_file = request.post_vars.file
            upload_file.file.seek(0)
            openFile = upload_file.file.read()
            lang = form.record.language
            code = form.record.code
            try:
                workbook = xlrd.open_workbook(file_contents=openFile)
            except:
                msg = T("Unable to open spreadsheet")
                response.error = msg
                response.flash = None
                return
            try:
                # The worksheet must be named after the language
                sheetL = workbook.sheet_by_name(lang)
            except:
                msg = T("Unable to find sheet %(sheet_name)s in uploaded spreadsheet") % \
                    dict(sheet_name=lang)
                response.error = msg
                response.flash = None
                return
            if sheetL.ncols == 1:
                # Only the original-string column exists: nothing to import
                response.warning = msgNone
                response.flash = None
                return
            count = 0
            lang_fileName = "applications/%s/uploads/survey/translations/%s.py" % \
                (request.application, code)
            try:
                # Merge into any existing language file
                strings = read_dict(lang_fileName)
            except:
                strings = dict()
            for row in xrange(1, sheetL.nrows):
                original = sheetL.cell_value(row, 0)
                translation = sheetL.cell_value(row, 1)
                # Add new strings; overwrite only with non-empty translations
                if (original not in strings) or translation != "":
                    strings[original] = translation
                    count += 1
            write_dict(lang_fileName, strings)
            if count == 0:
                response.warning = msgNone
                response.flash = None
            else:
                response.flash = T("%(count_of)d translations have been imported to the %(language)s language file") % \
                    dict(count_of=count, language=lang)
# =============================================================================
def survey_getAllTranslationsForTemplate(template_id):
    """
        Function to return all the translations for the given template
    """
    ttable = current.s3db.survey_translate
    rows = current.db(ttable.template_id == template_id).select()
    return rows
# =============================================================================
def survey_getAllTranslationsForSeries(series_id):
    """
        Function to return all the translations for the given series

        @param series_id: the id of the series
        @return: the translations for the series' template, or an
                 empty list if the series cannot be found
    """
    table = current.s3db.survey_series
    row = current.db(table.id == series_id).select(table.template_id,
                                                   limitby=(0, 1)).first()
    if row is None:
        # Unknown series: previously this raised an AttributeError
        return []
    return survey_getAllTranslationsForTemplate(row.template_id)
# =============================================================================
# Generic function called by the duplicator methods to determine if the
# record already exists on the database.
def duplicator(job, query):
"""
This callback will be called when importing records it will look
to see if the record being imported is a duplicate.
@param job: An S3ImportJob object which includes all the details
of the record being imported
If the record is a duplicate then it will set the job method to update
"""
table = job.table
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
job.id = _duplicate.id
job.data.id = _duplicate.id
job.method = job.METHOD.UPDATE
# END =========================================================================
| mit |
SiddheshK15/android_kernel_yu_msm8916_gcc5 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
	sys.exit(usage)
if len(sys.argv) > 2:
	# Both [comm] and [interval] were given
	for_comm = sys.argv[1]
	interval = int(sys.argv[2])
elif len(sys.argv) > 1:
	# Single argument: an integer is [interval], anything else is [comm]
	try:
		interval = int(sys.argv[1])
	except ValueError:
		for_comm = sys.argv[1]
		interval = default_interval
# Per-syscall-id event counters, reset on every display refresh
syscalls = autodict()
def trace_begin():
	# Run the periodic display loop on a background thread so that
	# event processing in the main thread is not blocked
	thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Skip events from other commands when filtering on [comm]
	if for_comm is not None and common_comm != for_comm:
		return
	try:
		syscalls[id] += 1
	except TypeError:
		# First occurrence of this syscall id: initialise its counter
		syscalls[id] = 1
def print_syscall_totals(interval):
	# Refresh loop: clear the terminal, print the totals sorted by
	# count (highest first), reset the counters, then sleep
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s  %10s\n" % ("event", "count"),
		print "%-40s  %10s\n" % ("----------------------------------------", \
					 "----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
					      reverse = True):
			try:
				print "%-40s  %10d\n" % (syscall_name(id), val),
			except TypeError:
				# id not yet resolvable to a name: skip this row
				pass
		syscalls.clear()
		time.sleep(interval)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.