repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
huihoo/reader | apps/rss_feeds/text_importer.py | 1 | 1701 | import requests
import zlib
from django.conf import settings
from vendor.readability import readability
from utils import log as logging
class TextImporter:
    """Fetches the full original text of a story's permalink and extracts the
    readable article body with readability.

    Unless ``skip_save`` is passed, the extracted HTML is zlib-compressed and
    stored on the story as ``original_text_z``.
    """

    def __init__(self, story, request=None):
        self.story = story
        self.request = request  # only used to attribute log lines to a user

    @property
    def headers(self):
        # Identify the fetcher to remote servers; some sites reject requests
        # with a missing or generic User-Agent.
        return {
            'User-Agent': 'NewsBlur Content Fetcher - %s '
                          '(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) '
                          'AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 '
                          'Safari/534.48.3)' % (
                              settings.NEWSBLUR_URL
                          ),
            'Connection': 'close',
        }

    def fetch(self, skip_save=False):
        """Download and extract the story's original text.

        Returns the extracted HTML fragment, or None when the fetch or the
        extraction failed.
        """
        try:
            # Time out instead of hanging forever on an unresponsive host.
            html = requests.get(self.story.story_permalink,
                                headers=self.headers, timeout=30)
            original_text_doc = readability.Document(html.text, url=html.url,
                                                     debug=settings.DEBUG)
            content = original_text_doc.summary(html_partial=True)
        except Exception:
            # Best-effort fetch: any network or parse failure simply means no
            # original text. (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            content = None
        if content:
            if not skip_save:
                self.story.original_text_z = zlib.compress(content)
                self.story.save()
            logging.user(self.request, "~SN~FYFetched ~FGoriginal text~FY: now ~SB%s bytes~SN vs. was ~SB%s bytes" % (
                len(unicode(content)),
                self.story.story_content_z and len(zlib.decompress(self.story.story_content_z))
            ))
        else:
            logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: was ~SB%s bytes" % (
                len(zlib.decompress(self.story.story_content_z))
            ))
        return content
bjko/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py | 121 | 4733 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.net.buildbot import Builder
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.port.test import TestPort
from webkitpy.tool.commands.perfalizer import PerfalizerTask
from webkitpy.tool.mocktool import MockTool
class PerfalizerTaskTest(unittest.TestCase):
    def _create_and_run_perfalizer(self, commands_to_fail=None):
        """Run a PerfalizerTask with the given webkit-patch commands stubbed
        to fail, and return the list of messages it logged."""
        # Avoid the mutable-default-argument pitfall; treat None as "no
        # commands fail".
        commands_to_fail = commands_to_fail or []
        tool = MockTool()
        patch = tool.bugs.fetch_attachment(10000)
        logs = []

        def logger(message):
            logs.append(message)

        def run_webkit_patch(args):
            if args[0] in commands_to_fail:
                raise ScriptError

        def run_perf_test(build_path, description):
            self.assertTrue(description == 'without 10000' or description == 'with 10000')
            if 'run-perf-tests' in commands_to_fail:
                return -1
            if 'results-page' not in commands_to_fail:
                tool.filesystem.write_text_file(tool.filesystem.join(build_path, 'PerformanceTestResults.html'), 'results page')
            return 0

        perfalizer = PerfalizerTask(tool, patch, logger)
        perfalizer._port = TestPort(tool)
        perfalizer.run_webkit_patch = run_webkit_patch
        perfalizer._run_perf_test = run_perf_test
        capture = OutputCapture()
        capture.capture_output()
        if commands_to_fail:
            self.assertFalse(perfalizer.run())
        else:
            self.assertTrue(perfalizer.run())
        capture.restore_output()
        return logs

    def test_run(self):
        self.assertEqual(self._create_and_run_perfalizer(), [
            'Preparing to run performance tests for the attachment 10000...',
            'Building WebKit at r1234 without the patch',
            'Building WebKit at r1234 with the patch',
            'Running performance tests...',
            'Uploaded the results on the bug 50000'])

    def test_run_with_clean_fails(self):
        self.assertEqual(self._create_and_run_perfalizer(['clean']), [
            'Preparing to run performance tests for the attachment 10000...',
            'Unable to clean working directory'])

    def test_run_with_update_fails(self):
        logs = self._create_and_run_perfalizer(['update'])
        self.assertEqual(len(logs), 2)
        self.assertEqual(logs[-1], 'Unable to update working directory')

    def test_run_with_build_fails(self):
        logs = self._create_and_run_perfalizer(['build'])
        self.assertEqual(len(logs), 3)

    # Renamed: this method used to also be called test_run_with_build_fails,
    # which silently shadowed the build-failure test above so it never ran.
    def test_run_with_apply_attachment_fails(self):
        logs = self._create_and_run_perfalizer(['apply-attachment'])
        self.assertEqual(len(logs), 4)

    def test_run_with_perf_test_fails(self):
        logs = self._create_and_run_perfalizer(['run-perf-tests'])
        self.assertEqual(len(logs), 5)
        self.assertEqual(logs[-1], 'Failed to run performance tests without the patch.')

    def test_run_without_results_page(self):
        logs = self._create_and_run_perfalizer(['results-page'])
        self.assertEqual(len(logs), 5)
        self.assertEqual(logs[-1], 'Failed to generate the results page.')
| bsd-3-clause |
tropp/acq4 | acq4/util/HelpfulException.py | 4 | 1453 | ## test to see if new branch is working
import sys
class HelpfulException(Exception):
    """Exception that can carry a stack of nested exceptions.

    Initialization:
        message: the error message shown to the user,
            e.g. "Device could not be found."
        exc: the original exception object; defaults to the exception
            currently being handled (``sys.exc_info()``).
        reasons: possible causes, e.g. "a. Device initialization failed
            during startup. b. Device Gui was closed."
        docs: referral to documentation.

    When you catch a HelpfulException, add additional information to the
    original exception, e.g.
    ``self.prependErr("Protocol initiation failed. ", exc,
    reasons="a. A device could not be found.", docs='')``.
    """
    def __init__(self, message='', exc=None, reasons=None, docs=None, **kwargs):
        Exception.__init__(self, message)
        self.kwargs = kwargs
        # When no original exception was supplied, capture whatever is
        # currently being handled (a (type, value, traceback) tuple).
        self.oldExc = sys.exc_info() if exc is None else exc
        self.reasons = [] if reasons is None else reasons
        self.docs = [] if docs is None else docs
| mit |
craynot/django | django/db/models/expressions.py | 74 | 34365 | import copy
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q, refs_aggregate
from django.utils import six, timezone
from django.utils.functional import cached_property
class Combinable(object):
    """
    Provides the ability to combine one or two objects with
    some connector. For example F('foo') + F('bar').

    Each arithmetic/bitwise dunder below delegates to _combine(), which
    wraps plain Python values in expression nodes and returns a
    CombinedExpression tree.
    """
    # Arithmetic connectors
    ADD = '+'
    SUB = '-'
    MUL = '*'
    DIV = '/'
    POW = '^'
    # The following is a quoted % operator - it is quoted because it can be
    # used in strings that also have parameter substitution.
    MOD = '%%'
    # Bitwise operators - note that these are generated by .bitand()
    # and .bitor(), the '&' and '|' are reserved for boolean operator
    # usage.
    BITAND = '&'
    BITOR = '|'
    def _combine(self, other, connector, reversed, node=None):
        # Coerce non-expression operands (ints, strings, timedeltas, ...)
        # into expression nodes before building the combined tree.
        if not hasattr(other, 'resolve_expression'):
            # everything must be resolvable to an expression
            if isinstance(other, datetime.timedelta):
                other = DurationValue(other, output_field=fields.DurationField())
            else:
                other = Value(other)
        # `reversed` is True for the __r*__ variants, where `self` is the
        # right-hand operand.
        if reversed:
            return CombinedExpression(other, connector, self)
        return CombinedExpression(self, connector, other)
    #############
    # OPERATORS #
    #############
    def __add__(self, other):
        return self._combine(other, self.ADD, False)
    def __sub__(self, other):
        return self._combine(other, self.SUB, False)
    def __mul__(self, other):
        return self._combine(other, self.MUL, False)
    def __truediv__(self, other):
        return self._combine(other, self.DIV, False)
    def __div__(self, other):  # Python 2 compatibility
        return type(self).__truediv__(self, other)
    def __mod__(self, other):
        return self._combine(other, self.MOD, False)
    def __pow__(self, other):
        return self._combine(other, self.POW, False)
    def __and__(self, other):
        # '&' is reserved for Q-object boolean logic; bitwise AND must be
        # requested explicitly via .bitand().
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
    def bitand(self, other):
        return self._combine(other, self.BITAND, False)
    def __or__(self, other):
        # '|' is reserved for Q-object boolean logic; bitwise OR must be
        # requested explicitly via .bitor().
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
    def bitor(self, other):
        return self._combine(other, self.BITOR, False)
    # Reflected variants: invoked when the expression is the right-hand
    # operand of the Python operator.
    def __radd__(self, other):
        return self._combine(other, self.ADD, True)
    def __rsub__(self, other):
        return self._combine(other, self.SUB, True)
    def __rmul__(self, other):
        return self._combine(other, self.MUL, True)
    def __rtruediv__(self, other):
        return self._combine(other, self.DIV, True)
    def __rdiv__(self, other):  # Python 2 compatibility
        return type(self).__rtruediv__(self, other)
    def __rmod__(self, other):
        return self._combine(other, self.MOD, True)
    def __rpow__(self, other):
        return self._combine(other, self.POW, True)
    def __rand__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
    def __ror__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
class BaseExpression(object):
    """
    Base class for all query expressions.

    Subclasses expose their children via get_source_expressions() /
    set_source_expressions(); most of the machinery below (resolution,
    output-field inference, aggregate detection) walks that tree.
    """
    # aggregate specific fields
    is_summary = False
    def __init__(self, output_field=None):
        # The user-supplied output field, or None to infer it lazily from
        # the source expressions (see _resolve_output_field()).
        self._output_field = output_field
    def get_db_converters(self, connection):
        return [self.convert_value] + self.output_field.get_db_converters(connection)
    def get_source_expressions(self):
        # Leaf expressions have no children; container subclasses override.
        return []
    def set_source_expressions(self, exprs):
        assert len(exprs) == 0
    def _parse_expressions(self, *expressions):
        # Coerce each argument into an expression node: strings become field
        # references (F), other plain values become literals (Value).
        return [
            arg if hasattr(arg, 'resolve_expression') else (
                F(arg) if isinstance(arg, six.string_types) else Value(arg)
            ) for arg in expressions
        ]
    def as_sql(self, compiler, connection):
        """
        Responsible for returning a (sql, [params]) tuple to be included
        in the current query.
        Different backends can provide their own implementation, by
        providing an `as_{vendor}` method and patching the Expression:
        ```
        def override_as_sql(self, compiler, connection):
            # custom logic
            return super(Expression, self).as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
        ```
        Arguments:
         * compiler: the query compiler responsible for generating the query.
           Must have a compile method, returning a (sql, [params]) tuple.
           Calling compiler(value) will return a quoted `value`.
         * connection: the database connection used for the current query.
        Returns: (sql, params)
          Where `sql` is a string containing ordered sql parameters to be
          replaced with the elements of the list `params`.
        """
        raise NotImplementedError("Subclasses must implement as_sql()")
    @cached_property
    def contains_aggregate(self):
        # True when any sub-expression is (or contains) an aggregate.
        for expr in self.get_source_expressions():
            if expr and expr.contains_aggregate:
                return True
        return False
    @cached_property
    def contains_column_references(self):
        # True when any sub-expression references a database column.
        for expr in self.get_source_expressions():
            if expr and expr.contains_column_references:
                return True
        return False
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """
        Provides the chance to do any preprocessing or validation before being
        added to the query.
        Arguments:
         * query: the backend query implementation
         * allow_joins: boolean allowing or denying use of joins
           in this query
         * reuse: a set of reusable joins for multijoins
         * summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a save or update
        Returns: an Expression to be added to the query.
        """
        # Work on a copy so the unresolved expression can be reused.
        c = self.copy()
        c.is_summary = summarize
        c.set_source_expressions([
            expr.resolve_expression(query, allow_joins, reuse, summarize)
            for expr in c.get_source_expressions()
        ])
        return c
    def _prepare(self):
        """
        Hook used by Field.get_prep_lookup() to do custom preparation.
        """
        return self
    @property
    def field(self):
        # Alias kept for backward compatibility with code expecting `.field`.
        return self.output_field
    @cached_property
    def output_field(self):
        """
        Returns the output type of this expression.

        Raises FieldError when no type can be resolved.
        """
        if self._output_field_or_none is None:
            raise FieldError("Cannot resolve expression type, unknown output_field")
        return self._output_field_or_none
    @cached_property
    def _output_field_or_none(self):
        """
        Returns the output field of this expression, or None if no output type
        can be resolved. Note that the 'output_field' property will raise
        FieldError if no type can be resolved, but this attribute allows for
        None values.
        """
        if self._output_field is None:
            self._resolve_output_field()
        return self._output_field
    def _resolve_output_field(self):
        """
        Attempts to infer the output type of the expression. If the output
        fields of all source fields match then we can simply infer the same
        type here. This isn't always correct, but it makes sense most of the
        time.
        Consider the difference between `2 + 2` and `2 / 3`. Inferring
        the type here is a convenience for the common case. The user should
        supply their own output_field with more complex computations.
        If a source does not have an `_output_field` then we exclude it from
        this check. If all sources are `None`, then an error will be thrown
        higher up the stack in the `output_field` property.
        """
        if self._output_field is None:
            sources = self.get_source_fields()
            num_sources = len(sources)
            if num_sources == 0:
                self._output_field = None
            else:
                # Adopt the first non-None source field; any later source of
                # a different field class means the types are mixed.
                for source in sources:
                    if self._output_field is None:
                        self._output_field = source
                    if source is not None and not isinstance(self._output_field, source.__class__):
                        raise FieldError(
                            "Expression contains mixed types. You must set output_field")
    def convert_value(self, value, expression, connection, context):
        """
        Expressions provide their own converters because users have the option
        of manually specifying the output_field which may be a different type
        from the one the database returns.
        """
        field = self.output_field
        internal_type = field.get_internal_type()
        if value is None:
            return value
        elif internal_type == 'FloatField':
            return float(value)
        elif internal_type.endswith('IntegerField'):
            return int(value)
        elif internal_type == 'DecimalField':
            return backend_utils.typecast_decimal(value)
        return value
    def get_lookup(self, lookup):
        # Lookups/transforms are delegated to the output field's registry.
        return self.output_field.get_lookup(lookup)
    def get_transform(self, name):
        return self.output_field.get_transform(name)
    def relabeled_clone(self, change_map):
        # Return a copy with table aliases renamed according to change_map.
        clone = self.copy()
        clone.set_source_expressions(
            [e.relabeled_clone(change_map) for e in self.get_source_expressions()])
        return clone
    def copy(self):
        # Shallow copy; subclasses copy their mutable containers themselves.
        c = copy.copy(self)
        c.copied = True
        return c
    def refs_aggregate(self, existing_aggregates):
        """
        Does this expression contain a reference to some of the
        existing aggregates? If so, returns the aggregate and also
        the lookup parts that *weren't* found. So, if
            existing_aggregates = {'max_id': Max('id')}
            self.name = 'max_id'
            queryset.filter(max_id__range=[10,100])
        then this method will return Max('id') and those parts of the
        name that weren't found. In this case `max_id` is found and the range
        portion is returned as ('range',).
        """
        for node in self.get_source_expressions():
            agg, lookup = node.refs_aggregate(existing_aggregates)
            if agg:
                return agg, lookup
        return False, ()
    def get_group_by_cols(self):
        # Aggregates themselves never appear in GROUP BY; only the plain
        # columns of their sub-expressions do.
        if not self.contains_aggregate:
            return [self]
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols
    def get_source_fields(self):
        """
        Returns the underlying field types used by this
        aggregate.
        """
        return [e._output_field_or_none for e in self.get_source_expressions()]
    def asc(self):
        return OrderBy(self)
    def desc(self):
        return OrderBy(self, descending=True)
    def reverse_ordering(self):
        # Leaf expressions carry no direction of their own; OrderBy overrides.
        return self
    def flatten(self):
        """
        Recursively yield this expression and all subexpressions, in
        depth-first order.
        """
        yield self
        for expr in self.get_source_expressions():
            if expr:
                for inner_expr in expr.flatten():
                    yield inner_expr
class Expression(BaseExpression, Combinable):
    """
    An expression that can be combined with other expressions.

    Concrete base class for most expression nodes: BaseExpression supplies
    the resolution machinery, Combinable the +, -, *, ... operators.
    """
    pass
class CombinedExpression(Expression):
    """Two sub-expressions joined by a connector, e.g. F('a') + Value(1)."""
    def __init__(self, lhs, connector, rhs, output_field=None):
        super(CombinedExpression, self).__init__(output_field=output_field)
        self.connector = connector
        self.lhs = lhs
        self.rhs = rhs

    def __repr__(self):
        return "<{}: {}>".format(self.__class__.__name__, self)

    def __str__(self):
        return "{} {} {}".format(self.lhs, self.connector, self.rhs)

    def get_source_expressions(self):
        return [self.lhs, self.rhs]

    def set_source_expressions(self, exprs):
        self.lhs, self.rhs = exprs

    def as_sql(self, compiler, connection):
        def _output_or_none(side):
            # A side whose type cannot be resolved simply has no output field.
            try:
                return side.output_field
            except FieldError:
                return None
        lhs_output = _output_or_none(self.lhs)
        rhs_output = _output_or_none(self.rhs)
        if not connection.features.has_native_duration_field and (
                (lhs_output and lhs_output.get_internal_type() == 'DurationField') or
                (rhs_output and rhs_output.get_internal_type() == 'DurationField')):
            # Backends without a native duration column need specialized SQL
            # for duration arithmetic.
            return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
        sql_chunks = []
        param_chunks = []
        for side in (self.lhs, self.rhs):
            side_sql, side_params = compiler.compile(side)
            sql_chunks.append(side_sql)
            param_chunks.extend(side_params)
        combined = connection.ops.combine_expression(self.connector, sql_chunks)
        # Parenthesize so operator precedence survives nesting.
        return '(%s)' % combined, param_chunks

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = self.copy()
        clone.is_summary = summarize
        clone.lhs = clone.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        clone.rhs = clone.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return clone
class DurationExpression(CombinedExpression):
    """Duration arithmetic for backends without a native DurationField."""
    def compile(self, side, compiler, connection):
        if not isinstance(side, DurationValue):
            try:
                output = side.output_field
            except FieldError:
                output = None
            if output is not None and output.get_internal_type() == 'DurationField':
                # Wrap the compiled SQL so the backend treats the column as a
                # duration in arithmetic.
                sql, params = compiler.compile(side)
                return connection.ops.format_for_duration_arithmetic(sql), params
        return compiler.compile(side)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        sql_chunks = []
        param_chunks = []
        for side in (self.lhs, self.rhs):
            side_sql, side_params = self.compile(side, compiler, connection)
            sql_chunks.append(side_sql)
            param_chunks.extend(side_params)
        combined = connection.ops.combine_duration_expression(self.connector, sql_chunks)
        # Parenthesize so operator precedence survives nesting.
        return '(%s)' % combined, param_chunks
class F(Combinable):
    """
    A reference to a field (or annotated column) on the query, resolved
    against the query at compile time.
    """
    def __init__(self, name):
        # name: the name of the field this expression references.
        self.name = name

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.name)

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # The query knows how to turn a name into a concrete column/annotation.
        return query.resolve_ref(self.name, allow_joins, reuse, summarize)

    def refs_aggregate(self, existing_aggregates):
        return refs_aggregate(self.name.split(LOOKUP_SEP), existing_aggregates)

    def asc(self):
        return OrderBy(self)

    def desc(self):
        return OrderBy(self, descending=True)
class Func(Expression):
    """
    A SQL function call.

    Subclasses usually just set `function` (the SQL function name) and
    optionally `template`/`arg_joiner`. Extra keyword arguments are stored
    in `self.extra` and interpolated into the template by as_sql().
    """
    function = None
    template = '%(function)s(%(expressions)s)'
    arg_joiner = ', '
    def __init__(self, *expressions, **extra):
        output_field = extra.pop('output_field', None)
        super(Func, self).__init__(output_field=output_field)
        # Coerce positional arguments into expression nodes.
        self.source_expressions = self._parse_expressions(*expressions)
        self.extra = extra
    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
        if extra:
            return "{}({}, {})".format(self.__class__.__name__, args, extra)
        return "{}({})".format(self.__class__.__name__, args)
    def get_source_expressions(self):
        return self.source_expressions
    def set_source_expressions(self, exprs):
        self.source_expressions = exprs
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        for pos, arg in enumerate(c.source_expressions):
            c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c
    def as_sql(self, compiler, connection, function=None, template=None):
        # NOTE: this method deliberately mutates self.extra ('function',
        # 'expressions', 'field' keys) so that the template interpolation
        # below sees both the caller-supplied values and the compiled parts.
        connection.ops.check_expression_support(self)
        sql_parts = []
        params = []
        for arg in self.source_expressions:
            arg_sql, arg_params = compiler.compile(arg)
            sql_parts.append(arg_sql)
            params.extend(arg_params)
        if function is None:
            self.extra['function'] = self.extra.get('function', self.function)
        else:
            self.extra['function'] = function
        # 'field' is a legacy alias for 'expressions' kept for old templates.
        self.extra['expressions'] = self.extra['field'] = self.arg_joiner.join(sql_parts)
        template = template or self.extra.get('template', self.template)
        return template % self.extra, params
    def as_sqlite(self, *args, **kwargs):
        sql, params = self.as_sql(*args, **kwargs)
        try:
            # SQLite has no decimal type; cast so comparisons behave numerically.
            if self.output_field.get_internal_type() == 'DecimalField':
                sql = 'CAST(%s AS NUMERIC)' % sql
        except FieldError:
            pass
        return sql, params
    def copy(self):
        # Copy the mutable containers so clones don't share state.
        copy = super(Func, self).copy()
        copy.source_expressions = self.source_expressions[:]
        copy.extra = self.extra.copy()
        return copy
class Value(Expression):
    """
    Represents a wrapped literal value as a node within an expression.
    """
    def __init__(self, value, output_field=None):
        """
        Arguments:
         * value: the value this expression represents. The value will be
           added into the sql parameter list and properly quoted.
         * output_field: an instance of the model field type that this
           expression will return, such as IntegerField() or CharField().
        """
        super(Value, self).__init__(output_field=output_field)
        self.value = value

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.value)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        val = self.value
        # Check _output_field (not the output_field property) so that a
        # missing type doesn't raise here.
        if self._output_field is not None:
            prep = (self.output_field.get_db_prep_save if self.for_save
                    else self.output_field.get_db_prep_value)
            val = prep(val, connection=connection)
        if val is None:
            # cx_Oracle does not always convert None to the appropriate
            # NULL type (like in case expressions using numbers), so we
            # use a literal SQL NULL
            return 'NULL', []
        return '%s', [val]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = super(Value, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
        clone.for_save = for_save
        return clone

    def get_group_by_cols(self):
        # A constant contributes nothing to GROUP BY.
        return []
class DurationValue(Value):
    """A literal timedelta value."""
    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        features = connection.features
        if features.has_native_duration_field and features.driver_supports_timedelta_args:
            # The driver accepts a timedelta parameter directly.
            return super(DurationValue, self).as_sql(compiler, connection)
        return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
    """Raw SQL (with parameters) usable as an expression node."""
    def __init__(self, sql, params, output_field=None):
        self.sql = sql
        self.params = params
        super(RawSQL, self).__init__(
            output_field=fields.Field() if output_field is None else output_field)

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)

    def as_sql(self, compiler, connection):
        # Parenthesize so the raw fragment composes safely with neighbors.
        return '(%s)' % self.sql, self.params

    def get_group_by_cols(self):
        return [self]
class Star(Expression):
    """The SQL '*' selector (e.g. in COUNT(*))."""
    def __repr__(self):
        return "'*'"

    def as_sql(self, compiler, connection):
        return '*', []
class Random(Expression):
    """The backend-specific SQL random() function, yielding a float."""
    def __init__(self):
        super(Random, self).__init__(output_field=fields.FloatField())

    def __repr__(self):
        return "Random()"

    def as_sql(self, compiler, connection):
        return connection.ops.random_function_sql(), []
class Col(Expression):
    """A reference to a concrete database column on a given table alias."""
    contains_column_references = True

    def __init__(self, alias, target, output_field=None):
        # The target field doubles as the output type unless overridden.
        super(Col, self).__init__(output_field=target if output_field is None else output_field)
        self.alias = alias
        self.target = target

    def __repr__(self):
        return "{}({}, {})".format(
            self.__class__.__name__, self.alias, self.target)

    def as_sql(self, compiler, connection):
        quote = compiler.quote_name_unless_alias
        return "%s.%s" % (quote(self.alias), quote(self.target.column)), []

    def relabeled_clone(self, relabels):
        new_alias = relabels.get(self.alias, self.alias)
        return self.__class__(new_alias, self.target, self.output_field)

    def get_group_by_cols(self):
        return [self]

    def get_db_converters(self, connection):
        converters = self.output_field.get_db_converters(connection)
        if self.target != self.output_field:
            # Distinct target and output types: apply both conversions.
            converters = converters + self.target.get_db_converters(connection)
        return converters
class Ref(Expression):
    """
    Reference to a column alias of the query. For example, Ref('sum_cost') in
    qs.annotate(sum_cost=Sum('cost')) query.
    """
    def __init__(self, refs, source):
        super(Ref, self).__init__()
        self.refs = refs
        self.source = source

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)

    def get_source_expressions(self):
        return [self.source]

    def set_source_expressions(self, exprs):
        self.source, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # `source` was already resolved when it was annotated onto the query;
        # this node merely refers to it by name, so there is nothing to do.
        return self

    def relabeled_clone(self, relabels):
        return self

    def as_sql(self, compiler, connection):
        return "%s" % connection.ops.quote_name(self.refs), []

    def get_group_by_cols(self):
        return [self]
class ExpressionWrapper(Expression):
    """
    Wraps another expression to supply extra context (such as the
    output_field) without altering the wrapped expression itself.
    """
    def __init__(self, expression, output_field):
        super(ExpressionWrapper, self).__init__(output_field=output_field)
        self.expression = expression

    def get_source_expressions(self):
        return [self.expression]

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def as_sql(self, compiler, connection):
        # Pure pass-through: the wrapper contributes no SQL of its own.
        return self.expression.as_sql(compiler, connection)

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
    """One WHEN <condition> THEN <result> branch of a CASE expression."""
    template = 'WHEN %(condition)s THEN %(result)s'

    def __init__(self, condition=None, then=None, **lookups):
        # Accept either a Q object or field lookups as kwargs, but not both.
        if condition is None and lookups:
            condition = Q(**lookups)
            lookups = None
        if lookups or not isinstance(condition, Q):
            raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
        super(When, self).__init__(output_field=None)
        self.condition = condition
        self.result = self._parse_expressions(then)[0]

    def __str__(self):
        return "WHEN %r THEN %r" % (self.condition, self.result)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return [self.condition, self.result]

    def set_source_expressions(self, exprs):
        self.condition, self.result = exprs

    def get_source_fields(self):
        # Only the result expression determines the output type.
        return [self.result._output_field_or_none]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = self.copy()
        clone.is_summary = summarize
        # The condition is never the saved value, so for_save is always False.
        clone.condition = clone.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
        clone.result = clone.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return clone

    def as_sql(self, compiler, connection, template=None):
        connection.ops.check_expression_support(self)
        condition_sql, condition_params = compiler.compile(self.condition)
        result_sql, result_params = compiler.compile(self.result)
        template_params = {'condition': condition_sql, 'result': result_sql}
        sql_params = list(condition_params) + list(result_params)
        return (template or self.template) % template_params, sql_params

    def get_group_by_cols(self):
        # Not a complete expression: expose the sub-expressions' columns
        # instead of self for GROUP BY purposes.
        return [col for source in self.get_source_expressions()
                for col in source.get_group_by_cols()]
class Case(Expression):
    """
    An SQL searched CASE expression:
        CASE
            WHEN n > 0 THEN 'positive'
            WHEN n < 0 THEN 'negative'
            ELSE 'zero'
        END
    """
    template = 'CASE %(cases)s ELSE %(default)s END'
    case_joiner = ' '

    def __init__(self, *cases, **extra):
        for case in cases:
            if not isinstance(case, When):
                raise TypeError("Positional arguments must all be When objects.")
        default = extra.pop('default', None)
        output_field = extra.pop('output_field', None)
        super(Case, self).__init__(output_field)
        self.cases = list(cases)
        self.default = self._parse_expressions(default)[0]

    def __str__(self):
        return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return self.cases + [self.default]

    def set_source_expressions(self, exprs):
        self.cases = exprs[:-1]
        self.default = exprs[-1]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = self.copy()
        clone.is_summary = summarize
        clone.cases = [
            case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
            for case in clone.cases
        ]
        clone.default = clone.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return clone

    def copy(self):
        clone = super(Case, self).copy()
        # Don't share the cases list with the original.
        clone.cases = clone.cases[:]
        return clone

    def as_sql(self, compiler, connection, template=None, extra=None):
        connection.ops.check_expression_support(self)
        if not self.cases:
            # Degenerate CASE with no WHEN branches: just the default value.
            return compiler.compile(self.default)
        template_params = dict(extra) if extra else {}
        case_parts = []
        sql_params = []
        for case in self.cases:
            case_sql, case_params = compiler.compile(case)
            case_parts.append(case_sql)
            sql_params.extend(case_params)
        default_sql, default_params = compiler.compile(self.default)
        sql_params.extend(default_params)
        template_params['cases'] = self.case_joiner.join(case_parts)
        template_params['default'] = default_sql
        sql = (template or self.template) % template_params
        if self._output_field_or_none is not None:
            # Some backends need an explicit cast to unify the branch types.
            sql = connection.ops.unification_cast_sql(self.output_field) % sql
        return sql, sql_params
class Date(Expression):
    """
    Add a date selection column (a column truncated to a date part).
    """
    def __init__(self, lookup, lookup_type):
        super(Date, self).__init__(output_field=fields.DateField())
        self.lookup = lookup
        # The referenced column; filled in by resolve_expression().
        self.col = None
        self.lookup_type = lookup_type

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.lookup, self.lookup_type)

    def get_source_expressions(self):
        return [self.col]

    def set_source_expressions(self, exprs):
        self.col, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = self.copy()
        clone.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
        field = clone.col.output_field
        assert isinstance(field, fields.DateField), "%r isn't a DateField." % field.name
        if settings.USE_TZ:
            # DateTimeField subclasses DateField, so rule it out explicitly
            # when time zones are active.
            assert not isinstance(field, fields.DateTimeField), (
                "%r is a DateTimeField, not a DateField." % field.name
            )
        return clone

    def as_sql(self, compiler, connection):
        sql, params = self.col.as_sql(compiler, connection)
        assert not params
        return connection.ops.date_trunc_sql(self.lookup_type, sql), []

    def copy(self):
        clone = super(Date, self).copy()
        clone.lookup = self.lookup
        clone.lookup_type = self.lookup_type
        return clone

    def convert_value(self, value, expression, connection, context):
        # Some backends hand back a datetime for a date column.
        if isinstance(value, datetime.datetime):
            value = value.date()
        return value
class DateTime(Expression):
    """
    Add a datetime selection column.
    """
    def __init__(self, lookup, lookup_type, tzinfo):
        super(DateTime, self).__init__(output_field=fields.DateTimeField())
        self.lookup = lookup
        # Resolved column reference; filled in by resolve_expression().
        self.col = None
        self.lookup_type = lookup_type
        # Precompute the timezone name once for use in the generated SQL.
        if tzinfo is None:
            self.tzname = None
        else:
            self.tzname = timezone._get_timezone_name(tzinfo)
        self.tzinfo = tzinfo

    def __repr__(self):
        return "{}({}, {}, {})".format(
            self.__class__.__name__, self.lookup, self.lookup_type, self.tzinfo)

    def get_source_expressions(self):
        return [self.col]

    def set_source_expressions(self, exprs):
        self.col, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """
        Resolve ``self.lookup`` against the query and check that it names
        a DateTimeField.
        """
        copy = self.copy()
        copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
        field = copy.col.output_field
        assert isinstance(field, fields.DateTimeField), (
            "%r isn't a DateTimeField." % field.name
        )
        return copy

    def as_sql(self, compiler, connection):
        sql, params = self.col.as_sql(compiler, connection)
        assert not(params)
        return connection.ops.datetime_trunc_sql(self.lookup_type, sql, self.tzname)

    def copy(self):
        copy = super(DateTime, self).copy()
        copy.lookup = self.lookup
        copy.lookup_type = self.lookup_type
        copy.tzname = self.tzname
        return copy

    def convert_value(self, value, expression, connection, context):
        if settings.USE_TZ:
            if value is None:
                raise ValueError(
                    "Database returned an invalid value in QuerySet.datetimes(). "
                    "Are time zone definitions for your database and pytz installed?"
                )
            # The backend hands back a naive datetime; reinterpret it in
            # the requested timezone.
            value = value.replace(tzinfo=None)
            value = timezone.make_aware(value, self.tzinfo)
        return value
class OrderBy(BaseExpression):
    """Wrap an expression with an ASC/DESC ordering direction."""
    template = '%(expression)s %(ordering)s'

    def __init__(self, expression, descending=False):
        # Validate before storing anything: only expression-like objects
        # (those exposing resolve_expression) may be ordered by.
        if not hasattr(expression, 'resolve_expression'):
            raise ValueError('expression must be an expression type')
        self.expression = expression
        self.descending = descending

    def __repr__(self):
        return "{}({}, descending={})".format(
            self.__class__.__name__, self.expression, self.descending)

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        inner_sql, inner_params = compiler.compile(self.expression)
        direction = 'DESC' if self.descending else 'ASC'
        rendered = self.template % {
            'expression': inner_sql,
            'ordering': direction,
        }
        return rendered.rstrip(), inner_params

    def get_group_by_cols(self):
        # Group by the wrapped expression's columns; the ordering
        # direction itself contributes nothing to GROUP BY.
        return [
            col
            for source in self.get_source_expressions()
            for col in source.get_group_by_cols()
        ]

    def reverse_ordering(self):
        self.descending = not self.descending
        return self

    def asc(self):
        self.descending = False

    def desc(self):
        self.descending = True
| bsd-3-clause |
ashray/VTK-EVM | ThirdParty/Twisted/twisted/protocols/ftp.py | 23 | 101280 | # -*- test-case-name: twisted.test.test_ftp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An FTP protocol implementation
"""
# System Imports
import os
import time
import re
import operator
import stat
import errno
import fnmatch
try:
import pwd, grp
except ImportError:
pwd = grp = None
from zope.interface import Interface, implements
# Twisted Imports
from twisted import copyright
from twisted.internet import reactor, interfaces, protocol, error, defer
from twisted.protocols import basic, policies
from twisted.python import log, failure, filepath
from twisted.cred import error as cred_error, portal, credentials, checkers
# constants
# response codes
RESTART_MARKER_REPLY = "100"
SERVICE_READY_IN_N_MINUTES = "120"
DATA_CNX_ALREADY_OPEN_START_XFR = "125"
FILE_STATUS_OK_OPEN_DATA_CNX = "150"
CMD_OK = "200.1"
TYPE_SET_OK = "200.2"
ENTERING_PORT_MODE = "200.3"
CMD_NOT_IMPLMNTD_SUPERFLUOUS = "202"
SYS_STATUS_OR_HELP_REPLY = "211.1"
FEAT_OK = '211.2'
DIR_STATUS = "212"
FILE_STATUS = "213"
HELP_MSG = "214"
NAME_SYS_TYPE = "215"
SVC_READY_FOR_NEW_USER = "220.1"
WELCOME_MSG = "220.2"
SVC_CLOSING_CTRL_CNX = "221.1"
GOODBYE_MSG = "221.2"
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS = "225"
CLOSING_DATA_CNX = "226.1"
TXFR_COMPLETE_OK = "226.2"
ENTERING_PASV_MODE = "227"
ENTERING_EPSV_MODE = "229"
USR_LOGGED_IN_PROCEED = "230.1" # v1 of code 230
GUEST_LOGGED_IN_PROCEED = "230.2" # v2 of code 230
REQ_FILE_ACTN_COMPLETED_OK = "250"
PWD_REPLY = "257.1"
MKD_REPLY = "257.2"
USR_NAME_OK_NEED_PASS = "331.1" # v1 of Code 331
GUEST_NAME_OK_NEED_EMAIL = "331.2" # v2 of code 331
NEED_ACCT_FOR_LOGIN = "332"
REQ_FILE_ACTN_PENDING_FURTHER_INFO = "350"
SVC_NOT_AVAIL_CLOSING_CTRL_CNX = "421.1"
TOO_MANY_CONNECTIONS = "421.2"
CANT_OPEN_DATA_CNX = "425"
CNX_CLOSED_TXFR_ABORTED = "426"
REQ_ACTN_ABRTD_FILE_UNAVAIL = "450"
REQ_ACTN_ABRTD_LOCAL_ERR = "451"
REQ_ACTN_ABRTD_INSUFF_STORAGE = "452"
SYNTAX_ERR = "500"
SYNTAX_ERR_IN_ARGS = "501"
CMD_NOT_IMPLMNTD = "502.1"
OPTS_NOT_IMPLEMENTED = '502.2'
BAD_CMD_SEQ = "503"
CMD_NOT_IMPLMNTD_FOR_PARAM = "504"
NOT_LOGGED_IN = "530.1" # v1 of code 530 - please log in
AUTH_FAILURE = "530.2" # v2 of code 530 - authorization failure
NEED_ACCT_FOR_STOR = "532"
FILE_NOT_FOUND = "550.1" # no such file or directory
PERMISSION_DENIED = "550.2" # permission denied
ANON_USER_DENIED = "550.3" # anonymous users can't alter filesystem
IS_NOT_A_DIR = "550.4" # rmd called on a path that is not a directory
REQ_ACTN_NOT_TAKEN = "550.5"
FILE_EXISTS = "550.6"
IS_A_DIR = "550.7"
PAGE_TYPE_UNK = "551"
EXCEEDED_STORAGE_ALLOC = "552"
FILENAME_NOT_ALLOWED = "553"
RESPONSE = {
# -- 100's --
RESTART_MARKER_REPLY: '110 MARK yyyy-mmmm', # TODO: this must be fixed
SERVICE_READY_IN_N_MINUTES: '120 service ready in %s minutes',
DATA_CNX_ALREADY_OPEN_START_XFR: '125 Data connection already open, starting transfer',
FILE_STATUS_OK_OPEN_DATA_CNX: '150 File status okay; about to open data connection.',
# -- 200's --
CMD_OK: '200 Command OK',
TYPE_SET_OK: '200 Type set to %s.',
ENTERING_PORT_MODE: '200 PORT OK',
CMD_NOT_IMPLMNTD_SUPERFLUOUS: '202 Command not implemented, superfluous at this site',
SYS_STATUS_OR_HELP_REPLY: '211 System status reply',
FEAT_OK: ['211-Features:','211 End'],
DIR_STATUS: '212 %s',
FILE_STATUS: '213 %s',
HELP_MSG: '214 help: %s',
NAME_SYS_TYPE: '215 UNIX Type: L8',
WELCOME_MSG: "220 %s",
SVC_READY_FOR_NEW_USER: '220 Service ready',
SVC_CLOSING_CTRL_CNX: '221 Service closing control connection',
GOODBYE_MSG: '221 Goodbye.',
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS: '225 data connection open, no transfer in progress',
CLOSING_DATA_CNX: '226 Abort successful',
TXFR_COMPLETE_OK: '226 Transfer Complete.',
ENTERING_PASV_MODE: '227 Entering Passive Mode (%s).',
ENTERING_EPSV_MODE: '229 Entering Extended Passive Mode (|||%s|).', # where is epsv defined in the rfc's?
USR_LOGGED_IN_PROCEED: '230 User logged in, proceed',
GUEST_LOGGED_IN_PROCEED: '230 Anonymous login ok, access restrictions apply.',
REQ_FILE_ACTN_COMPLETED_OK: '250 Requested File Action Completed OK', #i.e. CWD completed ok
PWD_REPLY: '257 "%s"',
MKD_REPLY: '257 "%s" created',
# -- 300's --
USR_NAME_OK_NEED_PASS: '331 Password required for %s.',
GUEST_NAME_OK_NEED_EMAIL: '331 Guest login ok, type your email address as password.',
NEED_ACCT_FOR_LOGIN: '332 Need account for login.',
REQ_FILE_ACTN_PENDING_FURTHER_INFO: '350 Requested file action pending further information.',
# -- 400's --
SVC_NOT_AVAIL_CLOSING_CTRL_CNX: '421 Service not available, closing control connection.',
TOO_MANY_CONNECTIONS: '421 Too many users right now, try again in a few minutes.',
CANT_OPEN_DATA_CNX: "425 Can't open data connection.",
CNX_CLOSED_TXFR_ABORTED: '426 Transfer aborted. Data connection closed.',
REQ_ACTN_ABRTD_FILE_UNAVAIL: '450 Requested action aborted. File unavailable.',
REQ_ACTN_ABRTD_LOCAL_ERR: '451 Requested action aborted. Local error in processing.',
REQ_ACTN_ABRTD_INSUFF_STORAGE: '452 Requested action aborted. Insufficient storage.',
# -- 500's --
SYNTAX_ERR: "500 Syntax error: %s",
SYNTAX_ERR_IN_ARGS: '501 syntax error in argument(s) %s.',
CMD_NOT_IMPLMNTD: "502 Command '%s' not implemented",
OPTS_NOT_IMPLEMENTED: "502 Option '%s' not implemented.",
BAD_CMD_SEQ: '503 Incorrect sequence of commands: %s',
CMD_NOT_IMPLMNTD_FOR_PARAM: "504 Not implemented for parameter '%s'.",
NOT_LOGGED_IN: '530 Please login with USER and PASS.',
AUTH_FAILURE: '530 Sorry, Authentication failed.',
NEED_ACCT_FOR_STOR: '532 Need an account for storing files',
FILE_NOT_FOUND: '550 %s: No such file or directory.',
PERMISSION_DENIED: '550 %s: Permission denied.',
ANON_USER_DENIED: '550 Anonymous users are forbidden to change the filesystem',
IS_NOT_A_DIR: '550 Cannot rmd, %s is not a directory',
FILE_EXISTS: '550 %s: File exists',
IS_A_DIR: '550 %s: is a directory',
REQ_ACTN_NOT_TAKEN: '550 Requested action not taken: %s',
PAGE_TYPE_UNK: '551 Page type unknown',
EXCEEDED_STORAGE_ALLOC: '552 Requested file action aborted, exceeded file storage allocation',
FILENAME_NOT_ALLOWED: '553 Requested action not taken, file name not allowed'
}
class InvalidPath(Exception):
    """
    Internal exception used to signify an error during parsing a path.

    Raised by L{toSegments} when a client-supplied path tries to ascend
    above the root via '..' or contains a NUL byte in a segment.
    """
def toSegments(cwd, path):
    """
    Normalize a path, as represented by a list of strings each
    representing one segment of the path.

    @param cwd: The current working directory, as a list of segments.
    @param path: A '/'-separated path; absolute if it starts with '/',
        otherwise relative to C{cwd}.
    @return: The normalized path as a list of segments.
    @raise InvalidPath: If the path ascends above the root or a segment
        contains a forbidden character.
    """
    # An absolute path discards the working directory entirely.
    segments = [] if path.startswith('/') else list(cwd)
    for piece in path.split('/'):
        if piece in ('.', ''):
            # Self-references and redundant separators are no-ops.
            continue
        if piece == '..':
            if not segments:
                # Attempting to climb above the root is an error.
                raise InvalidPath(cwd, path)
            segments.pop()
        elif '\0' in piece or '/' in piece:
            raise InvalidPath(cwd, path)
        else:
            segments.append(piece)
    return segments
def errnoToFailure(e, path):
    """
    Map C{OSError} and C{IOError} to standard FTP errors.

    @param e: An errno value taken from a caught OS-level exception.
    @param path: The path involved, interpolated into the FTP error.
    @return: A Deferred that has already failed with the matching
        L{FTPCmdError} subclass, or with the exception currently being
        handled when the errno is not recognized.
    """
    errorTypes = {
        errno.ENOENT: FileNotFoundError,
        errno.EACCES: PermissionDeniedError,
        errno.EPERM: PermissionDeniedError,
        errno.ENOTDIR: IsNotADirectoryError,
        errno.EEXIST: FileExistsError,
        errno.EISDIR: IsADirectoryError,
    }
    errorType = errorTypes.get(e)
    if errorType is not None:
        return defer.fail(errorType(path))
    # Unrecognized errno: wrap whatever exception is currently active.
    return defer.fail()
def _isGlobbingExpression(segments=None):
"""
Helper for checking if a FTPShell `segments` contains a wildcard Unix
expression.
Only filename globbing is supported.
This means that wildcards can only be presents in the last element of
`segments`.
@type segments: C{list}
@param segments: List of path elements as used by the FTP server protocol.
@rtype: Boolean
@return: True if `segments` contains a globbing expression.
"""
if not segments:
return False
# To check that something is a glob expression, we convert it to
# Regular Expression. If the result is the same as the original expression
# then it contains no globbing expression.
globCandidate = segments[-1]
# A set of default regex rules is added to all strings.
emtpyTranslations = fnmatch.translate('')
globTranslations = fnmatch.translate(globCandidate)
if globCandidate + emtpyTranslations == globTranslations:
return False
else:
return True
class FTPCmdError(Exception):
    """
    Generic exception for FTP commands.

    Subclasses set C{errorCode} to one of the module-level response keys;
    L{response} renders the matching text from C{RESPONSE}.
    """
    def __init__(self, *msg):
        Exception.__init__(self, *msg)
        # Kept as a tuple so it can be %-interpolated into the response
        # format string.
        self.errorMessage = msg

    def response(self):
        """
        Generate a FTP response message for this error.
        """
        return RESPONSE[self.errorCode] % self.errorMessage
class FileNotFoundError(FTPCmdError):
    """
    Raised when trying to access a non existent file or directory.
    """
    # 550: no such file or directory.
    errorCode = FILE_NOT_FOUND
class AnonUserDeniedError(FTPCmdError):
    """
    Raised when an anonymous user issues a command that will alter the
    filesystem
    """
    # 550: anonymous users may not change the filesystem.
    errorCode = ANON_USER_DENIED
class PermissionDeniedError(FTPCmdError):
    """
    Raised when access is attempted to a resource to which access is
    not allowed.
    """
    # 550: permission denied.
    errorCode = PERMISSION_DENIED
class IsNotADirectoryError(FTPCmdError):
    """
    Raised when RMD is called on a path that isn't a directory.
    """
    # 550: target of rmd is not a directory.
    errorCode = IS_NOT_A_DIR
class FileExistsError(FTPCmdError):
    """
    Raised when attempted to override an existing resource.
    """
    # 550: file already exists.
    errorCode = FILE_EXISTS
class IsADirectoryError(FTPCmdError):
    """
    Raised when DELE is called on a path that is a directory.
    """
    # 550: target is a directory.
    errorCode = IS_A_DIR
class CmdSyntaxError(FTPCmdError):
    """
    Raised when a command syntax is wrong.
    """
    # 500: general syntax error.
    errorCode = SYNTAX_ERR
class CmdArgSyntaxError(FTPCmdError):
    """
    Raised when a command is called with wrong value or a wrong number of
    arguments.
    """
    # 501: syntax error in arguments.
    errorCode = SYNTAX_ERR_IN_ARGS
class CmdNotImplementedError(FTPCmdError):
    """
    Raised when an unimplemented command is given to the server.
    """
    # 502: command not implemented.
    errorCode = CMD_NOT_IMPLMNTD
class CmdNotImplementedForArgError(FTPCmdError):
    """
    Raised when the handling of a parameter for a command is not implemented by
    the server.
    """
    # 504: command not implemented for that parameter.
    errorCode = CMD_NOT_IMPLMNTD_FOR_PARAM
class FTPError(Exception):
    """
    Generic base exception for FTP-related errors that are not tied to a
    specific reply code.
    """
    pass
class PortConnectionError(Exception):
    """
    Raised (as a Deferred failure) when establishing the PORT/PASV data
    connection fails or times out; see L{DTPFactory}.
    """
    pass
class BadCmdSequenceError(FTPCmdError):
    """
    Raised when a client sends a series of commands in an illogical sequence.
    """
    # 503: bad sequence of commands.
    errorCode = BAD_CMD_SEQ
class AuthorizationError(FTPCmdError):
    """
    Raised when client authentication fails.
    """
    # 530: authentication failure.
    errorCode = AUTH_FAILURE
def debugDeferred(self, *_):
    """
    Log the arguments this callable was fired with, for debugging.
    """
    # NOTE(review): this is a module-level function whose first parameter
    # is named 'self'; when used as a Deferred callback the result fills
    # that slot and the remaining args land in '_'.
    log.msg('debugDeferred(): %s' % str(_), debug=True)
# -- DTP Protocol --
# Month abbreviations for ls-style LIST output; index 0 is None so the
# list can be indexed directly with 1-based struct_time.tm_mon values.
_months = [
    None,
    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
    'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class DTP(object, protocol.Protocol):
    """
    The Data Transfer Process: carries directory listings and file data
    over the secondary FTP data connection on behalf of the protocol
    interpreter that created it (via L{DTPFactory}).
    """
    implements(interfaces.IConsumer)

    # True while the data-channel transport is connected.
    isConnected = False

    # Consumer registered via registerConsumer(), if any.
    _cons = None
    # Deferred fired on connection loss while a consumer is registered;
    # its callback chain unregisters the consumer.
    _onConnLost = None
    # Chunks received before any consumer registered.
    _buffer = None

    def connectionMade(self):
        self.isConnected = True
        # Tell the factory (and thus the waiting PI) the channel is up.
        self.factory.deferred.callback(None)
        self._buffer = []

    def connectionLost(self, reason):
        self.isConnected = False
        if self._onConnLost is not None:
            self._onConnLost.callback(None)

    def sendLine(self, line):
        """
        Send a line to data channel.

        @param line: The line to be sent.
        @type line: L{bytes}
        """
        self.transport.write(line + '\r\n')

    def _formatOneListResponse(self, name, size, directory, permissions, hardlinks, modified, owner, group):
        """
        Render one directory entry in the ls(1)-style format used for
        the LIST response.
        """
        def formatMode(mode):
            # Expand a 9-bit permission mask into e.g. 'rwxr-xr--'.
            return ''.join([mode & (256 >> n) and 'rwx'[n % 3] or '-' for n in range(9)])

        def formatDate(mtime):
            now = time.gmtime()
            info = {
                'month': _months[mtime.tm_mon],
                'day': mtime.tm_mday,
                'year': mtime.tm_year,
                'hour': mtime.tm_hour,
                'minute': mtime.tm_min
            }
            if now.tm_year != mtime.tm_year:
                # Entries from previous years show the year instead of
                # the time of day, mirroring ls(1).
                return '%(month)s %(day)02d %(year)5d' % info
            else:
                return '%(month)s %(day)02d %(hour)02d:%(minute)02d' % info

        format = ('%(directory)s%(permissions)s%(hardlinks)4d '
                  '%(owner)-9s %(group)-9s %(size)15d %(date)12s '
                  '%(name)s')

        return format % {
            'directory': directory and 'd' or '-',
            'permissions': formatMode(permissions),
            'hardlinks': hardlinks,
            'owner': owner[:8],
            'group': group[:8],
            'size': size,
            'date': formatDate(time.gmtime(modified)),
            'name': name}

    def sendListResponse(self, name, response):
        self.sendLine(self._formatOneListResponse(name, *response))

    # Proxy IConsumer to our transport
    def registerProducer(self, producer, streaming):
        return self.transport.registerProducer(producer, streaming)

    def unregisterProducer(self):
        self.transport.unregisterProducer()
        self.transport.loseConnection()

    def write(self, data):
        if self.isConnected:
            return self.transport.write(data)
        raise Exception("Crap damn crap damn crap damn")

    # Pretend to be a producer, too.
    def _conswrite(self, bytes):
        try:
            self._cons.write(bytes)
        except:
            # A failing consumer aborts the transfer via _onConnLost.
            self._onConnLost.errback()

    def dataReceived(self, bytes):
        if self._cons is not None:
            self._conswrite(bytes)
        else:
            # No consumer yet; hold the data until one registers.
            self._buffer.append(bytes)

    def _unregConsumer(self, ignored):
        self._cons.unregisterProducer()
        self._cons = None
        del self._onConnLost
        return ignored

    def registerConsumer(self, cons):
        """
        Attach C{cons} as the consumer of incoming data, first flushing
        any bytes buffered before registration.

        @return: A Deferred firing when the connection is lost, or an
            already-fired one if it is gone already.
        """
        assert self._cons is None
        self._cons = cons
        self._cons.registerProducer(self, True)
        for chunk in self._buffer:
            self._conswrite(chunk)
        self._buffer = None
        if self.isConnected:
            self._onConnLost = d = defer.Deferred()
            d.addBoth(self._unregConsumer)
            return d
        else:
            self._cons.unregisterProducer()
            self._cons = None
            return defer.succeed(None)

    def resumeProducing(self):
        self.transport.resumeProducing()

    def pauseProducing(self):
        self.transport.pauseProducing()

    def stopProducing(self):
        self.transport.stopProducing()
class DTPFactory(protocol.ClientFactory):
    """
    Client factory for I{data transfer process} protocols.

    @ivar peerCheck: perform checks to make sure the ftp-pi's peer is the same
        as the dtp's
    @ivar pi: a reference to this factory's protocol interpreter

    @ivar _state: Indicates the current state of the DTPFactory.  Initially,
        this is L{_IN_PROGRESS}.  If the connection fails or times out, it is
        L{_FAILED}.  If the connection succeeds before the timeout, it is
        L{_FINISHED}.
    """

    # Sentinel objects for the factory's connection state machine.
    _IN_PROGRESS = object()
    _FAILED = object()
    _FINISHED = object()

    _state = _IN_PROGRESS

    # -- configuration variables --
    peerCheck = False

    # -- class variables --
    def __init__(self, pi, peerHost=None, reactor=None):
        """
        Constructor

        @param pi: this factory's protocol interpreter
        @param peerHost: if peerCheck is True, this is the tuple that the
            generated instance will use to perform security checks
        @param reactor: the reactor to schedule the timeout with; defaults
            to the global reactor.
        """
        self.pi = pi  # the protocol interpreter that is using this factory
        self.peerHost = peerHost  # the from FTP.transport.peerHost()
        self.deferred = defer.Deferred()  # deferred will fire when instance is connected
        self.delayedCall = None
        if reactor is None:
            # Rebind the local name so the default below is the global
            # reactor when none was passed in.
            from twisted.internet import reactor
        self._reactor = reactor

    def buildProtocol(self, addr):
        log.msg('DTPFactory.buildProtocol', debug=True)

        if self._state is not self._IN_PROGRESS:
            # The attempt already finished or failed; refuse further
            # connections.
            return None
        self._state = self._FINISHED

        self.cancelTimeout()
        p = DTP()
        p.factory = self
        p.pi = self.pi
        self.pi.dtpInstance = p
        return p

    def stopFactory(self):
        log.msg('dtpFactory.stopFactory', debug=True)
        self.cancelTimeout()

    def timeoutFactory(self):
        """
        Give up waiting for a data connection and errback the pending
        Deferred with a L{PortConnectionError}.
        """
        log.msg('timed out waiting for DTP connection')
        if self._state is not self._IN_PROGRESS:
            return
        self._state = self._FAILED

        d = self.deferred
        self.deferred = None
        d.errback(
            PortConnectionError(defer.TimeoutError("DTPFactory timeout")))

    def cancelTimeout(self):
        if self.delayedCall is not None and self.delayedCall.active():
            log.msg('cancelling DTP timeout', debug=True)
            self.delayedCall.cancel()

    def setTimeout(self, seconds):
        log.msg('DTPFactory.setTimeout set to %s seconds' % seconds)
        self.delayedCall = self._reactor.callLater(seconds, self.timeoutFactory)

    def clientConnectionFailed(self, connector, reason):
        if self._state is not self._IN_PROGRESS:
            return
        self._state = self._FAILED

        d = self.deferred
        self.deferred = None
        d.errback(PortConnectionError(reason))
# -- FTP-PI (Protocol Interpreter) --
class ASCIIConsumerWrapper(object):
    """
    Wrap a consumer so data written to it is translated from the
    platform's C{os.linesep} to network (CRLF) line endings, as required
    for ASCII-mode transfers.
    """
    def __init__(self, cons):
        self.cons = cons
        # Producer registration passes straight through to the wrapped
        # consumer.
        self.registerProducer = cons.registerProducer
        self.unregisterProducer = cons.unregisterProducer

        assert os.linesep == "\r\n" or len(os.linesep) == 1, "Unsupported platform (yea right like this even exists)"

        # When the platform already uses CRLF there is nothing to
        # translate, so bypass the translating write below.
        if os.linesep == "\r\n":
            self.write = cons.write

    def write(self, bytes):
        return self.cons.write(bytes.replace(os.linesep, "\r\n"))
class FileConsumer(object):
    """
    A consumer for FTP input that writes data to a file.

    @ivar fObj: a file object opened for writing, used to write data received.
    @type fObj: C{file}
    """
    implements(interfaces.IConsumer)

    def __init__(self, fObj):
        self.fObj = fObj

    def registerProducer(self, producer, streaming):
        self.producer = producer
        # Only streaming (push) producers are supported.
        assert streaming

    def unregisterProducer(self):
        self.producer = None
        # The transfer is over; flush and release the file.
        self.fObj.close()

    def write(self, bytes):
        self.fObj.write(bytes)
class FTPOverflowProtocol(basic.LineReceiver):
    """FTP mini-protocol for when there are too many connections."""
    def connectionMade(self):
        # Tell the client to come back later, then hang up immediately.
        self.sendLine(RESPONSE[TOO_MANY_CONNECTIONS])
        self.transport.loseConnection()
class FTP(object, basic.LineReceiver, policies.TimeoutMixin):
"""
Protocol Interpreter for the File Transfer Protocol
@ivar state: The current server state. One of L{UNAUTH},
L{INAUTH}, L{AUTHED}, L{RENAMING}.
@ivar shell: The connected avatar
@ivar binary: The transfer mode. If false, ASCII.
@ivar dtpFactory: Generates a single DTP for this session
@ivar dtpPort: Port returned from listenTCP
@ivar listenFactory: A callable with the signature of
L{twisted.internet.interfaces.IReactorTCP.listenTCP} which will be used
to create Ports for passive connections (mainly for testing).
@ivar passivePortRange: iterator used as source of passive port numbers.
@type passivePortRange: C{iterator}
"""
disconnected = False
# States an FTP can be in
UNAUTH, INAUTH, AUTHED, RENAMING = range(4)
# how long the DTP waits for a connection
dtpTimeout = 10
portal = None
shell = None
dtpFactory = None
dtpPort = None
dtpInstance = None
binary = True
PUBLIC_COMMANDS = ['FEAT', 'QUIT']
FEATURES = ['FEAT', 'MDTM', 'PASV', 'SIZE', 'TYPE A;I']
passivePortRange = xrange(0, 1)
listenFactory = reactor.listenTCP
def reply(self, key, *args):
    """
    Send the response registered for C{key}, interpolating C{args} into
    its format string.
    """
    msg = RESPONSE[key] % args
    self.sendLine(msg)
def connectionMade(self):
    """
    Greet a newly connected client: start unauthenticated, arm the idle
    timeout, and send the configured welcome banner.
    """
    self.state = self.UNAUTH
    self.setTimeout(self.timeOut)
    self.reply(WELCOME_MSG, self.factory.welcomeMessage)
def connectionLost(self, reason):
    """
    Tear down session state when the control connection goes away:
    shut down any data channel, disarm the timeout, and log the avatar
    out.
    """
    # if we have a DTP protocol instance running and
    # we lose connection to the client's PI, kill the
    # DTP connection and close the port
    if self.dtpFactory:
        self.cleanupDTP()
    self.setTimeout(None)
    if hasattr(self.shell, 'logout') and self.shell.logout is not None:
        self.shell.logout()
    self.shell = None
    self.transport = None
def timeoutConnection(self):
    """
    Drop the control connection after the client has been idle too long.
    """
    self.transport.loseConnection()
def lineReceived(self, line):
    """
    Parse and dispatch one FTP command line.

    Producing is paused while the command runs so later commands are not
    processed concurrently; it is resumed once the command's Deferred
    fires.
    """
    self.resetTimeout()
    self.pauseProducing()

    def processFailed(err):
        # FTPCmdError carries its own formatted response; a TypeError
        # mentioning 'takes exactly' indicates a missing argument.
        if err.check(FTPCmdError):
            self.sendLine(err.value.response())
        elif (err.check(TypeError) and
              err.value.args[0].find('takes exactly') != -1):
            self.reply(SYNTAX_ERR, "%s requires an argument." % (cmd,))
        else:
            log.msg("Unexpected FTP error")
            log.err(err)
            self.reply(REQ_ACTN_NOT_TAKEN, "internal server error")

    def processSucceeded(result):
        # Handlers return a response key, a (key, args...) tuple, or
        # None when they have already replied themselves.
        if isinstance(result, tuple):
            self.reply(*result)
        elif result is not None:
            self.reply(result)

    def allDone(ignored):
        if not self.disconnected:
            self.resumeProducing()

    # Split the line into the command and (at most one) argument string.
    spaceIndex = line.find(' ')
    if spaceIndex != -1:
        cmd = line[:spaceIndex]
        args = (line[spaceIndex + 1:],)
    else:
        cmd = line
        args = ()
    d = defer.maybeDeferred(self.processCommand, cmd, *args)
    d.addCallbacks(processSucceeded, processFailed)
    d.addErrback(log.err)

    # XXX It burnsss
    # LineReceiver doesn't let you resumeProducing inside
    # lineReceived atm
    from twisted.internet import reactor
    reactor.callLater(0, d.addBoth, allDone)
def processCommand(self, cmd, *params):
    """
    Look up and invoke the handler for C{cmd}, enforcing the login state
    machine (UNAUTH -> INAUTH -> AUTHED, plus the transient RENAMING
    state between RNFR and RNTO).
    """
    def call_ftp_command(command):
        # Handlers follow the ftp_<COMMAND> naming convention.
        method = getattr(self, "ftp_" + command, None)
        if method is not None:
            return method(*params)
        return defer.fail(CmdNotImplementedError(command))

    cmd = cmd.upper()

    # Commands in PUBLIC_COMMANDS (FEAT, QUIT) are allowed regardless of
    # authentication state.
    if cmd in self.PUBLIC_COMMANDS:
        return call_ftp_command(cmd)

    elif self.state == self.UNAUTH:
        if cmd == 'USER':
            return self.ftp_USER(*params)
        elif cmd == 'PASS':
            return BAD_CMD_SEQ, "USER required before PASS"
        else:
            return NOT_LOGGED_IN

    elif self.state == self.INAUTH:
        if cmd == 'PASS':
            return self.ftp_PASS(*params)
        else:
            return BAD_CMD_SEQ, "PASS required after USER"

    elif self.state == self.AUTHED:
        return call_ftp_command(cmd)

    elif self.state == self.RENAMING:
        if cmd == 'RNTO':
            return self.ftp_RNTO(*params)
        else:
            return BAD_CMD_SEQ, "RNTO required after RNFR"
def getDTPPort(self, factory):
    """
    Return a port for passive access, using C{self.passivePortRange}
    attribute.

    @param factory: The factory to start listening with.
    @raise error.CannotListenError: If no port in the range is free.
    """
    for portn in self.passivePortRange:
        try:
            dtpPort = self.listenFactory(portn, factory)
        except error.CannotListenError:
            # Port taken; try the next one in the range.
            continue
        else:
            return dtpPort
    raise error.CannotListenError('', portn,
                                  "No port available in range %s" %
                                  (self.passivePortRange,))
def ftp_USER(self, username):
    """
    First part of login.  Get the username the peer wants to
    authenticate as.

    @param username: The username supplied by the client.
    @return: A response key (with arguments) prompting for a password,
        or a failing Deferred when the argument is missing.
    """
    if not username:
        return defer.fail(CmdSyntaxError('USER requires an argument'))

    self._user = username
    self.state = self.INAUTH
    # Anonymous logins get the 'email as password' prompt instead.
    if self.factory.allowAnonymous and self._user == self.factory.userAnonymous:
        return GUEST_NAME_OK_NEED_EMAIL
    else:
        return (USR_NAME_OK_NEED_PASS, username)
# TODO: add max auth try before timeout from ip...
# TODO: need to implement minimal ABOR command
def ftp_PASS(self, password):
    """
    Second part of login.  Get the password the peer wants to
    authenticate with.

    @param password: The password (or, for anonymous logins, the email
        address) supplied by the client.
    @return: A Deferred firing with the login-succeeded response key,
        or failing with L{AuthorizationError}.
    """
    if self.factory.allowAnonymous and self._user == self.factory.userAnonymous:
        # anonymous login
        creds = credentials.Anonymous()
        reply = GUEST_LOGGED_IN_PROCEED
    else:
        # user login
        creds = credentials.UsernamePassword(self._user, password)
        reply = USR_LOGGED_IN_PROCEED
    del self._user

    def _cbLogin((interface, avatar, logout)):
        assert interface is IFTPShell, "The realm is busted, jerk."
        self.shell = avatar
        self.logout = logout
        self.workingDirectory = []
        self.state = self.AUTHED
        return reply

    def _ebLogin(failure):
        # Bad credentials put the session back to the start of the
        # USER/PASS sequence.
        failure.trap(cred_error.UnauthorizedLogin, cred_error.UnhandledCredentials)
        self.state = self.UNAUTH
        raise AuthorizationError

    d = self.portal.login(creds, None, IFTPShell)
    d.addCallbacks(_cbLogin, _ebLogin)
    return d
def ftp_PASV(self):
    """
    Request for a passive connection

    from the rfc::

        This command requests the server-DTP to \"listen\" on a data port
        (which is not its default data port) and to wait for a connection
        rather than initiate one upon receipt of a transfer command.  The
        response to this command includes the host and port address this
        server is listening on.
    """
    # if we have a DTP port set up, lose it.
    if self.dtpFactory is not None:
        # cleanupDTP sets dtpFactory to none.  Later we'll do
        # cleanup here or something.
        self.cleanupDTP()
    self.dtpFactory = DTPFactory(pi=self)
    self.dtpFactory.setTimeout(self.dtpTimeout)
    self.dtpPort = self.getDTPPort(self.dtpFactory)

    host = self.transport.getHost().host
    port = self.dtpPort.getHost().port
    # encodeHostPort (defined elsewhere in this module) renders the
    # h1,h2,h3,h4,p1,p2 form required by the 227 reply.
    self.reply(ENTERING_PASV_MODE, encodeHostPort(host, port))
    # The reply is sent immediately; callers wait on the data
    # connection actually being established.
    return self.dtpFactory.deferred.addCallback(lambda ign: None)
def ftp_PORT(self, address):
    """
    Open an active-mode data connection to the address the client
    supplied.

    @param address: Six comma-separated decimal byte values: four for
        the IPv4 address, two for the port (high byte first).
    """
    addr = map(int, address.split(','))
    ip = '%d.%d.%d.%d' % tuple(addr[:4])
    port = addr[4] << 8 | addr[5]

    # if we have a DTP port set up, lose it.
    if self.dtpFactory is not None:
        self.cleanupDTP()

    self.dtpFactory = DTPFactory(pi=self, peerHost=self.transport.getPeer().host)
    self.dtpFactory.setTimeout(self.dtpTimeout)
    self.dtpPort = reactor.connectTCP(ip, port, self.dtpFactory)

    def connected(ignored):
        return ENTERING_PORT_MODE
    def connFailed(err):
        err.trap(PortConnectionError)
        return CANT_OPEN_DATA_CNX
    return self.dtpFactory.deferred.addCallbacks(connected, connFailed)
def _encodeName(self, name):
    """
    Encode C{name} to be sent over the wire.

    Per U{RFC 3659 section 2.2
    <https://tools.ietf.org/html/rfc3659#section-2.2>}, pathnames travel
    either as UTF-8 or as raw bytes: L{unicode} values are encoded as
    UTF-8, while L{bytes} values pass through untouched.

    @param name: Name to be encoded.
    @type name: L{bytes} or L{unicode}

    @return: Wire format of C{name}.
    @rtype: L{bytes}
    """
    return name.encode('utf-8') if isinstance(name, unicode) else name
def ftp_LIST(self, path=''):
    """ This command causes a list to be sent from the server to the
    passive DTP.  If the pathname specifies a directory or other
    group of files, the server should transfer a list of files
    in the specified directory.  If the pathname specifies a
    file then the server should send current information on the
    file.  A null argument implies the user's current working or
    default directory.
    """
    # Uh, for now, do this retarded thing.
    if self.dtpInstance is None or not self.dtpInstance.isConnected:
        return defer.fail(BadCmdSequenceError('must send PORT or PASV before RETR'))

    # Various clients send flags like -L or -al etc.  We just ignore them.
    if path.lower() in ['-a', '-l', '-la', '-al']:
        path = ''

    def gotListing(results):
        self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
        for (name, attrs) in results:
            name = self._encodeName(name)
            self.dtpInstance.sendListResponse(name, attrs)
        # Closing the data connection signals end-of-listing.
        self.dtpInstance.transport.loseConnection()
        return (TXFR_COMPLETE_OK,)

    try:
        segments = toSegments(self.workingDirectory, path)
    except InvalidPath:
        return defer.fail(FileNotFoundError(path))

    # Request exactly the attributes _formatOneListResponse renders.
    d = self.shell.list(
        segments,
        ('size', 'directory', 'permissions', 'hardlinks',
         'modified', 'owner', 'group'))
    d.addCallback(gotListing)
    return d
def ftp_NLST(self, path):
    """
    This command causes a directory listing to be sent from the server to
    the client.  The pathname should specify a directory or other
    system-specific file group descriptor.  An empty path implies the current
    working directory.  If the path is non-existent, send nothing.  If the
    path is to a file, send only the file name.

    @type path: C{str}
    @param path: The path for which a directory listing should be returned.

    @rtype: L{Deferred}
    @return: a L{Deferred} which will be fired when the listing request
        is finished.
    """
    # XXX: why is this check different from ftp_RETR/ftp_STOR? See #4180
    if self.dtpInstance is None or not self.dtpInstance.isConnected:
        return defer.fail(
            BadCmdSequenceError('must send PORT or PASV before RETR'))

    try:
        segments = toSegments(self.workingDirectory, path)
    except InvalidPath:
        return defer.fail(FileNotFoundError(path))

    def cbList(results, glob):
        """
        Send, line by line, each matching file in the directory listing, and
        then close the connection.

        @type results: A C{list} of C{tuple}.  The first element of each
            C{tuple} is a C{str} and the second element is a C{list}.
        @param results: The names of the files in the directory.

        @param glob: A shell-style glob through which to filter results (see
            U{http://docs.python.org/2/library/fnmatch.html}), or C{None}
            for no filtering.
        @type glob: L{str} or L{NoneType}

        @return: A C{tuple} containing the status code for a successful
            transfer.
        @rtype: C{tuple}
        """
        self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
        for (name, ignored) in results:
            if not glob or (glob and fnmatch.fnmatch(name, glob)):
                name = self._encodeName(name)
                self.dtpInstance.sendLine(name)
        self.dtpInstance.transport.loseConnection()
        return (TXFR_COMPLETE_OK,)

    def listErr(results):
        """
        RFC 959 specifies that an NLST request may only return directory
        listings.  Thus, send nothing and just close the connection.

        @type results: L{Failure}
        @param results: The L{Failure} wrapping a L{FileNotFoundError} that
            occurred while trying to list the contents of a nonexistent
            directory.

        @returns: A C{tuple} containing the status code for a successful
            transfer.
        @rtype: C{tuple}
        """
        self.dtpInstance.transport.loseConnection()
        return (TXFR_COMPLETE_OK,)

    if _isGlobbingExpression(segments):
        # Remove globbing expression from path
        # and keep to be used for filtering.
        glob = segments.pop()
    else:
        glob = None

    d = self.shell.list(segments)
    d.addCallback(cbList, glob)
    # self.shell.list will generate an error if the path is invalid
    d.addErrback(listErr)
    return d
def ftp_CWD(self, path):
    """
    Change the working directory to C{path} (resolved against the
    current one) after verifying the avatar may access it.
    """
    try:
        segments = toSegments(self.workingDirectory, path)
    except InvalidPath:
        # XXX Eh, what to fail with here?
        return defer.fail(FileNotFoundError(path))

    def accessGranted(result):
        # Only commit the new working directory once access is allowed.
        self.workingDirectory = segments
        return (REQ_FILE_ACTN_COMPLETED_OK,)

    return self.shell.access(segments).addCallback(accessGranted)
def ftp_CDUP(self):
    """
    Change to the parent of the current working directory.
    """
    return self.ftp_CWD('..')
def ftp_PWD(self):
    """
    Reply with the current working directory as an absolute path.
    """
    return (PWD_REPLY, '/' + '/'.join(self.workingDirectory))
def ftp_RETR(self, path):
    """
    This command causes the content of a file to be sent over the data
    transfer channel.  If the path is to a folder, an error will be raised.

    @type path: C{str}
    @param path: The path to the file which should be transferred over the
        data transfer channel.

    @rtype: L{Deferred}
    @return: a L{Deferred} which will be fired when the transfer is done.
    """
    if self.dtpInstance is None:
        raise BadCmdSequenceError('PORT or PASV required before RETR')

    try:
        newsegs = toSegments(self.workingDirectory, path)
    except InvalidPath:
        return defer.fail(FileNotFoundError(path))

    # XXX For now, just disable the timeout.  Later we'll want to
    # leave it active and have the DTP connection reset it
    # periodically.
    self.setTimeout(None)

    # Put it back later
    def enableTimeout(result):
        self.setTimeout(self.factory.timeOut)
        return result

    # And away she goes
    if not self.binary:
        # ASCII mode: translate platform line endings to CRLF.
        cons = ASCIIConsumerWrapper(self.dtpInstance)
    else:
        cons = self.dtpInstance

    def cbSent(result):
        return (TXFR_COMPLETE_OK,)

    def ebSent(err):
        log.msg("Unexpected error attempting to transmit file to client:")
        log.err(err)
        if err.check(FTPCmdError):
            return err
        return (CNX_CLOSED_TXFR_ABORTED,)

    def cbOpened(file):
        # Tell them what to doooo
        if self.dtpInstance.isConnected:
            self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
        else:
            self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)
        d = file.send(cons)
        d.addCallbacks(cbSent, ebSent)
        return d

    def ebOpened(err):
        if not err.check(PermissionDeniedError, FileNotFoundError, IsADirectoryError):
            log.msg("Unexpected error attempting to open file for transmission:")
            log.err(err)
        if err.check(FTPCmdError):
            return (err.value.errorCode, '/'.join(newsegs))
        return (FILE_NOT_FOUND, '/'.join(newsegs))

    d = self.shell.openForReading(newsegs)
    d.addCallbacks(cbOpened, ebOpened)
    d.addBoth(enableTimeout)

    # Pass back Deferred that fires when the transfer is done
    return d
    def ftp_STOR(self, path):
        """
        STORE (STOR)

        This command causes the server-DTP to accept the data
        transferred via the data connection and to store the data as
        a file at the server site. If the file specified in the
        pathname exists at the server site, then its contents shall
        be replaced by the data being transferred. A new file is
        created at the server site if the file specified in the
        pathname does not already exist.
        """
        # A data connection must have been negotiated (PORT or PASV) before
        # any transfer command makes sense.
        if self.dtpInstance is None:
            raise BadCmdSequenceError('PORT or PASV required before STOR')
        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))
        # XXX For now, just disable the timeout. Later we'll want to
        # leave it active and have the DTP connection reset it
        # periodically.
        self.setTimeout(None)
        # Put it back later
        def enableTimeout(result):
            self.setTimeout(self.factory.timeOut)
            return result
        def cbOpened(file):
            """
            File was opened for writing. Launch the data transfer channel via
            the file consumer.
            """
            d = file.receive()
            d.addCallback(cbConsumer)
            d.addCallback(lambda ignored: file.close())
            d.addCallbacks(cbSent, ebSent)
            return d
        def ebOpened(err):
            """
            Called when failed to open the file for writing.

            For known errors, return the FTP error code.
            For all other, return a file not found error.
            """
            if isinstance(err.value, FTPCmdError):
                return (err.value.errorCode, '/'.join(newsegs))
            log.err(err, "Unexpected error received while opening file:")
            return (FILE_NOT_FOUND, '/'.join(newsegs))
        def cbConsumer(cons):
            """
            Called after the file was opened for writing.

            Prepare the data transfer channel and send the response
            to the command channel.
            """
            # In ASCII mode, wrap the consumer so line endings are translated.
            if not self.binary:
                cons = ASCIIConsumerWrapper(cons)
            d = self.dtpInstance.registerConsumer(cons)
            # Tell them what to doooo
            if self.dtpInstance.isConnected:
                self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
            else:
                self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)
            return d
        def cbSent(result):
            """
            Called from data transport when transfer is done.
            """
            return (TXFR_COMPLETE_OK,)
        def ebSent(err):
            """
            Called from data transport when there are errors during the
            transfer.
            """
            log.err(err, "Unexpected error received during transfer:")
            if err.check(FTPCmdError):
                return err
            return (CNX_CLOSED_TXFR_ABORTED,)
        d = self.shell.openForWriting(newsegs)
        d.addCallbacks(cbOpened, ebOpened)
        d.addBoth(enableTimeout)
        # Pass back Deferred that fires when the transfer is done
        return d
    def ftp_SIZE(self, path):
        """
        File SIZE

        The FTP command, SIZE OF FILE (SIZE), is used to obtain the transfer
        size of a file from the server-FTP process. This is the exact number
        of octets (8 bit bytes) that would be transmitted over the data
        connection should that file be transmitted. This value will change
        depending on the current STRUcture, MODE, and TYPE of the data
        connection or of a data connection that would be created were one
        created now. Thus, the result of the SIZE command is dependent on
        the currently established STRU, MODE, and TYPE parameters.

        The SIZE command returns how many octets would be transferred if the
        file were to be transferred using the current transfer structure,
        mode, and type. This command is normally used in conjunction with
        the RESTART (REST) command when STORing a file to a remote server in
        STREAM mode, to determine the restart point. The server-PI might
        need to read the partially transferred file, do any appropriate
        conversion, and count the number of octets that would be generated
        when sending the file in order to correctly respond to this command.
        Estimates of the file transfer size MUST NOT be returned; only
        precise information is acceptable.

        http://tools.ietf.org/html/rfc3659
        """
        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))
        # Python 2 tuple-parameter unpacking: shell.stat fires with a
        # one-tuple containing just the requested 'size' value.
        def cbStat((size,)):
            return (FILE_STATUS, str(size))
        return self.shell.stat(newsegs, ('size',)).addCallback(cbStat)
    def ftp_MDTM(self, path):
        """
        File Modification Time (MDTM)

        The FTP command, MODIFICATION TIME (MDTM), can be used to determine
        when a file in the server NVFS was last modified. This command has
        existed in many FTP servers for many years, as an adjunct to the REST
        command for STREAM mode, thus is widely available. However, where
        supported, the "modify" fact that can be provided in the result from
        the new MLST command is recommended as a superior alternative.

        http://tools.ietf.org/html/rfc3659
        """
        try:
            newsegs = toSegments(self.workingDirectory, path)
        except InvalidPath:
            return defer.fail(FileNotFoundError(path))
        # Python 2 tuple-parameter unpacking: shell.stat fires with a
        # one-tuple containing the 'modified' timestamp (seconds since epoch),
        # which is rendered in the RFC 3659 YYYYMMDDHHMMSS (UTC) form.
        def cbStat((modified,)):
            return (FILE_STATUS, time.strftime('%Y%m%d%H%M%S', time.gmtime(modified)))
        return self.shell.stat(newsegs, ('modified',)).addCallback(cbStat)
def ftp_TYPE(self, type):
"""
REPRESENTATION TYPE (TYPE)
The argument specifies the representation type as described
in the Section on Data Representation and Storage. Several
types take a second parameter. The first parameter is
denoted by a single Telnet character, as is the second
Format parameter for ASCII and EBCDIC; the second parameter
for local byte is a decimal integer to indicate Bytesize.
The parameters are separated by a <SP> (Space, ASCII code
32).
"""
p = type.upper()
if p:
f = getattr(self, 'type_' + p[0], None)
if f is not None:
return f(p[1:])
return self.type_UNKNOWN(p)
return (SYNTAX_ERR,)
def type_A(self, code):
if code == '' or code == 'N':
self.binary = False
return (TYPE_SET_OK, 'A' + code)
else:
return defer.fail(CmdArgSyntaxError(code))
def type_I(self, code):
if code == '':
self.binary = True
return (TYPE_SET_OK, 'I')
else:
return defer.fail(CmdArgSyntaxError(code))
def type_UNKNOWN(self, code):
return defer.fail(CmdNotImplementedForArgError(code))
def ftp_SYST(self):
return NAME_SYS_TYPE
def ftp_STRU(self, structure):
p = structure.upper()
if p == 'F':
return (CMD_OK,)
return defer.fail(CmdNotImplementedForArgError(structure))
def ftp_MODE(self, mode):
p = mode.upper()
if p == 'S':
return (CMD_OK,)
return defer.fail(CmdNotImplementedForArgError(mode))
def ftp_MKD(self, path):
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
return self.shell.makeDirectory(newsegs).addCallback(lambda ign: (MKD_REPLY, path))
def ftp_RMD(self, path):
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
return self.shell.removeDirectory(newsegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
def ftp_DELE(self, path):
try:
newsegs = toSegments(self.workingDirectory, path)
except InvalidPath:
return defer.fail(FileNotFoundError(path))
return self.shell.removeFile(newsegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
def ftp_NOOP(self):
return (CMD_OK,)
def ftp_RNFR(self, fromName):
self._fromName = fromName
self.state = self.RENAMING
return (REQ_FILE_ACTN_PENDING_FURTHER_INFO,)
def ftp_RNTO(self, toName):
fromName = self._fromName
del self._fromName
self.state = self.AUTHED
try:
fromsegs = toSegments(self.workingDirectory, fromName)
tosegs = toSegments(self.workingDirectory, toName)
except InvalidPath:
return defer.fail(FileNotFoundError(fromName))
return self.shell.rename(fromsegs, tosegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
def ftp_FEAT(self):
"""
Advertise the features supported by the server.
http://tools.ietf.org/html/rfc2389
"""
self.sendLine(RESPONSE[FEAT_OK][0])
for feature in self.FEATURES:
self.sendLine(' ' + feature)
self.sendLine(RESPONSE[FEAT_OK][1])
def ftp_OPTS(self, option):
"""
Handle OPTS command.
http://tools.ietf.org/html/draft-ietf-ftpext-utf-8-option-00
"""
return self.reply(OPTS_NOT_IMPLEMENTED, option)
def ftp_QUIT(self):
self.reply(GOODBYE_MSG)
self.transport.loseConnection()
self.disconnected = True
    def cleanupDTP(self):
        """
        Call when DTP connection exits

        Tears down whichever kind of data-channel endpoint is active (a
        listening port for PASV, a connector for PORT), then drops the DTP
        factory and protocol instance references.
        """
        log.msg('cleanupDTP', debug=True)
        log.msg(self.dtpPort)
        # Clear self.dtpPort before shutting it down so re-entrant calls see
        # a cleaned-up state.
        dtpPort, self.dtpPort = self.dtpPort, None
        if interfaces.IListeningPort.providedBy(dtpPort):
            dtpPort.stopListening()
        elif interfaces.IConnector.providedBy(dtpPort):
            dtpPort.disconnect()
        else:
            assert False, "dtpPort should be an IListeningPort or IConnector, instead is %r" % (dtpPort,)
        self.dtpFactory.stopFactory()
        self.dtpFactory = None
        if self.dtpInstance is not None:
            self.dtpInstance = None
class FTPFactory(policies.LimitTotalConnectionsFactory):
    """
    A factory for producing ftp protocol instances

    @ivar timeOut: the protocol interpreter's idle timeout time in seconds,
        default is 600 seconds.

    @ivar passivePortRange: value forwarded to C{protocol.passivePortRange}.
    @type passivePortRange: C{iterator}
    """
    protocol = FTP
    overflowProtocol = FTPOverflowProtocol
    allowAnonymous = True
    userAnonymous = 'anonymous'
    timeOut = 600
    welcomeMessage = "Twisted %s FTP Server" % (copyright.version,)
    # Default: let the operating system pick the passive port (port 0).
    passivePortRange = xrange(0, 1)
    def __init__(self, portal=None, userAnonymous='anonymous'):
        self.portal = portal
        self.userAnonymous = userAnonymous
        self.instances = []
    def buildProtocol(self, addr):
        # The base factory wraps the FTP protocol in a connection-limiting
        # wrapper; configure the wrapped FTP instance, not the wrapper.
        p = policies.LimitTotalConnectionsFactory.buildProtocol(self, addr)
        if p is not None:
            p.wrappedProtocol.portal = self.portal
            p.wrappedProtocol.timeOut = self.timeOut
            p.wrappedProtocol.passivePortRange = self.passivePortRange
        return p
    def stopFactory(self):
        # make sure ftp instance's timeouts are set to None
        # to avoid reactor complaints
        [p.setTimeout(None) for p in self.instances if p.timeOut is not None]
        policies.LimitTotalConnectionsFactory.stopFactory(self)
# -- Cred Objects --
class IFTPShell(Interface):
    """
    An abstraction of the shell commands used by the FTP protocol for
    a given user account.

    All path names must be absolute.
    """
    def makeDirectory(path):
        """
        Create a directory.

        @param path: The path, as a list of segments, to create
        @type path: C{list} of C{unicode}

        @return: A Deferred which fires when the directory has been
            created, or which fails if the directory cannot be created.
        """
    def removeDirectory(path):
        """
        Remove a directory.

        @param path: The path, as a list of segments, to remove
        @type path: C{list} of C{unicode}

        @return: A Deferred which fires when the directory has been
            removed, or which fails if the directory cannot be removed.
        """
    def removeFile(path):
        """
        Remove a file.

        @param path: The path, as a list of segments, to remove
        @type path: C{list} of C{unicode}

        @return: A Deferred which fires when the file has been
            removed, or which fails if the file cannot be removed.
        """
    def rename(fromPath, toPath):
        """
        Rename a file or directory.

        @param fromPath: The current name of the path.
        @type fromPath: C{list} of C{unicode}

        @param toPath: The desired new name of the path.
        @type toPath: C{list} of C{unicode}

        @return: A Deferred which fires when the path has been
            renamed, or which fails if the path cannot be renamed.
        """
    def access(path):
        """
        Determine whether access to the given path is allowed.

        @param path: The path, as a list of segments

        @return: A Deferred which fires with None if access is allowed
            or which fails with a specific exception type if access is
            denied.
        """
    def stat(path, keys=()):
        """
        Retrieve information about the given path.

        This is like list, except it will never return results about
        child paths.

        @param path: The path, as a list of segments, to stat
        @param keys: A tuple of the metadata keys desired, as for L{list}.

        @return: A Deferred which fires with the requested values for the
            single entry at C{path}.
        """
    def list(path, keys=()):
        """
        Retrieve information about the given path.

        If the path represents a non-directory, the result list should
        have only one entry with information about that non-directory.
        Otherwise, the result list should have an element for each
        child of the directory.

        @param path: The path, as a list of segments, to list
        @type path: C{list} of C{unicode} or C{bytes}

        @param keys: A tuple of keys desired in the resulting
            dictionaries.

        @return: A Deferred which fires with a list of (name, list),
            where the name is the name of the entry as a unicode string or
            bytes and each list contains values corresponding to the requested
            keys.  The following are possible elements of keys, and the
            values which should be returned for them:

            - C{'size'}: size in bytes, as an integer (this is kinda required)
            - C{'directory'}: boolean indicating the type of this entry
            - C{'permissions'}: a bitvector (see os.stat(foo).st_mode)
            - C{'hardlinks'}: Number of hard links to this entry
            - C{'modified'}: number of seconds since the epoch since entry was
              modified
            - C{'owner'}: string indicating the user owner of this entry
            - C{'group'}: string indicating the group owner of this entry
        """
    def openForReading(path):
        """
        @param path: The path, as a list of segments, to open
        @type path: C{list} of C{unicode}

        @rtype: C{Deferred} which will fire with L{IReadFile}
        """
    def openForWriting(path):
        """
        @param path: The path, as a list of segments, to open
        @type path: C{list} of C{unicode}

        @rtype: C{Deferred} which will fire with L{IWriteFile}
        """
class IReadFile(Interface):
    """
    A file out of which bytes may be read.
    """
    def send(consumer):
        """
        Produce the contents of the given path to the given consumer.  This
        method may only be invoked once on each provider.

        @type consumer: C{IConsumer}

        @return: A Deferred which fires when the file has been
            consumed completely.
        """
class IWriteFile(Interface):
    """
    A file into which bytes may be written.
    """
    def receive():
        """
        Create a consumer which will write to this file.  This method may
        only be invoked once on each provider.

        @rtype: C{Deferred} of C{IConsumer}
        """
    def close():
        """
        Perform any post-write work that needs to be done.  This method may
        only be invoked once on each provider, and will always be invoked
        after receive().

        @rtype: C{Deferred} of anything: the value is ignored.  The FTP client
            will not see their upload request complete until this Deferred has
            been fired.
        """
def _getgroups(uid):
"""
Return the primary and supplementary groups for the given UID.
@type uid: C{int}
"""
result = []
pwent = pwd.getpwuid(uid)
result.append(pwent.pw_gid)
for grent in grp.getgrall():
if pwent.pw_name in grent.gr_mem:
result.append(grent.gr_gid)
return result
def _testPermissions(uid, gid, spath, mode='r'):
"""
checks to see if uid has proper permissions to access path with mode
@type uid: C{int}
@param uid: numeric user id
@type gid: C{int}
@param gid: numeric group id
@type spath: C{str}
@param spath: the path on the server to test
@type mode: C{str}
@param mode: 'r' or 'w' (read or write)
@rtype: C{bool}
@return: True if the given credentials have the specified form of
access to the given path
"""
if mode == 'r':
usr = stat.S_IRUSR
grp = stat.S_IRGRP
oth = stat.S_IROTH
amode = os.R_OK
elif mode == 'w':
usr = stat.S_IWUSR
grp = stat.S_IWGRP
oth = stat.S_IWOTH
amode = os.W_OK
else:
raise ValueError("Invalid mode %r: must specify 'r' or 'w'" % (mode,))
access = False
if os.path.exists(spath):
if uid == 0:
access = True
else:
s = os.stat(spath)
if usr & s.st_mode and uid == s.st_uid:
access = True
elif grp & s.st_mode and gid in _getgroups(uid):
access = True
elif oth & s.st_mode:
access = True
if access:
if not os.access(spath, amode):
access = False
log.msg("Filesystem grants permission to UID %d but it is inaccessible to me running as UID %d" % (
uid, os.getuid()))
return access
class FTPAnonymousShell(object):
    """
    An anonymous implementation of IFTPShell

    Read-only: every filesystem-modifying operation fails with
    L{AnonUserDeniedError}.

    @type filesystemRoot: L{twisted.python.filepath.FilePath}
    @ivar filesystemRoot: The path which is considered the root of
        this shell.
    """
    implements(IFTPShell)
    def __init__(self, filesystemRoot):
        self.filesystemRoot = filesystemRoot
    def _path(self, path):
        # Resolve a list of path segments against the shell's root.
        return self.filesystemRoot.descendant(path)
    def makeDirectory(self, path):
        # Anonymous users may not modify the filesystem.
        return defer.fail(AnonUserDeniedError())
    def removeDirectory(self, path):
        # Anonymous users may not modify the filesystem.
        return defer.fail(AnonUserDeniedError())
    def removeFile(self, path):
        # Anonymous users may not modify the filesystem.
        return defer.fail(AnonUserDeniedError())
    def rename(self, fromPath, toPath):
        # Anonymous users may not modify the filesystem.
        return defer.fail(AnonUserDeniedError())
    def receive(self, path):
        # Anonymous users may not upload.
        path = self._path(path)
        return defer.fail(AnonUserDeniedError())
    def openForReading(self, path):
        """
        Open C{path} for reading.

        @param path: The path, as a list of segments, to open.
        @type path: C{list} of C{unicode}

        @return: A L{Deferred} is returned that will fire with an object
            implementing L{IReadFile} if the file is successfully opened.  If
            C{path} is a directory, or if an exception is raised while trying
            to open the file, the L{Deferred} will fire with an error.
        """
        p = self._path(path)
        if p.isdir():
            # Normally, we would only check for EISDIR in open, but win32
            # returns EACCES in this case, so we check before
            return defer.fail(IsADirectoryError(path))
        try:
            f = p.open('r')
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(_FileReader(f))
    def openForWriting(self, path):
        """
        Reject write attempts by anonymous users with
        L{PermissionDeniedError}.
        """
        return defer.fail(PermissionDeniedError("STOR not allowed"))
    def access(self, path):
        # Grant access iff the path exists and its contents can be listed.
        p = self._path(path)
        if not p.exists():
            # Again, win32 doesn't report a sane error after, so let's fail
            # early if we can
            return defer.fail(FileNotFoundError(path))
        # For now, just see if we can os.listdir() it
        try:
            p.listdir()
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)
    def stat(self, path, keys=()):
        """
        Return the requested metadata for the single node at C{path},
        without listing children.
        """
        p = self._path(path)
        if p.isdir():
            try:
                statResult = self._statNode(p, keys)
            except (IOError, OSError), e:
                return errnoToFailure(e.errno, path)
            except:
                return defer.fail()
            else:
                return defer.succeed(statResult)
        else:
            # For non-directories, reuse list() and take the metadata of the
            # single resulting entry.
            return self.list(path, keys).addCallback(lambda res: res[0][1])
    def list(self, path, keys=()):
        """
        Return the list of files at given C{path}, adding C{keys} stat
        informations if specified.

        @param path: the directory or file to check.
        @type path: C{str}

        @param keys: the list of desired metadata
        @type keys: C{list} of C{str}
        """
        filePath = self._path(path)
        if filePath.isdir():
            entries = filePath.listdir()
            fileEntries = [filePath.child(p) for p in entries]
        elif filePath.isfile():
            # A single file lists as itself, named by its path relative to
            # the shell root.
            entries = [os.path.join(*filePath.segmentsFrom(self.filesystemRoot))]
            fileEntries = [filePath]
        else:
            return defer.fail(FileNotFoundError(path))
        results = []
        for fileName, filePath in zip(entries, fileEntries):
            ent = []
            results.append((fileName, ent))
            if keys:
                try:
                    ent.extend(self._statNode(filePath, keys))
                except (IOError, OSError), e:
                    return errnoToFailure(e.errno, fileName)
                except:
                    return defer.fail()
        return defer.succeed(results)
    def _statNode(self, filePath, keys):
        """
        Shortcut method to get stat info on a node.

        @param filePath: the node to stat.
        @type filePath: C{filepath.FilePath}

        @param keys: the stat keys to get.
        @type keys: C{iterable}
        """
        # Refresh cached stat data, then dispatch each key to the matching
        # _stat_<key> accessor.
        filePath.restat()
        return [getattr(self, '_stat_' + k)(filePath.statinfo) for k in keys]
    # Simple stat fields are plain attribute getters on the os.stat result.
    _stat_size = operator.attrgetter('st_size')
    _stat_permissions = operator.attrgetter('st_mode')
    _stat_hardlinks = operator.attrgetter('st_nlink')
    _stat_modified = operator.attrgetter('st_mtime')
    def _stat_owner(self, st):
        # Map the uid to a user name when the pwd module is available; fall
        # back to the numeric id as a string.
        if pwd is not None:
            try:
                return pwd.getpwuid(st.st_uid)[0]
            except KeyError:
                pass
        return str(st.st_uid)
    def _stat_group(self, st):
        # Map the gid to a group name when the grp module is available; fall
        # back to the numeric id as a string.
        if grp is not None:
            try:
                return grp.getgrgid(st.st_gid)[0]
            except KeyError:
                pass
        return str(st.st_gid)
    def _stat_directory(self, st):
        # True when the mode bits mark this node as a directory.
        return bool(st.st_mode & stat.S_IFDIR)
class _FileReader(object):
    """
    Adapt an open file object to L{IReadFile}, closing it when the transfer
    finishes.
    """
    implements(IReadFile)
    def __init__(self, fObj):
        self.fObj = fObj
        self._send = False
    def _close(self, passthrough):
        # Mark the file consumed, release it, and pass the transfer result
        # (or failure) straight through the callback chain.
        self._send = True
        self.fObj.close()
        return passthrough
    def send(self, consumer):
        assert not self._send, "Can only call IReadFile.send *once* per instance"
        self._send = True
        transfer = basic.FileSender().beginFileTransfer(self.fObj, consumer)
        transfer.addBoth(self._close)
        return transfer
class FTPShell(FTPAnonymousShell):
    """
    An authenticated implementation of L{IFTPShell}.

    Extends the anonymous shell with write operations (mkdir, rmdir,
    delete, rename, upload).
    """
    def makeDirectory(self, path):
        """
        Create the directory (and any missing parents) at C{path}.
        """
        p = self._path(path)
        try:
            p.makedirs()
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)
    def removeDirectory(self, path):
        """
        Remove the directory at C{path}; fails if it is a regular file.
        """
        p = self._path(path)
        if p.isfile():
            # Win32 returns the wrong errno when rmdir is called on a file
            # instead of a directory, so as we have the info here, let's fail
            # early with a pertinent error
            return defer.fail(IsNotADirectoryError(path))
        try:
            os.rmdir(p.path)
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)
    def removeFile(self, path):
        """
        Remove the regular file at C{path}; fails if it is a directory.
        """
        p = self._path(path)
        if p.isdir():
            # Win32 returns the wrong errno when remove is called on a
            # directory instead of a file, so as we have the info here,
            # let's fail early with a pertinent error
            return defer.fail(IsADirectoryError(path))
        try:
            p.remove()
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)
    def rename(self, fromPath, toPath):
        """
        Rename C{fromPath} to C{toPath}.
        """
        fp = self._path(fromPath)
        tp = self._path(toPath)
        try:
            os.rename(fp.path, tp.path)
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, fromPath)
        except:
            return defer.fail()
        else:
            return defer.succeed(None)
    def openForWriting(self, path):
        """
        Open C{path} for writing.

        @param path: The path, as a list of segments, to open.
        @type path: C{list} of C{unicode}

        @return: A L{Deferred} is returned that will fire with an object
            implementing L{IWriteFile} if the file is successfully opened.  If
            C{path} is a directory, or if an exception is raised while trying
            to open the file, the L{Deferred} will fire with an error.
        """
        p = self._path(path)
        if p.isdir():
            # Normally, we would only check for EISDIR in open, but win32
            # returns EACCES in this case, so we check before
            return defer.fail(IsADirectoryError(path))
        try:
            fObj = p.open('w')
        except (IOError, OSError), e:
            return errnoToFailure(e.errno, path)
        except:
            return defer.fail()
        return defer.succeed(_FileWriter(fObj))
class _FileWriter(object):
    """
    Adapt an open file object to L{IWriteFile}.
    """
    implements(IWriteFile)
    def __init__(self, fObj):
        self.fObj = fObj
        self._receive = False
    def receive(self):
        assert not self._receive, "Can only call IWriteFile.receive *once* per instance"
        self._receive = True
        # FileConsumer will close the file object
        consumer = FileConsumer(self.fObj)
        return defer.succeed(consumer)
    def close(self):
        # No post-write work is needed; the consumer closes the file.
        return defer.succeed(None)
class BaseFTPRealm:
    """
    Base class for simple FTP realms which provides an easy hook for specifying
    the home directory for each user.
    """
    implements(portal.IRealm)
    def __init__(self, anonymousRoot):
        self.anonymousRoot = filepath.FilePath(anonymousRoot)
    def getHomeDirectory(self, avatarId):
        """
        Return a L{FilePath} representing the home directory of the given
        avatar.  Override this in a subclass.

        @param avatarId: A user identifier returned from a credentials checker.
        @type avatarId: C{str}

        @rtype: L{FilePath}
        """
        raise NotImplementedError(
            "%r did not override getHomeDirectory" % (self.__class__,))
    def requestAvatar(self, avatarId, mind, *interfaces):
        for iface in interfaces:
            if iface is not IFTPShell:
                continue
            # Anonymous logins get the read-only shell rooted at
            # anonymousRoot; everyone else gets a full shell in their home.
            if avatarId is checkers.ANONYMOUS:
                avatar = FTPAnonymousShell(self.anonymousRoot)
            else:
                avatar = FTPShell(self.getHomeDirectory(avatarId))
            logout = getattr(avatar, 'logout', lambda: None)
            return (IFTPShell, avatar, logout)
        raise NotImplementedError(
            "Only IFTPShell interface is supported by this realm")
class FTPRealm(BaseFTPRealm):
    """
    @type anonymousRoot: L{twisted.python.filepath.FilePath}
    @ivar anonymousRoot: Root of the filesystem to which anonymous
        users will be granted access.

    @type userHome: L{filepath.FilePath}
    @ivar userHome: Root of the filesystem containing user home directories.
    """
    def __init__(self, anonymousRoot, userHome='/home'):
        BaseFTPRealm.__init__(self, anonymousRoot)
        self.userHome = filepath.FilePath(userHome)
    def getHomeDirectory(self, avatarId):
        """
        Use C{avatarId} as a single path segment to construct a child of
        C{self.userHome} and return that child.
        """
        home = self.userHome.child(avatarId)
        return home
class SystemFTPRealm(BaseFTPRealm):
    """
    L{SystemFTPRealm} uses system user account information to decide what the
    home directory for a particular avatarId is.

    This works on POSIX but probably is not reliable on Windows.
    """
    def getHomeDirectory(self, avatarId):
        """
        Return the system-defined home directory of the system user account
        with the name C{avatarId}.
        """
        expanded = os.path.expanduser('~' + avatarId)
        # expanduser leaves the string untouched when the account is
        # unknown; treat that as an authorization failure.
        if expanded.startswith('~'):
            raise cred_error.UnauthorizedLogin()
        return filepath.FilePath(expanded)
# --- FTP CLIENT -------------------------------------------------------------
####
# And now for the client...
# Notes:
# * Reference: http://cr.yp.to/ftp.html
# * FIXME: Does not support pipelining (which is not supported by all
# servers anyway). This isn't a functionality limitation, just a
# small performance issue.
# * Only has a rudimentary understanding of FTP response codes (although
# the full response is passed to the caller if they so choose).
# * Assumes that USER and PASS should always be sent
# * Always sets TYPE I (binary mode)
# * Doesn't understand any of the weird, obscure TELNET stuff (\377...)
# * FIXME: Doesn't share any code with the FTPServer
class ConnectionLost(FTPError):
    """
    The control connection was lost while commands were still outstanding.
    """
    pass
class CommandFailed(FTPError):
    """
    The server replied to a command with a failure (4xx/5xx) response.
    """
    pass
class BadResponse(FTPError):
    """
    The server sent a response that could not be interpreted.
    """
    pass
class UnexpectedResponse(FTPError):
    """
    The server sent a response when no command was awaiting one.
    """
    pass
class UnexpectedData(FTPError):
    """
    Data arrived on a connection that was expected to be send-only.
    """
    pass
class FTPCommand:
    """
    A single queued FTP command, paired with the deferred that fires with
    its response.
    """
    def __init__(self, text=None, public=0):
        self.text = text
        self.deferred = defer.Deferred()
        self.ready = 1
        self.public = public
        self.transferDeferred = None
    def fail(self, failure):
        # Only commands issued on behalf of application code ("public")
        # propagate the error; internal commands swallow it silently.
        if not self.public:
            return
        self.deferred.errback(failure)
class ProtocolWrapper(protocol.Protocol):
    """
    Pass every protocol event through to a wrapped protocol, and fire a
    deferred once the connection is lost.
    """
    def __init__(self, original, deferred):
        # original: the protocol to forward events to.
        # deferred: fired with None when the connection closes.
        self.original = original
        self.deferred = deferred
    def makeConnection(self, transport):
        self.original.makeConnection(transport)
    def dataReceived(self, data):
        self.original.dataReceived(data)
    def connectionLost(self, reason):
        self.original.connectionLost(reason)
        # Signal that transfer has completed
        self.deferred.callback(None)
class IFinishableConsumer(interfaces.IConsumer):
    """
    A Consumer for producers that finish.

    @since: 11.0
    """
    def finish():
        """
        The producer has finished producing.
        """
class SenderProtocol(protocol.Protocol):
    """
    A send-only data-connection protocol that also acts as a finishable
    consumer: bytes written to it go to the transport, and C{finish}
    closes the connection.
    """
    implements(IFinishableConsumer)
    def __init__(self):
        # Fired upon connection
        self.connectedDeferred = defer.Deferred()
        # Fired upon disconnection
        self.deferred = defer.Deferred()
    #Protocol stuff
    def dataReceived(self, data):
        # This side of the data channel never expects incoming bytes.
        raise UnexpectedData(
            "Received data from the server on a "
            "send-only data-connection"
        )
    def makeConnection(self, transport):
        protocol.Protocol.makeConnection(self, transport)
        self.connectedDeferred.callback(self)
    def connectionLost(self, reason):
        # A clean close is success; anything else propagates as an error.
        if reason.check(error.ConnectionDone):
            self.deferred.callback('connection done')
        else:
            self.deferred.errback(reason)
    #IFinishableConsumer stuff
    def write(self, data):
        self.transport.write(data)
    def registerProducer(self, producer, streaming):
        """
        Register the given producer with our transport.
        """
        self.transport.registerProducer(producer, streaming)
    def unregisterProducer(self):
        """
        Unregister the previously registered producer.
        """
        self.transport.unregisterProducer()
    def finish(self):
        self.transport.loseConnection()
def decodeHostPort(line):
    """
    Decode an FTP response specifying a host and port.

    The six comma-separated numbers (h1,h2,h3,h4,p1,p2) become a dotted-quad
    host and a 16-bit port; each number must be in 0..255.

    @return: a 2-tuple of (host, port).
    """
    abcdef = re.sub('[^0-9, ]', '', line)
    numbers = [int(chunk.strip()) for chunk in abcdef.split(',')]
    for value in numbers:
        if not 0 <= value <= 255:
            raise ValueError("Out of range", line, value)
    a, b, c, d, e, f = numbers
    host = "%s.%s.%s.%s" % (a, b, c, d)
    port = (int(e) << 8) + int(f)
    return host, port
def encodeHostPort(host, port):
    """
    Encode a host and port as the six comma-separated numbers used by the
    FTP PORT/PASV commands (h1,h2,h3,h4,p1,p2).
    """
    parts = host.split('.')
    parts.append(str(port >> 8))
    parts.append(str(port % 256))
    return ','.join(parts)
def _unwrapFirstError(failure):
    """
    Unwrap a L{defer.FirstError} failure, returning the failure that
    triggered it; any other failure type is re-raised by C{trap}.
    """
    failure.trap(defer.FirstError)
    inner = failure.value.subFailure
    return inner
class FTPDataPortFactory(protocol.ServerFactory):
    """
    Factory for data connections that use the PORT command
    (i.e. "active" transfers)
    """
    # Suppress connection log noise for data channels.
    noisy = 0
    def buildProtocol(self, addr):
        # This is a bit hackish -- we already have a Protocol instance,
        # so just return it instead of making a new one
        # FIXME: Reject connections from the wrong address/port
        #        (potential security problem)
        self.protocol.factory = self
        # Only one data connection is expected; stop listening immediately.
        self.port.loseConnection()
        return self.protocol
class FTPClientBasic(basic.LineReceiver):
"""
Foundations of an FTP client.
"""
debug = False
def __init__(self):
self.actionQueue = []
self.greeting = None
self.nextDeferred = defer.Deferred().addCallback(self._cb_greeting)
self.nextDeferred.addErrback(self.fail)
self.response = []
self._failed = 0
    def fail(self, error):
        """
        Give an error to any queued deferreds.

        @param error: the error to deliver to all pending command deferreds.
        """
        self._fail(error)
    def _fail(self, error):
        """
        Errback all queued deferreds.

        Wraps C{error} in a L{ConnectionLost} failure and delivers it to the
        in-flight command (if any) and every queued command.  Idempotent:
        subsequent calls return the error unchanged.
        """
        if self._failed:
            # We're recursing; bail out here for simplicity
            return error
        self._failed = 1
        if self.nextDeferred:
            try:
                self.nextDeferred.errback(failure.Failure(ConnectionLost('FTP connection lost', error)))
            except defer.AlreadyCalledError:
                # The in-flight deferred already got a result; nothing to do.
                pass
        for ftpCommand in self.actionQueue:
            ftpCommand.fail(failure.Failure(ConnectionLost('FTP connection lost', error)))
        return error
    def _cb_greeting(self, greeting):
        # Store the server's initial greeting lines for later inspection.
        self.greeting = greeting
def sendLine(self, line):
"""
(Private) Sends a line, unless line is None.
"""
if line is None:
return
basic.LineReceiver.sendLine(self, line)
    def sendNextCommand(self):
        """
        (Private) Processes the next command in the queue.

        Pops the front command and sends it; if the command is not yet ready
        (e.g. waiting for a data port), it is re-queued and retried a second
        later.
        """
        ftpCommand = self.popCommandQueue()
        if ftpCommand is None:
            # Nothing queued; no response is expected until the next command.
            self.nextDeferred = None
            return
        if not ftpCommand.ready:
            # Put the command back at the front and poll again shortly.
            self.actionQueue.insert(0, ftpCommand)
            reactor.callLater(1.0, self.sendNextCommand)
            self.nextDeferred = None
            return
        # FIXME: this if block doesn't belong in FTPClientBasic, it belongs in
        # FTPClient.
        if ftpCommand.text == 'PORT':
            # The PORT command's text is generated lazily, once the local
            # data port is known.
            self.generatePortCommand(ftpCommand)
        if self.debug:
            log.msg('<-- %s' % ftpCommand.text)
        self.nextDeferred = ftpCommand.deferred
        self.sendLine(ftpCommand.text)
def queueCommand(self, ftpCommand):
"""
Add an FTPCommand object to the queue.
If it's the only thing in the queue, and we are connected and we aren't
waiting for a response of an earlier command, the command will be sent
immediately.
@param ftpCommand: an L{FTPCommand}
"""
self.actionQueue.append(ftpCommand)
if (len(self.actionQueue) == 1 and self.transport is not None and
self.nextDeferred is None):
self.sendNextCommand()
def queueStringCommand(self, command, public=1):
"""
Queues a string to be issued as an FTP command
@param command: string of an FTP command to queue
@param public: a flag intended for internal use by FTPClient. Don't
change it unless you know what you're doing.
@return: a L{Deferred} that will be called when the response to the
command has been received.
"""
ftpCommand = FTPCommand(command, public)
self.queueCommand(ftpCommand)
return ftpCommand.deferred
def popCommandQueue(self):
"""
Return the front element of the command queue, or None if empty.
"""
if self.actionQueue:
return self.actionQueue.pop(0)
else:
return None
    def queueLogin(self, username, password):
        """
        Login: send the username, send the password.

        If the password is C{None}, the PASS command won't be sent. Also, if
        the response to the USER command has a response code of 230 (User logged
        in), then PASS won't be sent either.

        @param username: FTP account name to send with USER.
        @param password: password to send with PASS, or C{None} to skip PASS.
        """
        # Prepare the USER command
        deferreds = []
        userDeferred = self.queueStringCommand('USER ' + username, public=0)
        deferreds.append(userDeferred)
        # Prepare the PASS command (if a password is given)
        if password is not None:
            passwordCmd = FTPCommand('PASS ' + password, public=0)
            self.queueCommand(passwordCmd)
            deferreds.append(passwordCmd.deferred)
            # Avoid sending PASS if the response to USER is 230.
            # (ref: http://cr.yp.to/ftp/user.html#user)
            def cancelPasswordIfNotNeeded(response):
                if response[0].startswith('230'):
                    # No password needed!  Remove the queued (not yet sent)
                    # PASS command.
                    self.actionQueue.remove(passwordCmd)
                return response
            userDeferred.addCallback(cancelPasswordIfNotNeeded)
        # Error handling.
        for deferred in deferreds:
            # If something goes wrong, call fail
            deferred.addErrback(self.fail)
            # But also swallow the error, so we don't cause spurious errors
            deferred.addErrback(lambda x: None)
    def lineReceived(self, line):
        """
        (Private) Parses the response messages from the FTP server.

        Response lines are accumulated until the final line of a reply (three
        digits followed by a space) arrives; the whole reply is then
        dispatched to the Deferred of the command that is awaiting it.
        """
        # Add this line to the current response
        if self.debug:
            log.msg('--> %s' % line)
        self.response.append(line)
        # Bail out if this isn't the last line of a response
        # The last line of response starts with 3 digits followed by a space
        codeIsValid = re.match(r'\d{3} ', line)
        if not codeIsValid:
            return
        code = line[0:3]
        # Ignore marks (1xx: preliminary replies; the real reply follows)
        if code[0] == '1':
            return
        # Check that we were expecting a response
        if self.nextDeferred is None:
            self.fail(UnexpectedResponse(self.response))
            return
        # Reset the response
        response = self.response
        self.response = []
        # Look for a success or error code, and call the appropriate callback
        if code[0] in ('2', '3'):
            # Success
            self.nextDeferred.callback(response)
        elif code[0] in ('4', '5'):
            # Failure
            self.nextDeferred.errback(failure.Failure(CommandFailed(response)))
        else:
            # This shouldn't happen unless something screwed up.
            log.msg('Server sent invalid response code %s' % (code,))
            self.nextDeferred.errback(failure.Failure(BadResponse(response)))
        # Run the next command
        self.sendNextCommand()
    def connectionLost(self, reason):
        """
        The control connection went away: fail every outstanding and queued
        command with `reason`.
        """
        self._fail(reason)
class _PassiveConnectionFactory(protocol.ClientFactory):
    """
    Client factory that hands out a single, pre-built protocol instance,
    used to connect to the data port the server advertised in its PASV
    reply.
    """
    noisy = False
    def __init__(self, protoInstance):
        # The one protocol instance this factory will ever produce.
        self.protoInstance = protoInstance
    def buildProtocol(self, ignored):
        self.protoInstance.factory = self
        return self.protoInstance
    def clientConnectionFailed(self, connector, reason):
        # Propagate the connection failure to whoever is waiting on the
        # data-transfer Deferred.
        e = FTPError('Connection Failed', reason)
        self.protoInstance.deferred.errback(e)
class FTPClient(FTPClientBasic):
    """
    L{FTPClient} is a client implementation of the FTP protocol which
    exposes FTP commands as methods which return L{Deferred}s.

    Each command method returns a L{Deferred} which is called back when a
    successful response code (2xx or 3xx) is received from the server or
    which is error backed if an error response code (4xx or 5xx) is received
    from the server or if a protocol violation occurs. If an error response
    code is received, the L{Deferred} fires with a L{Failure} wrapping a
    L{CommandFailed} instance. The L{CommandFailed} instance is created
    with a list of the response lines received from the server.

    See U{RFC 959<http://www.ietf.org/rfc/rfc959.txt>} for error code
    definitions.

    Both active and passive transfers are supported.

    @ivar passive: See description in __init__.
    """
    # How passive-mode data connections are established; a class attribute
    # so it can be overridden (e.g. by tests or subclasses).
    connectFactory = reactor.connectTCP
    def __init__(self, username='anonymous',
                 password='twisted@twistedmatrix.com',
                 passive=1):
        """
        Constructor.

        I will login as soon as I receive the welcome message from the server.

        @param username: FTP username
        @param password: FTP password
        @param passive: flag that controls if I use active or passive data
            connections. You can also change this after construction by
            assigning to C{self.passive}.
        """
        FTPClientBasic.__init__(self)
        self.queueLogin(username, password)
        self.passive = passive
    def fail(self, error):
        """
        Disconnect, and also give an error to any queued deferreds.
        """
        self.transport.loseConnection()
        self._fail(error)
    def receiveFromConnection(self, commands, protocol):
        """
        Retrieves a file or listing generated by the given command,
        feeding it to the given protocol.

        @param commands: list of strings of FTP commands to execute then receive
            the results of (e.g. C{LIST}, C{RETR})
        @param protocol: A L{Protocol} B{instance} e.g. an
            L{FTPFileListProtocol}, or something that can be adapted to one.
            Typically this will be an L{IConsumer} implementation.

        @return: L{Deferred}.
        """
        protocol = interfaces.IProtocol(protocol)
        wrapper = ProtocolWrapper(protocol, defer.Deferred())
        return self._openDataConnection(commands, wrapper)
    def queueLogin(self, username, password):
        """
        Login: send the username, send the password, and
        set retrieval mode to binary
        """
        FTPClientBasic.queueLogin(self, username, password)
        d = self.queueStringCommand('TYPE I', public=0)
        # If something goes wrong, call fail
        d.addErrback(self.fail)
        # But also swallow the error, so we don't cause spurious errors
        d.addErrback(lambda x: None)
    def sendToConnection(self, commands):
        """
        XXX

        @return: A tuple of two L{Deferred}s:
                 - L{Deferred} L{IFinishableConsumer}. You must call
                   the C{finish} method on the IFinishableConsumer when the file
                   is completely transferred.
                 - L{Deferred} list of control-connection responses.
        """
        s = SenderProtocol()
        r = self._openDataConnection(commands, s)
        return (s.connectedDeferred, r)
    def _openDataConnection(self, commands, protocol):
        """
        Issue `commands` and attach `protocol` to the resulting data
        connection, using PASV or PORT depending on C{self.passive}.

        This method returns a DeferredList.
        """
        cmds = [FTPCommand(command, public=1) for command in commands]
        cmdsDeferred = defer.DeferredList([cmd.deferred for cmd in cmds],
                            fireOnOneErrback=True, consumeErrors=True)
        cmdsDeferred.addErrback(_unwrapFirstError)
        if self.passive:
            # Hack: use a mutable object to sneak a variable out of the
            # scope of doPassive
            _mutable = [None]
            def doPassive(response):
                """Connect to the port specified in the response to PASV"""
                host, port = decodeHostPort(response[-1][4:])
                f = _PassiveConnectionFactory(protocol)
                _mutable[0] = self.connectFactory(host, port, f)
            pasvCmd = FTPCommand('PASV')
            self.queueCommand(pasvCmd)
            pasvCmd.deferred.addCallback(doPassive).addErrback(self.fail)
            # Wait for the control commands, the PASV reply, and the data
            # transfer itself before declaring the operation finished.
            results = [cmdsDeferred, pasvCmd.deferred, protocol.deferred]
            d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
            d.addErrback(_unwrapFirstError)
            # Ensure the connection is always closed
            def close(x, m=_mutable):
                m[0] and m[0].disconnect()
                return x
            d.addBoth(close)
        else:
            # We just place a marker command in the queue, and will fill in
            # the host and port numbers later (see generatePortCommand)
            portCmd = FTPCommand('PORT')
            # Ok, now we jump through a few hoops here.
            # This is the problem: a transfer is not to be trusted as complete
            # until we get both the "226 Transfer complete" message on the
            # control connection, and the data socket is closed. Thus, we use
            # a DeferredList to make sure we only fire the callback at the
            # right time.
            portCmd.transferDeferred = protocol.deferred
            portCmd.protocol = protocol
            portCmd.deferred.addErrback(portCmd.transferDeferred.errback)
            self.queueCommand(portCmd)
            # Create dummy functions for the next callback to call.
            # These will also be replaced with real functions in
            # generatePortCommand.
            portCmd.loseConnection = lambda result: result
            portCmd.fail = lambda error: error
            # Ensure that the connection always gets closed
            cmdsDeferred.addErrback(lambda e, pc=portCmd: pc.fail(e) or e)
            results = [cmdsDeferred, portCmd.deferred, portCmd.transferDeferred]
            d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
            d.addErrback(_unwrapFirstError)
        for cmd in cmds:
            self.queueCommand(cmd)
        return d
    def generatePortCommand(self, portCmd):
        """
        (Private) Generates the text of a given PORT command.
        """
        # The problem is that we don't create the listening port until we need
        # it for various reasons, and so we have to muck about to figure out
        # what interface and port it's listening on, and then finally we can
        # create the text of the PORT command to send to the FTP server.
        # FIXME: This method is far too ugly.
        # FIXME: The best solution is probably to only create the data port
        #        once per FTPClient, and just recycle it for each new download.
        #        This should be ok, because we don't pipeline commands.
        # Start listening on a port
        factory = FTPDataPortFactory()
        factory.protocol = portCmd.protocol
        listener = reactor.listenTCP(0, factory)
        factory.port = listener
        # Ensure we close the listening port if something goes wrong
        def listenerFail(error, listener=listener):
            if listener.connected:
                listener.loseConnection()
            return error
        portCmd.fail = listenerFail
        # Construct crufty FTP magic numbers that represent host & port
        host = self.transport.getHost().host
        port = listener.getHost().port
        portCmd.text = 'PORT ' + encodeHostPort(host, port)
    def escapePath(self, path):
        """
        Returns a FTP escaped path (replace newlines with nulls).
        """
        # Escape newline characters
        return path.replace('\n', '\0')
    def retrieveFile(self, path, protocol, offset=0):
        """
        Retrieve a file from the given path

        This method issues the 'RETR' FTP command.

        The file is fed into the given Protocol instance. The data connection
        will be passive if self.passive is set.

        @param path: path to file that you wish to receive.
        @param protocol: a L{Protocol} instance.
        @param offset: offset to start downloading from

        @return: L{Deferred}
        """
        cmds = ['RETR ' + self.escapePath(path)]
        if offset:
            # REST must precede RETR to resume mid-file.
            cmds.insert(0, ('REST ' + str(offset)))
        return self.receiveFromConnection(cmds, protocol)
    retr = retrieveFile
    def storeFile(self, path, offset=0):
        """
        Store a file at the given path.

        This method issues the 'STOR' FTP command.

        @return: A tuple of two L{Deferred}s:
                 - L{Deferred} L{IFinishableConsumer}. You must call
                   the C{finish} method on the IFinishableConsumer when the file
                   is completely transferred.
                 - L{Deferred} list of control-connection responses.
        """
        cmds = ['STOR ' + self.escapePath(path)]
        if offset:
            # REST must precede STOR to resume mid-file.
            cmds.insert(0, ('REST ' + str(offset)))
        return self.sendToConnection(cmds)
    stor = storeFile
    def rename(self, pathFrom, pathTo):
        """
        Rename a file.

        This method issues the I{RNFR}/I{RNTO} command sequence to rename
        C{pathFrom} to C{pathTo}.

        @param pathFrom: the absolute path to the file to be renamed
        @type pathFrom: C{str}
        @param pathTo: the absolute path to rename the file to.
        @type pathTo: C{str}

        @return: A L{Deferred} which fires when the rename operation has
            succeeded or failed. If it succeeds, the L{Deferred} is called
            back with a two-tuple of lists. The first list contains the
            responses to the I{RNFR} command. The second list contains the
            responses to the I{RNTO} command. If either I{RNFR} or I{RNTO}
            fails, the L{Deferred} is errbacked with L{CommandFailed} or
            L{BadResponse}.
        @rtype: L{Deferred}
        @since: 8.2
        """
        renameFrom = self.queueStringCommand('RNFR ' + self.escapePath(pathFrom))
        renameTo = self.queueStringCommand('RNTO ' + self.escapePath(pathTo))
        fromResponse = []
        # Use a separate Deferred for the ultimate result so that Deferred
        # chaining can't interfere with its result.
        result = defer.Deferred()
        # Bundle up all the responses
        result.addCallback(lambda toResponse: (fromResponse, toResponse))
        def ebFrom(failure):
            # Make sure the RNTO doesn't run if the RNFR failed.
            self.popCommandQueue()
            result.errback(failure)
        # Save the RNFR response to pass to the result Deferred later
        renameFrom.addCallbacks(fromResponse.extend, ebFrom)
        # Hook up the RNTO to the result Deferred as well
        renameTo.chainDeferred(result)
        return result
    def list(self, path, protocol):
        """
        Retrieve a file listing into the given protocol instance.

        This method issues the 'LIST' FTP command.

        @param path: path to get a file listing for.
        @param protocol: a L{Protocol} instance, probably a
            L{FTPFileListProtocol} instance. It can cope with most common file
            listing formats.

        @return: L{Deferred}
        """
        if path is None:
            path = ''
        return self.receiveFromConnection(['LIST ' + self.escapePath(path)], protocol)
    def nlst(self, path, protocol):
        """
        Retrieve a short file listing into the given protocol instance.

        This method issues the 'NLST' FTP command.

        NLST (should) return a list of filenames, one per line.

        @param path: path to get short file listing for.
        @param protocol: a L{Protocol} instance.
        """
        if path is None:
            path = ''
        return self.receiveFromConnection(['NLST ' + self.escapePath(path)], protocol)
    def cwd(self, path):
        """
        Issues the CWD (Change Working Directory) command.

        @return: a L{Deferred} that will be called when done.
        """
        return self.queueStringCommand('CWD ' + self.escapePath(path))
    def makeDirectory(self, path):
        """
        Make a directory

        This method issues the MKD command.

        @param path: The path to the directory to create.
        @type path: C{str}

        @return: A L{Deferred} which fires when the server responds. If the
            directory is created, the L{Deferred} is called back with the
            server response. If the server response indicates the directory
            was not created, the L{Deferred} is errbacked with a L{Failure}
            wrapping L{CommandFailed} or L{BadResponse}.
        @rtype: L{Deferred}
        @since: 8.2
        """
        return self.queueStringCommand('MKD ' + self.escapePath(path))
    def removeFile(self, path):
        """
        Delete a file on the server.

        L{removeFile} issues a I{DELE} command to the server to remove the
        indicated file. Note that this command cannot remove a directory.

        @param path: The path to the file to delete. May be relative to the
            current dir.
        @type path: C{str}

        @return: A L{Deferred} which fires when the server responds. On error,
            it is errbacked with either L{CommandFailed} or L{BadResponse}. On
            success, it is called back with a list of response lines.
        @rtype: L{Deferred}
        @since: 8.2
        """
        return self.queueStringCommand('DELE ' + self.escapePath(path))
    def removeDirectory(self, path):
        """
        Delete a directory on the server.

        L{removeDirectory} issues a I{RMD} command to the server to remove the
        indicated directory. Described in RFC959.

        @param path: The path to the directory to delete. May be relative to
            the current working directory.
        @type path: C{str}

        @return: A L{Deferred} which fires when the server responds. On error,
            it is errbacked with either L{CommandFailed} or L{BadResponse}. On
            success, it is called back with a list of response lines.
        @rtype: L{Deferred}
        @since: 11.1
        """
        return self.queueStringCommand('RMD ' + self.escapePath(path))
    def cdup(self):
        """
        Issues the CDUP (Change Directory UP) command.

        @return: a L{Deferred} that will be called when done.
        """
        return self.queueStringCommand('CDUP')
    def pwd(self):
        """
        Issues the PWD (Print Working Directory) command.

        The L{getDirectory} does the same job but automatically parses the
        result.

        @return: a L{Deferred} that will be called when done. It is up to the
            caller to interpret the response, but the L{parsePWDResponse} method
            in this module should work.
        """
        return self.queueStringCommand('PWD')
    def getDirectory(self):
        """
        Returns the current remote directory.

        @return: a L{Deferred} that will be called back with a C{str} giving
            the remote directory or which will errback with L{CommandFailed}
            if an error response is returned.
        """
        def cbParse(result):
            try:
                # The only valid code is 257
                if int(result[0].split(' ', 1)[0]) != 257:
                    raise ValueError
            except (IndexError, ValueError):
                return failure.Failure(CommandFailed(result))
            path = parsePWDResponse(result[0])
            if path is None:
                return failure.Failure(CommandFailed(result))
            return path
        return self.pwd().addCallback(cbParse)
    def quit(self):
        """
        Issues the I{QUIT} command.

        @return: A L{Deferred} that fires when the server acknowledges the
            I{QUIT} command. The transport should not be disconnected until
            this L{Deferred} fires.
        """
        return self.queueStringCommand('QUIT')
class FTPFileListProtocol(basic.LineReceiver):
    """
    Parser for standard FTP file listings

    This is the evil required to match::

        -rw-r--r--   1 root     other        531 Jan 29 03:26 README

    If you need different evil for a wacky FTP server, you can
    override either C{fileLinePattern} or C{parseDirectoryLine()}.

    It populates the instance attribute self.files, which is a list containing
    dicts with the following keys (examples from the above line):

        - filetype: e.g. 'd' for directories, or '-' for an ordinary file
        - perms: e.g. 'rw-r--r--'
        - nlinks: e.g. 1
        - owner: e.g. 'root'
        - group: e.g. 'other'
        - size: e.g. 531
        - date: e.g. 'Jan 29 03:26'
        - filename: e.g. 'README'
        - linktarget: e.g. 'some/file'

    Note that the 'date' value will be formatted differently depending on the
    date. Check U{http://cr.yp.to/ftp.html} if you really want to try to parse
    it.

    It also matches the following::

        -rw-r--r--   1 root     other        531 Jan 29 03:26 I HAVE\ SPACE

        - filename: e.g. 'I HAVE SPACE'

        -rw-r--r--   1 root     other        531 Jan 29 03:26 LINK -> TARGET

        - filename: e.g. 'LINK'
        - linktarget: e.g. 'TARGET'

        -rw-r--r--   1 root     other        531 Jan 29 03:26 N S -> L S

        - filename: e.g. 'N S'
        - linktarget: e.g. 'L S'

    @ivar files: list of dicts describing the files in this listing
    """
    # "ls -l"-style listing line.  The optional ' -> target' tail captures
    # symlinks; the trailing '\r?' tolerates CRLF line endings even though
    # `delimiter` below splits on bare '\n'.
    fileLinePattern = re.compile(
        r'^(?P<filetype>.)(?P<perms>.{9})\s+(?P<nlinks>\d*)\s*'
        r'(?P<owner>\S+)\s+(?P<group>\S+)\s+(?P<size>\d+)\s+'
        r'(?P<date>...\s+\d+\s+[\d:]+)\s+(?P<filename>.{1,}?)'
        r'( -> (?P<linktarget>[^\r]*))?\r?$'
    )
    delimiter = '\n'
    def __init__(self):
        self.files = []
    def lineReceived(self, line):
        # Parseable lines are recorded; everything else is handed to
        # unknownLine() for subclasses to deal with.
        d = self.parseDirectoryLine(line)
        if d is None:
            self.unknownLine(line)
        else:
            self.addFile(d)
    def parseDirectoryLine(self, line):
        """
        Return a dictionary of fields, or None if line cannot be parsed.

        @param line: line of text expected to contain a directory entry
        @type line: str

        @return: dict
        """
        match = self.fileLinePattern.match(line)
        if match is None:
            return None
        else:
            d = match.groupdict()
            # Unescape backslash-escaped spaces and coerce numeric fields.
            d['filename'] = d['filename'].replace(r'\ ', ' ')
            d['nlinks'] = int(d['nlinks'])
            d['size'] = int(d['size'])
            if d['linktarget']:
                d['linktarget'] = d['linktarget'].replace(r'\ ', ' ')
            return d
    def addFile(self, info):
        """
        Append file information dictionary to the list of known files.

        Subclasses can override or extend this method to handle file
        information differently without affecting the parsing of data
        from the server.

        @param info: dictionary containing the parsed representation
            of the file information
        @type info: dict
        """
        self.files.append(info)
    def unknownLine(self, line):
        """
        Deal with received lines which could not be parsed as file
        information.

        Subclasses can override this to perform any special processing
        needed.

        @param line: unparsable line as received
        @type line: str
        """
        pass
def parsePWDResponse(response):
    """
    Returns the path from a response to a PWD command.

    Responses typically look like::

        257 "/home/andrew" is current directory.

    For this example, I will return C{'/home/andrew'}.

    If I can't find the path, I return C{None}.
    """
    found = re.search('"(.*)"', response)
    if found is None:
        return None
    return found.group(1)
| bsd-3-clause |
skycucumber/xuemc | python/venv/lib/python2.7/site-packages/docutils/statemachine.py | 124 | 57572 | # $Id: statemachine.py 7464 2012-06-25 13:16:03Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
from docutils import utils
from docutils.utils.error_reporting import ErrorOutput
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
    def __init__(self, state_classes, initial_state, debug=False):
        """
        Initialize a `StateMachine` object; add state objects.

        Parameters:

        - `state_classes`: a list of `State` (sub)classes.
        - `initial_state`: a string, the class name of the initial state.
        - `debug`: a boolean; produce verbose output if true (nonzero).
        """
        # The bare strings after each assignment below are attribute
        # docstrings (docutils' own documentation convention).
        self.input_lines = None
        """`StringList` of input lines (without newlines).
        Filled by `self.run()`."""
        self.input_offset = 0
        """Offset of `self.input_lines` from the beginning of the file."""
        self.line = None
        """Current input line."""
        self.line_offset = -1
        """Current input line offset from beginning of `self.input_lines`."""
        self.debug = debug
        """Debugging mode on/off."""
        self.initial_state = initial_state
        """The name of the initial state (key to `self.states`)."""
        self.current_state = initial_state
        """The name of the current state (key to `self.states`)."""
        self.states = {}
        """Mapping of {state_name: State_object}."""
        self.add_states(state_classes)
        self.observers = []
        """List of bound methods or functions to call whenever the current
        line changes.  Observers are called with one argument, ``self``.
        Cleared at the end of `run()`."""
        self._stderr = ErrorOutput()
        """Wrapper around sys.stderr catching en-/decoding errors"""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
    def run(self, input_lines, input_offset=0, context=None,
            input_source=None, initial_state=None):
        """
        Run the state machine on `input_lines`. Return results (a list).

        Reset `self.line_offset` and `self.current_state`. Run the
        beginning-of-file transition. Input one line at a time and check for a
        matching transition. If a match is found, call the transition method
        and possibly change the state. Store the context returned by the
        transition method to be passed on to the next transition matched.
        Accumulate the results returned by the transition methods in a list.
        Run the end-of-file transition. Finally, return the accumulated
        results.

        Parameters:

        - `input_lines`: a list of strings without newlines, or `StringList`.
        - `input_offset`: the line offset of `input_lines` from the beginning
          of the file.
        - `context`: application-specific storage.
        - `input_source`: name or path of source of `input_lines`.
        - `initial_state`: name of initial state.
        """
        self.runtime_init()
        if isinstance(input_lines, StringList):
            self.input_lines = input_lines
        else:
            self.input_lines = StringList(input_lines, source=input_source)
        self.input_offset = input_offset
        self.line_offset = -1
        self.current_state = initial_state or self.initial_state
        if self.debug:
            print >>self._stderr, (
                u'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
                % (self.line_offset, u'\n| '.join(self.input_lines)))
        transitions = None
        results = []
        state = self.get_state()
        try:
            if self.debug:
                print >>self._stderr, '\nStateMachine.run: bof transition'
            context, result = state.bof(context)
            results.extend(result)
            while True:
                try:
                    try:
                        self.next_line()
                        if self.debug:
                            source, offset = self.input_lines.info(
                                self.line_offset)
                            print >>self._stderr, (
                                u'\nStateMachine.run: line (source=%r, '
                                u'offset=%r):\n| %s'
                                % (source, offset, self.line))
                        context, next_state, result = self.check_line(
                            context, state, transitions)
                    except EOFError:
                        # Input exhausted: run the end-of-file transition
                        # and leave the loop.
                        if self.debug:
                            print >>self._stderr, (
                                '\nStateMachine.run: %s.eof transition'
                                % state.__class__.__name__)
                        result = state.eof(context)
                        results.extend(result)
                        break
                    else:
                        results.extend(result)
                except TransitionCorrection, exception:
                    # A transition asked for a do-over with a different
                    # transition in the same state.
                    self.previous_line() # back up for another try
                    transitions = (exception.args[0],)
                    if self.debug:
                        print >>self._stderr, (
                              '\nStateMachine.run: TransitionCorrection to '
                              'state "%s", transition %s.'
                              % (state.__class__.__name__, transitions[0]))
                    continue
                except StateCorrection, exception:
                    # A transition asked for a do-over in a different state
                    # (optionally restricted to one transition).
                    self.previous_line() # back up for another try
                    next_state = exception.args[0]
                    if len(exception.args) == 1:
                        transitions = None
                    else:
                        transitions = (exception.args[1],)
                    if self.debug:
                        print >>self._stderr, (
                              '\nStateMachine.run: StateCorrection to state '
                              '"%s", transition %s.'
                              % (next_state, transitions[0]))
                else:
                    transitions = None
                state = self.get_state(next_state)
        except:
            # Report details in debug mode, then re-raise unchanged.
            if self.debug:
                self.error()
            raise
        self.observers = []
        return results
    def get_state(self, next_state=None):
        """
        Return current state object; set it first if `next_state` given.

        Parameter `next_state`: a string, the name of the next state.

        Exception: `UnknownStateError` raised if `next_state` unknown.
        """
        if next_state:
            if self.debug and next_state != self.current_state:
                print >>self._stderr, (
                    '\nStateMachine.get_state: Changing state from '
                    '"%s" to "%s" (input line %s).'
                    % (self.current_state, next_state,
                       self.abs_line_number()))
            self.current_state = next_state
        try:
            return self.states[self.current_state]
        except KeyError:
            raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
    def get_source_and_line(self, lineno=None):
        """Return (source, line) tuple for current or given line number.

        Looks up the source and line number in the `self.input_lines`
        StringList instance to count for included source files.

        If the optional argument `lineno` is given, convert it from an
        absolute line number to the corresponding (source, line) pair.
        """
        if lineno is None:
            offset = self.line_offset
        else:
            offset = lineno - self.input_offset - 1
        try:
            src, srcoffset = self.input_lines.info(offset)
            srcline = srcoffset + 1
        except (TypeError):
            # line is None if index is "Just past the end"
            # Recurse on the previous line and add one to compensate.
            src, srcline = self.get_source_and_line(offset + self.input_offset)
            return src, srcline + 1
        except (IndexError): # `offset` is off the list
            src, srcline = None, None
            # raise AssertionError('cannot find line %d in %s lines' %
            #                      (offset, len(self.input_lines)))
            #                      # list(self.input_lines.lines())))
        # assert offset == srcoffset, str(self.input_lines)
        # print "get_source_and_line(%s):" % lineno,
        # print offset + 1, '->', src, srcline
        # print self.input_lines
        return (src, srcline)
    def insert_input(self, input_lines, source):
        """
        Splice `input_lines` into the input just after the current line.

        The inserted lines are bracketed by two empty "internal padding"
        lines.  Note the order: the *after* padding is inserted first at
        offset + 1, the *before* padding is then inserted at the same index
        (pushing the first down), and finally the new lines land between
        them at offset + 2.
        """
        self.input_lines.insert(self.line_offset + 1, '',
                                source='internal padding after '+source,
                                offset=len(input_lines))
        self.input_lines.insert(self.line_offset + 1, '',
                                source='internal padding before '+source,
                                offset=-1)
        self.input_lines.insert(self.line_offset + 2,
                                StringList(input_lines, source))
    def get_text_block(self, flush_left=False):
        """
        Return a contiguous block of text.

        If `flush_left` is true, raise `UnexpectedIndentationError` if an
        indented line is encountered before the text block ends (with a blank
        line).  In that case the machine is still advanced to the last line
        of the partial block before re-raising.
        """
        try:
            block = self.input_lines.get_text_block(self.line_offset,
                                                    flush_left)
            self.next_line(len(block) - 1)
            return block
        except UnexpectedIndentationError, err:
            block = err.args[0]
            self.next_line(len(block) - 1) # advance to last line of block
            raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
    def runtime_init(self):
        """
        Initialize `self.states`.
        """
        # Give every registered state a chance to set itself up just before
        # the state machine runs (called from ``run()``).
        for state in self.states.values():
            state.runtime_init()
    def error(self):
        """Report error details."""
        # _exception_data() (defined later in this module) describes the
        # innermost frame of the exception currently being handled.
        type, value, module, line, function = _exception_data()
        print >>self._stderr, u'%s: %s' % (type, value)
        print >>self._stderr, 'input line %s' % (self.abs_line_number())
        print >>self._stderr, (u'module %s, line %s, function %s' %
                               (module, line, function))
    def attach_observer(self, observer):
        """
        The `observer` parameter is a function or bound method which takes two
        arguments, the source and offset of the current line.
        """
        # Observers are called (in registration order) by notify_observers().
        self.observers.append(observer)
    def detach_observer(self, observer):
        # Raises ValueError if `observer` was never attached.
        self.observers.remove(observer)
    def notify_observers(self):
        # Report the (source, offset) of the current line to every observer;
        # (None, None) when the current offset is out of range.
        for observer in self.observers:
            try:
                info = self.input_lines.info(self.line_offset)
            except IndexError:
                info = (None, None)
            observer(*info)
class State:
    """
    State superclass. Contains a list of transitions, and transition methods.

    Transition methods all have the same signature. They take 3 parameters:

    - An `re` match object. ``match.string`` contains the matched input line,
      ``match.start()`` gives the start index of the match, and
      ``match.end()`` gives the end index.
    - A context object, whose meaning is application-defined (initial value
      ``None``). It can be used to store any information required by the state
      machine, and the retured context is passed on to the next transition
      method unchanged.
    - The name of the next state, a string, taken from the transitions list;
      normally it is returned unchanged, but it may be altered by the
      transition method if necessary.

    Transition methods all return a 3-tuple:

    - A context object, as (potentially) modified by the transition method.
    - The next state name (a return value of ``None`` means no state change).
    - The processing result, a list, which is accumulated by the state
      machine.

    Transition methods may raise an `EOFError` to cut processing short.

    There are two implicit transitions, and corresponding transition methods
    are defined: `bof()` handles the beginning-of-file, and `eof()` handles
    the end-of-file. These methods have non-standard signatures and return
    values. `bof()` returns the initial context and results, and may be used
    to return a header string, or do any other processing needed. `eof()`
    should handle any remaining context and wrap things up; it returns the
    final processing result.

    Typical applications need only subclass `State` (or a subclass), set the
    `patterns` and `initial_transitions` class attributes, and provide
    corresponding transition methods. The default object initialization will
    take care of constructing the list of transitions.
    """

    patterns = None
    """
    {Name: pattern} mapping, used by `make_transition()`. Each pattern may
    be a string or a compiled `re` pattern. Override in subclasses.
    """

    initial_transitions = None
    """
    A list of transitions to initialize when a `State` is instantiated.
    Each entry is either a transition name string, or a (transition name, next
    state name) pair. See `make_transitions()`. Override in subclasses.
    """

    nested_sm = None
    """
    The `StateMachine` class for handling nested processing.
    If left as ``None``, `nested_sm` defaults to the class of the state's
    controlling state machine. Override it in subclasses to avoid the default.
    """

    nested_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `nested_sm` constructor.

    Two keys must have entries in the dictionary:

    - Key 'state_classes' must be set to a list of `State` classes.
    - Key 'initial_state' must be set to the name of the initial state class.

    If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
    class of the current state, and 'initial_state' defaults to the name of
    the class of the current state. Override in subclasses to avoid the
    defaults.
    """

    def __init__(self, state_machine, debug=False):
        """
        Initialize a `State` object; make & add initial transitions.

        Parameters:

        - `state_machine`: the controlling `StateMachine` object.
        - `debug`: a boolean; produce verbose output if true.
        """
        self.transition_order = []
        """A list of transition names in search order."""

        self.transitions = {}
        """
        A mapping of transition names to 3-tuples containing
        (compiled_pattern, transition_method, next_state_name). Initialized as
        an instance attribute dynamically (instead of as a class attribute)
        because it may make forward references to patterns and methods in this
        or other classes.
        """

        self.add_initial_transitions()

        self.state_machine = state_machine
        """A reference to the controlling `StateMachine` object."""

        self.debug = debug
        """Debugging mode on/off."""

        if self.nested_sm is None:
            self.nested_sm = self.state_machine.__class__
        if self.nested_sm_kwargs is None:
            self.nested_sm_kwargs = {'state_classes': [self.__class__],
                                     'initial_state': self.__class__.__name__}

    def runtime_init(self):
        """
        Initialize this `State` before running the state machine; called from
        `self.state_machine.run()`.
        """
        pass

    def unlink(self):
        """Remove circular references to objects no longer required."""
        self.state_machine = None

    def add_initial_transitions(self):
        """Make and add transitions listed in `self.initial_transitions`."""
        if self.initial_transitions:
            names, transitions = self.make_transitions(
                self.initial_transitions)
            self.add_transitions(names, transitions)

    def add_transitions(self, names, transitions):
        """
        Add a list of transitions to the start of the transition list.

        Parameters:

        - `names`: a list of transition names.
        - `transitions`: a mapping of names to transition tuples.

        Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
        """
        for name in names:
            if name in self.transitions:
                raise DuplicateTransitionError(name)
            if name not in transitions:
                raise UnknownTransitionError(name)
        # Prepend so the new names are searched before existing ones.
        self.transition_order[:0] = names
        self.transitions.update(transitions)

    def add_transition(self, name, transition):
        """
        Add a transition to the start of the transition list.

        Parameter `transition`: a ready-made transition 3-tuple.

        Exception: `DuplicateTransitionError`.
        """
        if name in self.transitions:
            raise DuplicateTransitionError(name)
        self.transition_order[:0] = [name]
        self.transitions[name] = transition

    def remove_transition(self, name):
        """
        Remove a transition by `name`.

        Exception: `UnknownTransitionError`.
        """
        try:
            del self.transitions[name]
            self.transition_order.remove(name)
        except (KeyError, ValueError):
            # Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit; only the two lookup failures
            # (missing dict key, missing list entry) are expected here.
            raise UnknownTransitionError(name)

    def make_transition(self, name, next_state=None):
        """
        Make & return a transition tuple based on `name`.

        This is a convenience function to simplify transition creation.

        Parameters:

        - `name`: a string, the name of the transition pattern & method. This
          `State` object must have a method called '`name`', and a dictionary
          `self.patterns` containing a key '`name`'.
        - `next_state`: a string, the name of the next `State` object for this
          transition. A value of ``None`` (or absent) implies no state change
          (i.e., continue with the same state).

        Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
        """
        if next_state is None:
            next_state = self.__class__.__name__
        try:
            pattern = self.patterns[name]
            if not hasattr(pattern, 'match'):
                # Lazily compile string patterns; compiled patterns pass
                # through unchanged.
                pattern = re.compile(pattern)
        except KeyError:
            raise TransitionPatternNotFound(
                '%s.patterns[%r]' % (self.__class__.__name__, name))
        try:
            method = getattr(self, name)
        except AttributeError:
            raise TransitionMethodNotFound(
                '%s.%s' % (self.__class__.__name__, name))
        return (pattern, method, next_state)

    def make_transitions(self, name_list):
        """
        Return a list of transition names and a transition mapping.

        Parameter `name_list`: a list, where each entry is either a transition
        name string, or a 1- or 2-tuple (transition name, optional next state
        name).
        """
        stringtype = type('')
        names = []
        transitions = {}
        for namestate in name_list:
            if type(namestate) is stringtype:
                transitions[namestate] = self.make_transition(namestate)
                names.append(namestate)
            else:
                transitions[namestate[0]] = self.make_transition(*namestate)
                names.append(namestate[0])
        return names, transitions

    def no_match(self, context, transitions):
        """
        Called when there is no match from `StateMachine.check_line()`.

        Return the same values returned by transition methods:

        - context: unchanged;
        - next state name: ``None``;
        - empty result list.

        Override in subclasses to catch this event.
        """
        return context, None, []

    def bof(self, context):
        """
        Handle beginning-of-file. Return unchanged `context`, empty result.

        Override in subclasses.

        Parameter `context`: application-defined storage.
        """
        return context, []

    def eof(self, context):
        """
        Handle end-of-file. Return empty result.

        Override in subclasses.

        Parameter `context`: application-defined storage.
        """
        return []

    def nop(self, match, context, next_state):
        """
        A "do nothing" transition method.

        Return unchanged `context` & `next_state`, empty result. Useful for
        simple state changes (actionless transitions).
        """
        return context, next_state, []
class StateMachineWS(StateMachine):
    """
    `StateMachine` subclass specialized for whitespace recognition.
    There are three methods provided for extracting indented text blocks:
    - `get_indented()`: use when the indent is unknown.
    - `get_known_indented()`: use when the indent is known for all lines.
    - `get_first_known_indented()`: use when only the first line's indent is
      known.
    """
    def get_indented(self, until_blank=False, strip_indent=True):
        """
        Return a block of indented lines of text, and info.
        Extract an indented block where the indent is unknown for all lines.
        :Parameters:
          - `until_blank`: Stop collecting at the first blank line if true.
          - `strip_indent`: Strip common leading indent if true (default).
        :Return:
          - the indented block (a list of lines of text),
          - its indent,
          - its first line offset from BOF, and
          - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
              self.line_offset, until_blank, strip_indent)
        if indented:
            self.next_line(len(indented) - 1) # advance to last indented line
        # Drop leading blank lines, keeping `offset` pointing at the first
        # real line of the returned block.
        while indented and not indented[0].strip():
            indented.trim_start()
            offset += 1
        return indented, indent, offset, blank_finish
    def get_known_indented(self, indent, until_blank=False, strip_indent=True):
        """
        Return an indented block and info.
        Extract an indented block where the indent is known for all lines.
        Starting with the current line, extract the entire text block with at
        least `indent` indentation (which must be whitespace, except for the
        first line).
        :Parameters:
          - `indent`: The number of indent columns/characters.
          - `until_blank`: Stop collecting at the first blank line if true.
          - `strip_indent`: Strip `indent` characters of indentation if true
            (default).
        :Return:
          - the indented block,
          - its first line offset from BOF, and
          - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
              self.line_offset, until_blank, strip_indent,
              block_indent=indent)
        self.next_line(len(indented) - 1) # advance to last indented line
        # Drop leading blank lines (see get_indented()).
        while indented and not indented[0].strip():
            indented.trim_start()
            offset += 1
        return indented, offset, blank_finish
    def get_first_known_indented(self, indent, until_blank=False,
                                 strip_indent=True, strip_top=True):
        """
        Return an indented block and info.
        Extract an indented block where the indent is known for the first line
        and unknown for all other lines.
        :Parameters:
          - `indent`: The first line's indent (# of columns/characters).
          - `until_blank`: Stop collecting at the first blank line if true
            (1).
          - `strip_indent`: Strip `indent` characters of indentation if true
            (1, default).
          - `strip_top`: Strip blank lines from the beginning of the block.
        :Return:
          - the indented block,
          - its indent,
          - its first line offset from BOF, and
          - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
              self.line_offset, until_blank, strip_indent,
              first_indent=indent)
        self.next_line(len(indented) - 1) # advance to last indented line
        if strip_top:
            # Optionally drop leading blank lines, adjusting `offset`.
            while indented and not indented[0].strip():
                indented.trim_start()
                offset += 1
        return indented, indent, offset, blank_finish
class StateWS(State):
    """
    State superclass specialized for whitespace (blank lines & indents).
    Use this class with `StateMachineWS`. The transitions 'blank' (for blank
    lines) and 'indent' (for indented text blocks) are added automatically,
    before any other transitions. The transition method `blank()` handles
    blank lines and `indent()` handles nested indented blocks. Indented
    blocks trigger a new state machine to be created by `indent()` and run.
    The class of the state machine to be created is in `indent_sm`, and the
    constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
    The methods `known_indent()` and `firstknown_indent()` are provided for
    indented blocks where the indent (all lines' and first line's only,
    respectively) is known to the transition method, along with the attributes
    `known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
    is triggered automatically.
    """
    indent_sm = None
    """
    The `StateMachine` class handling indented text blocks.
    If left as ``None``, `indent_sm` defaults to the value of
    `State.nested_sm`. Override it in subclasses to avoid the default.
    """
    indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `indent_sm` constructor.
    If left as ``None``, `indent_sm_kwargs` defaults to the value of
    `State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
    """
    known_indent_sm = None
    """
    The `StateMachine` class handling known-indented text blocks.
    If left as ``None``, `known_indent_sm` defaults to the value of
    `indent_sm`. Override it in subclasses to avoid the default.
    """
    known_indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
    If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
    `indent_sm_kwargs`. Override it in subclasses to avoid the default.
    """
    ws_patterns = {'blank': ' *$',
                   'indent': ' +'}
    """Patterns for default whitespace transitions. May be overridden in
    subclasses."""
    ws_initial_transitions = ('blank', 'indent')
    """Default initial whitespace transitions, added before those listed in
    `State.initial_transitions`. May be overridden in subclasses."""
    def __init__(self, state_machine, debug=False):
        """
        Initialize a `StateSM` object; extends `State.__init__()`.
        Check for indent state machine attributes, set defaults if not set.
        """
        State.__init__(self, state_machine, debug)
        # Cascade the defaults: nested_sm -> indent_sm -> known_indent_sm.
        if self.indent_sm is None:
            self.indent_sm = self.nested_sm
        if self.indent_sm_kwargs is None:
            self.indent_sm_kwargs = self.nested_sm_kwargs
        if self.known_indent_sm is None:
            self.known_indent_sm = self.indent_sm
        if self.known_indent_sm_kwargs is None:
            self.known_indent_sm_kwargs = self.indent_sm_kwargs
    def add_initial_transitions(self):
        """
        Add whitespace-specific transitions before those defined in subclass.
        Extends `State.add_initial_transitions()`.
        """
        State.add_initial_transitions(self)
        if self.patterns is None:
            self.patterns = {}
        self.patterns.update(self.ws_patterns)
        names, transitions = self.make_transitions(
            self.ws_initial_transitions)
        self.add_transitions(names, transitions)
    def blank(self, match, context, next_state):
        """Handle blank lines. Does nothing. Override in subclasses."""
        return self.nop(match, context, next_state)
    def indent(self, match, context, next_state):
        """
        Handle an indented text block. Extend or override in subclasses.
        Recursively run the registered state machine for indented blocks
        (`self.indent_sm`).
        """
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_indented()
        # A fresh nested state machine is created for each indented block.
        sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results
    def known_indent(self, match, context, next_state):
        """
        Handle a known-indent text block. Extend or override in subclasses.
        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        indented, line_offset, blank_finish = \
              self.state_machine.get_known_indented(match.end())
        sm = self.known_indent_sm(debug=self.debug,
                                  **self.known_indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results
    def first_known_indent(self, match, context, next_state):
        """
        Handle an indented text block (first line's indent known).
        Extend or override in subclasses.
        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        indented, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        sm = self.known_indent_sm(debug=self.debug,
                                  **self.known_indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results
class _SearchOverride:
    """
    Mix-in that changes a `StateMachine`'s pattern-matching behavior.

    Replaces the default `re.match()` semantics (pattern must match at the
    very start of ``self.line``) with `re.search()` semantics (pattern may
    match anywhere in ``self.line``).

    List this class **first** in the bases of any `StateMachine` subclass
    so its `match()` takes precedence in the MRO.
    """
    def match(self, pattern):
        """
        Search for `pattern` anywhere in the current line.

        Overrides `StateMachine.match()`.

        Parameter `pattern`: `re` compiled regular expression.
        """
        current_line = self.line
        return pattern.search(current_line)
# _SearchOverride must precede StateMachine so its match() wins in the MRO.
class SearchStateMachine(_SearchOverride, StateMachine):
    """`StateMachine` which uses `re.search()` instead of `re.match()`."""
    pass
# _SearchOverride must precede StateMachineWS so its match() wins in the MRO.
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
    """`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
    pass
# NOTE: Python 2-era class (``__cmp__``, ``cmp``, ``types.SliceType``,
# ``print`` statement in pprint()).
class ViewList:
    """
    List with extended functionality: slices of ViewList objects are child
    lists, linked to their parents. Changes made to a child list also affect
    the parent list. A child list is effectively a "view" (in the SQL sense)
    of the parent list. Changes to parent lists, however, do *not* affect
    active child lists. If a parent list is changed, any active child lists
    should be recreated.
    The start and end of the slice can be trimmed using the `trim_start()` and
    `trim_end()` methods, without affecting the parent list. The link between
    child and parent lists can be broken by calling `disconnect()` on the
    child list.
    Also, ViewList objects keep track of the source & offset of each item.
    This information is accessible via the `source()`, `offset()`, and
    `info()` methods.
    """
    def __init__(self, initlist=None, source=None, items=None,
                 parent=None, parent_offset=None):
        self.data = []
        """The actual list of data, flattened from various sources."""
        self.items = []
        """A list of (source, offset) pairs, same length as `self.data`: the
        source of each line and the offset of each line from the beginning of
        its source."""
        self.parent = parent
        """The parent list."""
        self.parent_offset = parent_offset
        """Offset of this list from the beginning of the parent list."""
        if isinstance(initlist, ViewList):
            # Copy data *and* per-line source info from another ViewList.
            self.data = initlist.data[:]
            self.items = initlist.items[:]
        elif initlist is not None:
            self.data = list(initlist)
            if items:
                self.items = items
            else:
                # No explicit info: attribute every line to `source`.
                self.items = [(source, i) for i in range(len(initlist))]
        assert len(self.data) == len(self.items), 'data mismatch'
    def __str__(self):
        return str(self.data)
    def __repr__(self):
        return '%s(%s, items=%s)' % (self.__class__.__name__,
                                     self.data, self.items)
    # Comparisons delegate to the underlying plain list.
    def __lt__(self, other): return self.data < self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __ne__(self, other): return self.data != self.__cast(other)
    def __gt__(self, other): return self.data > self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cmp__(self, other): return cmp(self.data, self.__cast(other))
    def __cast(self, other):
        # Unwrap a ViewList so comparisons work against plain lists too.
        if isinstance(other, ViewList):
            return other.data
        else:
            return other
    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)
    # The __getitem__()/__setitem__() methods check whether the index
    # is a slice first, since indexing a native list with a slice object
    # just works.
    def __getitem__(self, i):
        if isinstance(i, types.SliceType):
            assert i.step in (None, 1), 'cannot handle slice with stride'
            # A slice produces a *linked* child view, not a copy.
            return self.__class__(self.data[i.start:i.stop],
                                  items=self.items[i.start:i.stop],
                                  parent=self, parent_offset=i.start or 0)
        else:
            return self.data[i]
    def __setitem__(self, i, item):
        if isinstance(i, types.SliceType):
            assert i.step in (None, 1), 'cannot handle slice with stride'
            if not isinstance(item, ViewList):
                raise TypeError('assigning non-ViewList to ViewList slice')
            self.data[i.start:i.stop] = item.data
            self.items[i.start:i.stop] = item.items
            assert len(self.data) == len(self.items), 'data mismatch'
            # Propagate the change up to the parent view, if linked.
            if self.parent:
                self.parent[(i.start or 0) + self.parent_offset
                            : (i.stop or len(self)) + self.parent_offset] = item
        else:
            self.data[i] = item
            if self.parent:
                self.parent[i + self.parent_offset] = item
    def __delitem__(self, i):
        try:
            del self.data[i]
            del self.items[i]
            if self.parent:
                del self.parent[i + self.parent_offset]
        except TypeError:
            # `i` was a slice object, not an integer index.
            assert i.step is None, 'cannot handle slice with stride'
            del self.data[i.start:i.stop]
            del self.items[i.start:i.stop]
            if self.parent:
                del self.parent[(i.start or 0) + self.parent_offset
                                : (i.stop or len(self)) + self.parent_offset]
    def __add__(self, other):
        if isinstance(other, ViewList):
            return self.__class__(self.data + other.data,
                                  items=(self.items + other.items))
        else:
            raise TypeError('adding non-ViewList to a ViewList')
    def __radd__(self, other):
        if isinstance(other, ViewList):
            return self.__class__(other.data + self.data,
                                  items=(other.items + self.items))
        else:
            raise TypeError('adding ViewList to a non-ViewList')
    def __iadd__(self, other):
        if isinstance(other, ViewList):
            self.data += other.data
        else:
            raise TypeError('argument to += must be a ViewList')
        return self
    def __mul__(self, n):
        return self.__class__(self.data * n, items=(self.items * n))
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        self.items *= n
        return self
    def extend(self, other):
        if not isinstance(other, ViewList):
            raise TypeError('extending a ViewList with a non-ViewList')
        if self.parent:
            self.parent.insert(len(self.data) + self.parent_offset, other)
        self.data.extend(other.data)
        self.items.extend(other.items)
    def append(self, item, source=None, offset=0):
        # With no `source`, `item` must be a ViewList and is spliced in whole.
        if source is None:
            self.extend(item)
        else:
            if self.parent:
                self.parent.insert(len(self.data) + self.parent_offset, item,
                                   source, offset)
            self.data.append(item)
            self.items.append((source, offset))
    def insert(self, i, item, source=None, offset=0):
        if source is None:
            if not isinstance(item, ViewList):
                raise TypeError('inserting non-ViewList with no source given')
            self.data[i:i] = item.data
            self.items[i:i] = item.items
            if self.parent:
                # Normalize a possibly-negative index against the new length.
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item)
        else:
            self.data.insert(i, item)
            self.items.insert(i, (source, offset))
            if self.parent:
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item,
                                   source, offset)
    def pop(self, i=-1):
        if self.parent:
            # Normalize a possibly-negative index before mirroring to parent.
            index = (len(self.data) + i) % len(self.data)
            self.parent.pop(index + self.parent_offset)
        self.items.pop(i)
        return self.data.pop(i)
    def trim_start(self, n=1):
        """
        Remove items from the start of the list, without touching the parent.
        """
        if n > len(self.data):
            raise IndexError("Size of trim too large; can't trim %s items "
                             "from a list of size %s." % (n, len(self.data)))
        elif n < 0:
            raise IndexError('Trim size must be >= 0.')
        del self.data[:n]
        del self.items[:n]
        if self.parent:
            # The view now starts `n` lines further into the parent.
            self.parent_offset += n
    def trim_end(self, n=1):
        """
        Remove items from the end of the list, without touching the parent.
        """
        if n > len(self.data):
            raise IndexError("Size of trim too large; can't trim %s items "
                             "from a list of size %s." % (n, len(self.data)))
        elif n < 0:
            raise IndexError('Trim size must be >= 0.')
        del self.data[-n:]
        del self.items[-n:]
    def remove(self, item):
        index = self.index(item)
        del self[index]
    def count(self, item): return self.data.count(item)
    def index(self, item): return self.data.index(item)
    def reverse(self):
        self.data.reverse()
        self.items.reverse()
        # Order no longer corresponds to the parent's; break the link.
        self.parent = None
    def sort(self, *args):
        tmp = zip(self.data, self.items)
        tmp.sort(*args)
        self.data = [entry[0] for entry in tmp]
        self.items = [entry[1] for entry in tmp]
        # Order no longer corresponds to the parent's; break the link.
        self.parent = None
    def info(self, i):
        """Return source & offset for index `i`."""
        try:
            return self.items[i]
        except IndexError:
            if i == len(self.data): # Just past the end
                # Report the last line's source with a None offset.
                return self.items[i - 1][0], None
            else:
                raise
    def source(self, i):
        """Return source for index `i`."""
        return self.info(i)[0]
    def offset(self, i):
        """Return offset for index `i`."""
        return self.info(i)[1]
    def disconnect(self):
        """Break link between this list and parent list."""
        self.parent = None
    def xitems(self):
        """Return iterator yielding (source, offset, value) tuples."""
        for (value, (source, offset)) in zip(self.data, self.items):
            yield (source, offset, value)
    def pprint(self):
        """Print the list in `grep` format (`source:offset:value` lines)"""
        for line in self.xitems():
            print "%s:%d:%s" % line
class StringList(ViewList):
    """A `ViewList` with string-specific methods."""
    def trim_left(self, length, start=0, end=sys.maxint):
        """
        Trim `length` characters off the beginning of each item, in-place,
        from index `start` to `end`. No whitespace-checking is done on the
        trimmed text. Does not affect slice parent.
        """
        self.data[start:end] = [line[length:]
                                for line in self.data[start:end]]
    def get_text_block(self, start, flush_left=False):
        """
        Return a contiguous block of text.
        If `flush_left` is true, raise `UnexpectedIndentationError` if an
        indented line is encountered before the text block ends (with a blank
        line).
        """
        end = start
        last = len(self.data)
        while end < last:
            line = self.data[end]
            if not line.strip():
                # Blank line ends the block.
                break
            if flush_left and (line[0] == ' '):
                source, offset = self.info(end)
                # args[0] of the exception carries the partial block.
                raise UnexpectedIndentationError(self[start:end], source,
                                                 offset + 1)
            end += 1
        return self[start:end]
    def get_indented(self, start=0, until_blank=False, strip_indent=True,
                     block_indent=None, first_indent=None):
        """
        Extract and return a StringList of indented lines of text.
        Collect all lines with indentation, determine the minimum indentation,
        remove the minimum indentation from all indented lines (unless
        `strip_indent` is false), and return them. All lines up to but not
        including the first unindented line will be returned.
        :Parameters:
          - `start`: The index of the first line to examine.
          - `until_blank`: Stop collecting at the first blank line if true.
          - `strip_indent`: Strip common leading indent if true (default).
          - `block_indent`: The indent of the entire block, if known.
          - `first_indent`: The indent of the first line, if known.
        :Return:
          - a StringList of indented lines with mininum indent removed;
          - the amount of the indent;
          - a boolean: did the indented block finish with a blank line or EOF?
        """
        indent = block_indent # start with None if unknown
        end = start
        if block_indent is not None and first_indent is None:
            first_indent = block_indent
        if first_indent is not None:
            # First line's indent is known: accept it without inspection.
            end += 1
        last = len(self.data)
        while end < last:
            line = self.data[end]
            if line and (line[0] != ' '
                         or (block_indent is not None
                             and line[:block_indent].strip())):
                # Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line blank:
                blank_finish = ((end > start)
                                and not self.data[end - 1].strip())
                break
            stripped = line.lstrip()
            if not stripped: # blank line
                if until_blank:
                    blank_finish = 1
                    break
            elif block_indent is None:
                # Track the minimum indent seen so far.
                line_indent = len(line) - len(stripped)
                if indent is None:
                    indent = line_indent
                else:
                    indent = min(indent, line_indent)
            end += 1
        else:
            blank_finish = 1 # block ends at end of lines
        block = self[start:end]
        if first_indent is not None and block:
            # Strip the known first-line indent separately.
            block.data[0] = block.data[0][first_indent:]
        if indent and strip_indent:
            block.trim_left(indent, start=(first_indent is not None))
        return block, indent or 0, blank_finish
    def get_2D_block(self, top, left, bottom, right, strip_indent=True):
        block = self[top:bottom]
        indent = right
        for i in range(len(block.data)):
            # get slice from line, care for combining characters
            ci = utils.column_indices(block.data[i])
            try:
                left = ci[left]
            except IndexError:
                left += len(block.data[i]) - len(ci)
            try:
                right = ci[right]
            except IndexError:
                right += len(block.data[i]) - len(ci)
            block.data[i] = line = block.data[i][left:right].rstrip()
            if line:
                indent = min(indent, len(line) - len(line.lstrip()))
        if strip_indent and 0 < indent < right:
            block.data = [line[indent:] for line in block.data]
        return block
    def pad_double_width(self, pad_char):
        """
        Pad all double-width characters in self by appending `pad_char` to each.
        For East Asian language support.
        """
        if hasattr(unicodedata, 'east_asian_width'):
            east_asian_width = unicodedata.east_asian_width
        else:
            return # new in Python 2.4
        for i in range(len(self.data)):
            line = self.data[i]
            if isinstance(line, unicode):
                new = []
                for char in line:
                    new.append(char)
                    if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
                        new.append(pad_char)
                self.data[i] = ''.join(new)
    def replace(self, old, new):
        """Replace all occurrences of substring `old` with `new`."""
        for i in range(len(self.data)):
            self.data[i] = self.data[i].replace(old, new)
# Exception hierarchy: every state-machine error derives from
# `StateMachineError`, so callers can catch the whole family at once.
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
    """
    Raise from within a transition method to switch to another transition.
    Raise with one argument, the new transition name.
    """
    # NOTE(review): presumably caught by the state machine's main run loop
    # (not visible in this chunk) -- confirm against StateMachine.run().
class StateCorrection(Exception):
    """
    Raise from within a transition method to switch to another state.
    Raise with one or two arguments: new state name, and an optional new
    transition name.
    """
    # NOTE(review): presumably caught by the state machine's main run loop
    # (not visible in this chunk) -- confirm against StateMachine.run().
def string2lines(astring, tab_width=8, convert_whitespace=False,
                 whitespace=re.compile('[\v\f]')):
    """
    Return a list of one-line strings with tabs expanded, no newlines, and
    trailing whitespace stripped.

    Each tab is expanded with between 1 and `tab_width` spaces, so that the
    next character's index becomes a multiple of `tab_width` (8 by default).

    Parameters:

    - `astring`: a multi-line string.
    - `tab_width`: the number of columns between tab stops.
    - `convert_whitespace`: convert form feeds and vertical tabs to spaces?
    - `whitespace`: pattern of characters to convert (the compiled default
      is created once and shared across calls on purpose).
    """
    if convert_whitespace:
        astring = whitespace.sub(' ', astring)
    lines = []
    for raw_line in astring.splitlines():
        lines.append(raw_line.expandtabs(tab_width).rstrip())
    return lines
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| gpl-2.0 |
petteyg/intellij-community | python/lib/Lib/site-packages/django/utils/encoding.py | 89 | 6532 | import types
import urllib
import locale
import datetime
import codecs
from decimal import Decimal
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also remembers the object that failed to decode."""

    def __init__(self, obj, *args):
        # Keep the offending object so the message can show what was passed.
        self.obj = obj
        super(DjangoUnicodeDecodeError, self).__init__(*args)

    def __str__(self):
        base_message = UnicodeDecodeError.__str__(self)
        return '%s. You passed in %r (%s)' % (
            base_message, self.obj, type(self.obj))
class StrAndUnicode(object):
    """
    A class whose __str__ returns its __unicode__ as a UTF-8 bytestring.

    Useful as a mix-in.
    """
    def __str__(self):
        # Python 2 str/unicode split: delegate to the subclass's
        # __unicode__() and encode the result as UTF-8 bytes.
        return self.__unicode__().encode('utf-8')
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a unicode object representing 's'. Treats bytestrings using the
    'encoding' codec.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, Promise):
        # The input is the result of a gettext_lazy() call; leave it lazy so
        # translation is deferred until rendering.
        return s
    return force_unicode(s, encoding, strings_only, errors)
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_unicode(strings_only=True).
    """
    # Python 2: `long` and types.NoneType exist.  Numbers, dates and None
    # keep their type so callers do not receive surprise strings.
    return isinstance(obj, (
        types.NoneType,
        int, long,
        datetime.datetime, datetime.date, datetime.time,
        float, Decimal)
    )
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_unicode, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first, saves 30-40% in performance when s
    # is an instance of unicode. This function gets called often in that
    # setting.
    if isinstance(s, unicode):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, basestring,):
            if hasattr(s, '__unicode__'):
                s = unicode(s)
            else:
                try:
                    # Stringify first, then decode the resulting bytes.
                    s = unicode(str(s), encoding, errors)
                except UnicodeEncodeError:
                    if not isinstance(s, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    s = ' '.join([force_unicode(arg, encoding, strings_only,
                                  errors) for arg in s])
        elif not isinstance(s, unicode):
            # Note: We use .decode() here, instead of unicode(s, encoding,
            # errors), so that if s is a SafeString, it ends up being a
            # SafeUnicode at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError, e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_unicode(arg, encoding, strings_only,
                          errors) for arg in s])
    return s
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if strings_only and isinstance(s, (types.NoneType, int)):
        return s
    if isinstance(s, Promise):
        # Force the lazy translation to text before encoding it.
        return unicode(s).encode(encoding, errors)
    elif not isinstance(s, basestring):
        try:
            return str(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return ' '.join([smart_str(arg, encoding, strings_only,
                                 errors) for arg in s])
            return unicode(s).encode(encoding, errors)
    elif isinstance(s, unicode):
        return s.encode(encoding, errors)
    elif s and encoding != 'utf-8':
        # Bytestring in a non-UTF-8 target encoding: assume the source bytes
        # are UTF-8 and transcode them.
        return s.decode('utf-8', errors).encode(encoding, errors)
    else:
        return s
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.

    This is the algorithm from section 3.1 of RFC 3987. However, since we are
    assuming input is either UTF-8 or unicode already, we can simplify things a
    little from the full method.

    Returns an ASCII string containing the encoded result.
    """
    # The list of safe characters here is constructed from the "reserved" and
    # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
    #     reserved    = gen-delims / sub-delims
    #     gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    #     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
    #                   / "*" / "+" / "," / ";" / "="
    #     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    # Of the unreserved characters, urllib.quote already considers all but
    # the ~ safe.
    # The % character is also added to the list of safe characters here, as the
    # end of section 3.1 of RFC 3987 specifically mentions that % must not be
    # converted.
    if iri is None:
        # Pass None through unchanged (callers may have optional URLs).
        return iri
    return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
# The encoding of the default system locale but falls back to the
# given fallback encoding if the encoding is unsupported by python or could
# not be determined. See tickets #10335 and #5846
try:
    DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
    codecs.lookup(DEFAULT_LOCALE_ENCODING)
except Exception:
    # locale.getdefaultlocale() may raise ValueError and codecs.lookup()
    # raises LookupError for unknown encodings; fall back to ASCII instead
    # of failing at import time.  Narrowed from a bare `except:`, which also
    # swallowed SystemExit/KeyboardInterrupt.
    DEFAULT_LOCALE_ENCODING = 'ascii'
| apache-2.0 |
GladeRom/android_external_chromium_org | tools/profile_chrome/perf_controller_unittest.py | 75 | 1205 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import json
from profile_chrome import controllers_unittest
from profile_chrome import perf_controller
from profile_chrome import ui
from pylib import constants
class PerfProfilerControllerTest(controllers_unittest.BaseControllerTest):
  """Integration tests for PerfProfilerController against a real device."""

  def testGetCategories(self):
    # Silently skip on hosts where `perf` profiling is unsupported.
    if not perf_controller.PerfProfilerController.IsSupported():
      return
    categories = \
        perf_controller.PerfProfilerController.GetCategories(self.device)
    # 'cycles' is the standard hardware event; it should always be listed.
    assert 'cycles' in ' '.join(categories)

  def testTracing(self):
    if not perf_controller.PerfProfilerController.IsSupported():
      return
    ui.EnableTestMode()
    categories = ['cycles']
    controller = perf_controller.PerfProfilerController(self.device,
                                                        categories)

    interval = 1
    try:
      controller.StartTracing(interval)
    finally:
      # Always stop tracing so the device is not left profiling.
      controller.StopTracing()

    result = controller.PullTrace()
    try:
      # The pulled trace must be valid JSON.
      with open(result) as f:
        json.loads(f.read())
    finally:
      # Clean up the pulled file even if JSON parsing failed.
      os.remove(result)
| bsd-3-clause |
metaist/hebphonics | src/hebphonics/parsers/chabad_org.py | 1 | 6500 | #!/usr/bin/env python
# coding: utf-8
"""Download and parse Tanakh from <https://chabad.org/>.
Each chapter is in a separate HTML file (e.g., `01 - Genesis - 001.html`) and contains
navigation and textual information.
The relevant structure is:
```
<h1>...</h1>
<table class="Co_TanachTable">
<tr class="Co_Verse">
<td class="hebrew">
<a class="co_VerseNum">...</a><span class="co_VerseText">...</span>
</td>
</tr>
</table>
```
Notes:
- `<h1>` contains the name of the book and chapter
- `<a.co_VerseNum>` is the Hebrew letter value of the verse number
- `<span.co_VerseText>` contains the text of the Hebrew verse
- `<span.instructional.ksiv>` indicates the ketiv (we ignore)
- ketiv is occasionally indicated with parenthesis or brackets (we ignore)
"""
# native
from functools import partial
from multiprocessing import Queue
from pathlib import Path
from typing import List
import os
import re
# lib
from bs4 import BeautifulSoup
from tqdm import tqdm
import requests
# pkg
from . import (
Msg,
parse_args,
queuer,
save_database,
spawn_processes,
USER_AGENT,
)
from .. import tokens as T
# Canonical English names of the books of the Tanakh in traditional order
# (Torah, then Nevi'im, then Ketuvim; sections separated by `#` below).
# A book's numeric id elsewhere in this module is its 1-based index here
# (see download() and count_words()).
BOOK_NAMES = [
    "Genesis",
    "Exodus",
    "Leviticus",
    "Numbers",
    "Deuteronomy",
    #
    "Joshua",
    "Judges",
    "I Samuel",
    "II Samuel",
    "I Kings",
    "II Kings",
    "Isaiah",
    "Jeremiah",
    "Ezekiel",
    "Hosea",
    "Joel",
    "Amos",
    "Obadiah",
    "Jonah",
    "Micah",
    "Nahum",
    "Habakkuk",
    "Zephaniah",
    "Haggai",
    "Zechariah",
    "Malachi",
    #
    "Psalms",
    "Proverbs",
    "Job",
    "Song of Songs",
    "Ruth",
    "Lamentations",
    "Ecclesiastes",
    "Esther",
    "Daniel",
    "Ezra",
    "Nehemiah",
    "I Chronicles",
    "II Chronicles",
]
def get_words(line):
    """Split a verse line into words after normalizing its punctuation."""
    # Apply each textual normalization as an (old, new) pair, in order.
    substitutions = (
        (T.PUNCTUATION_MAQAF, T.PUNCTUATION_MAQAF + " "),
        (":", T.PUNCTUATION_SOF_PASUQ),
        ("|", ""),  # Should be `paseq`, but we don't want that either.
        (" " + T.POINT_QAMATS + " ", T.POINT_QAMATS + " "),  # Ruth 1:9
        ("\n" + T.POINT_HOLAM, T.POINT_HOLAM),  # Esther 3:1
    )
    for old, new in substitutions:
        line = line.replace(old, new)
    # Drop spans wrapped in parentheses or brackets (ketiv markers).
    line = re.sub(r"[\(\[][^)\]]+[\]\)]", "", line)
    return line.strip().split()
def count_words(lock, pos: int, read_q: Queue, write_q: Queue):
    """Count words in a book.

    Worker-process loop: consumes book-file paths from `read_q`, tallies
    word frequencies, and emits one SAVE message per book on `write_q`.
    """
    # Share the tqdm lock so progress bars from multiple worker processes
    # do not interleave their terminal output.
    tqdm.set_lock(lock)
    for msg in queuer(read_q):
        result = {"books": [], "words": {}}
        # msg.data is the path of one XML book file produced by download().
        book = BeautifulSoup(Path(msg.data).read_text(), "lxml").find("text")
        book_id = int(book["num"])
        result["books"].append(dict(id=book_id, name=book["name"], corpus="chabad.org"))
        desc = f"{os.getpid()} COUNT {book['name']:<15}"
        for line in tqdm(book.find_all("line"), desc=desc, position=pos):
            for raw in get_words(line.string):
                clean = T.strip(raw)
                if not clean:
                    continue
                if clean in result["words"]:
                    result["words"][clean]["freq"] += 1
                else:
                    # First sighting: record the verse reference and the raw
                    # (pointed) form alongside the running frequency.
                    ref = line["ref"]
                    result["words"][clean] = dict(
                        book_id=book_id, freq=1, ref=ref, raw=raw
                    )
        write_q.put(Msg("SAVE", result))
def list_books(read_q: Queue, folder: Path):
    """Queue a COUNT message for every file in `folder`, in sorted order."""
    book_paths = sorted(folder.iterdir())
    for book_path in book_paths:
        read_q.put(Msg("COUNT", str(book_path)))
def download(folder: Path):
    """Download the corpus from chabad.org into `folder`, one XML file per book.

    Crawls chapter pages starting from the Genesis 1 landing page, following
    each page's ``rel="next"`` link until the navigation runs out.
    """
    # pylint: disable=too-many-locals
    folder.mkdir(parents=True, exist_ok=True)

    def tagstr(tag):
        """Concatenate the direct text children of `tag`, skipping sub-tags."""
        # (Was a named lambda; PEP 8 recommends `def` for named functions.)
        return " ".join(e.string.strip() for e in tag.contents if e.name is None)

    def _get_xml(info):
        """Serialize one book dict (num, name, lines) as a small XML document."""
        result = """<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n"""
        result += (
            f'<text corpus="chabad.org" num="{info["num"]}" name="{info["name"]}">\n'
        )
        for line in info["lines"]:
            result += f' <line ref="{line["ref"]}">{line["text"]}</line>\n'
        result += "</text>\n"
        return result

    def _save(book, book_path):
        """Write a finished book to disk, if one has been accumulated."""
        if book and book_path:
            book_path.write_text(_get_xml(book))
            print(f"downloaded {book_path}")

    url_base = "https://www.chabad.org"
    next_url = f"{url_base}/library/bible_cdo/aid/8165"
    book = {}
    book_path = None
    while next_url:
        res = requests.get(next_url, headers={"User-Agent": USER_AGENT})
        soup = BeautifulSoup(res.text, "html.parser")

        # Follow pagination; the site links a dummy aid=0 page at the end.
        next_url = soup.find(rel="next")
        if not next_url or next_url["href"] == "/article.asp?aid=0":
            next_url = None
        else:
            next_url = f"{url_base}{next_url['href']}"

        # <h1> looks like "... - Genesis - Chapter 1".
        h1 = soup.find("h1").string.split(" - ")
        book_name, chapter = h1[-2], int(h1[-1].split(" ")[-1])
        book_num = BOOK_NAMES.index(book_name) + 1
        if book_num != book.get("num"):  # new book: flush the previous one
            _save(book, book_path)
            book = dict(num=book_num, name=book_name, lines=[])
            book_path = folder / f"{book_num:02}-{book_name}.xml"

        verses = soup.find_all("tr", class_="Co_Verse")
        for verse in tqdm(verses, desc=f"{chapter:03} {book_name:<15}"):
            verse_num = int(verse.find(class_="co_VerseNum").string)
            verse = verse.find(class_="hebrew").find(class_="co_VerseText")
            book["lines"].append(
                dict(ref=f"{book_name} {chapter}:{verse_num}", text=tagstr(verse))
            )
    # Flush the final book once pagination is exhausted.
    _save(book, book_path)
def main(argv: List[str] = None):
    """Parse texts from <https://www.chabad.org>.

    Usage: chabad_org.py [download <folder> | -i <PATH>] [-n COUNT]

    Options:
    download <folder> download HTML files to <folder>
    --index, -i PATH HTML folder [default: text/chabad.org]
    --cpus, -n NUM number of CPUs to use; at least 2 [default: all]
    """
    # NOTE: the docstring above doubles as the CLI spec consumed by
    # parse_args() below -- do not reword it casually.
    args = parse_args(main.__doc__ or "", argv)
    num_readers = args["num_readers"]
    num_writers = args["num_writers"]
    if args["download"]:
        folder = Path(args["<folder>"]).resolve()
        download(folder)
    else:
        # Default mode: parse previously downloaded files into the database
        # using a multi-process reader/writer pipeline.
        folder = Path(args["--index"]).resolve()
        init_fn = partial(list_books, folder=folder)
        spawn_processes(init_fn, count_words, save_database, num_readers, num_writers)


if __name__ == "__main__":  # pragma: no cover
    main()
| mit |
Mixser/django | tests/gis_tests/tests.py | 281 | 3127 | import sys
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.db import ProgrammingError
from django.utils import six
# Feature-detect PostGIS support: HAS_POSTGRES gates every test below.
try:
    from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
    HAS_POSTGRES = True
except ImportError:
    HAS_POSTGRES = False
except ImproperlyConfigured as e:
    # If psycopg is installed but not geos, the import path hits
    # django.contrib.gis.geometry.backend which will "helpfully" convert
    # an ImportError into an ImproperlyConfigured.
    # Here, we make sure we're only catching this specific case and not another
    # ImproperlyConfigured one.
    if e.args and e.args[0].startswith('Could not import user-defined GEOMETRY_BACKEND'):
        HAS_POSTGRES = False
    else:
        # Some other misconfiguration: re-raise with the original traceback.
        six.reraise(*sys.exc_info())
if HAS_POSTGRES:
    class FakeConnection(object):
        # Minimal stand-in for a database connection: PostGISOperations
        # only reads `settings_dict` from it in these tests.
        def __init__(self):
            self.settings_dict = {
                'NAME': 'test',
            }

    class FakePostGISOperations(PostGISOperations):
        # Overrides the SQL-executing helper so no real database is needed;
        # `version` simulates the string postgis_lib_version() would return.
        def __init__(self, version=None):
            self.version = version
            self.connection = FakeConnection()

        def _get_postgis_func(self, func):
            if func == 'postgis_lib_version':
                if self.version is None:
                    # Simulate the DB error raised when PostGIS is absent.
                    raise ProgrammingError
                else:
                    return self.version
            elif func == 'version':
                pass
            else:
                raise NotImplementedError('This function was not expected to be called')
@unittest.skipUnless(HAS_POSTGRES, "The psycopg2 driver is needed for these tests")
class TestPostgisVersionCheck(unittest.TestCase):
    """
    Tests that the postgis version check parses correctly the version numbers
    """

    def test_get_version(self):
        expect = '1.0.0'
        ops = FakePostGISOperations(expect)
        actual = ops.postgis_lib_version()
        self.assertEqual(expect, actual)

    def test_version_classic_tuple(self):
        expect = ('1.2.3', 1, 2, 3)
        ops = FakePostGISOperations(expect[0])
        actual = ops.postgis_version_tuple()
        self.assertEqual(expect, actual)

    def test_version_dev_tuple(self):
        # A trailing "dev" suffix on the micro part must still parse.
        expect = ('1.2.3dev', 1, 2, 3)
        ops = FakePostGISOperations(expect[0])
        actual = ops.postgis_version_tuple()
        self.assertEqual(expect, actual)

    def test_valid_version_numbers(self):
        versions = [
            ('1.3.0', 1, 3, 0),
            ('2.1.1', 2, 1, 1),
            ('2.2.0dev', 2, 2, 0),
        ]
        for version in versions:
            ops = FakePostGISOperations(version[0])
            actual = ops.spatial_version
            self.assertEqual(version[1:], actual)

    def test_invalid_version_numbers(self):
        # Strings that must not parse as "major.minor.micro".
        versions = ['nope', '123']
        for version in versions:
            ops = FakePostGISOperations(version)
            # spatial_version is a property, hence the lambda wrapper.
            self.assertRaises(Exception, lambda: ops.spatial_version)

    def test_no_version_number(self):
        # No version available at all must raise ImproperlyConfigured.
        ops = FakePostGISOperations()
        self.assertRaises(ImproperlyConfigured, lambda: ops.spatial_version)
| bsd-3-clause |
tensorflow/federated | tensorflow_federated/__init__.py | 1 | 5139 | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The TensorFlow Federated library."""
import sys
from tensorflow_federated.version import __version__ # pylint: disable=g-bad-import-order
from tensorflow_federated import experimental
from tensorflow_federated.python import aggregators
from tensorflow_federated.python import analytics
from tensorflow_federated.python import learning
from tensorflow_federated.python import simulation
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing as profiler
from tensorflow_federated.python.core import backends
from tensorflow_federated.python.core import framework
from tensorflow_federated.python.core import templates
from tensorflow_federated.python.core import test
from tensorflow_federated.python.core import types
from tensorflow_federated.python.core.api.computation_base import Computation
from tensorflow_federated.python.core.api.computations import check_returns_type
from tensorflow_federated.python.core.api.computations import federated_computation
from tensorflow_federated.python.core.api.computations import tf_computation
from tensorflow_federated.python.core.impl.federated_context.data import data
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_aggregate
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_broadcast
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_collect
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_eval
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_map
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_mean
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_secure_select
# TODO(b/191188806): Make this public once it is fully supported by MRF and
# the test runtime.
# from tensorflow_federated.python.core.impl.
# federated_context.intrinsics import federated_secure_sum
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_secure_sum_bitwidth
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_select
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_sum
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_value
from tensorflow_federated.python.core.impl.federated_context.intrinsics import federated_zip
from tensorflow_federated.python.core.impl.federated_context.intrinsics import sequence_map
from tensorflow_federated.python.core.impl.federated_context.intrinsics import sequence_reduce
from tensorflow_federated.python.core.impl.federated_context.intrinsics import sequence_sum
from tensorflow_federated.python.core.impl.federated_context.value_impl import to_value
from tensorflow_federated.python.core.impl.federated_context.value_impl import Value
from tensorflow_federated.python.core.impl.types.computation_types import at_clients as type_at_clients
from tensorflow_federated.python.core.impl.types.computation_types import at_server as type_at_server
from tensorflow_federated.python.core.impl.types.computation_types import FederatedType
from tensorflow_federated.python.core.impl.types.computation_types import FunctionType
from tensorflow_federated.python.core.impl.types.computation_types import SequenceType
from tensorflow_federated.python.core.impl.types.computation_types import StructType
from tensorflow_federated.python.core.impl.types.computation_types import StructWithPythonType
from tensorflow_federated.python.core.impl.types.computation_types import TensorType
from tensorflow_federated.python.core.impl.types.computation_types import to_type
from tensorflow_federated.python.core.impl.types.computation_types import Type
from tensorflow_federated.python.core.impl.types.placements import CLIENTS
from tensorflow_federated.python.core.impl.types.placements import SERVER
from tensorflow_federated.python.core.impl.types.type_conversions import structure_from_tensor_type_tree
from tensorflow_federated.python.core.impl.types.typed_object import TypedObject
# Compare version tuples directly: the previous element-wise check
# (major < 3 or minor < 6) would wrongly reject e.g. Python 4.0.
if sys.version_info < (3, 6):
  raise Exception('TFF only supports Python versions 3.6 or later.')
# Initialize a default execution context. This is implicitly executed the
# first time a module in the `core` package is imported.  Importing this
# package therefore has a side effect: it installs the local execution
# context as the default.
backends.native.set_local_execution_context()
| apache-2.0 |
OCForks/phantomjs | src/qt/qtbase/src/3rdparty/freetype/src/tools/chktrcmp.py | 381 | 3826 | #!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009
#
# This code is explicitly into the public domain.
import sys
import os
import re
# Global tables: components referenced by sources vs. components declared
# in the trace-definition header(s).
SRC_FILE_LIST = []
USED_COMPONENT = {}
KNOWN_COMPONENT = {}

SRC_FILE_DIRS = [ "src" ]
TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ]

# --------------------------------------------------------------
# Parse command line options
#

for i in range( 1, len( sys.argv ) ):
  if sys.argv[i].startswith( "--help" ):
    print "Usage: %s [option]" % sys.argv[0]
    print "Search used-but-defined and defined-but-not-used trace_XXX macros"
    print ""
    print "  --help:"
    print "      Show this help"
    print ""
    print "  --src-dirs=dir1:dir2:..."
    print "      Specify the directories of C source files to be checked"
    print "      Default is %s" % ":".join( SRC_FILE_DIRS )
    print ""
    print "  --def-files=file1:file2:..."
    print "      Specify the header files including FT_TRACE_DEF()"
    print "      Default is %s" % ":".join( TRACE_DEF_FILES )
    print ""
    exit(0)
  if sys.argv[i].startswith( "--src-dirs=" ):
    SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
  elif sys.argv[i].startswith( "--def-files=" ):
    TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )

# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#

# Lines of the form `#define FT_COMPONENT  trace_xxx` mark a usage.
c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )

for d in SRC_FILE_DIRS:
  for ( p, dlst, flst ) in os.walk( d ):
    for f in flst:
      if c_pathname_pat.match( f ) != None:
        src_pathname = os.path.join( p, f )
        line_num = 0
        for src_line in open( src_pathname, 'r' ):
          line_num = line_num + 1
          src_line = src_line.strip()
          if trace_use_pat.match( src_line ) != None:
            # Record every "file:line" location that uses the component.
            component_name = trace_use_pat.sub( '', src_line )
            if component_name in USED_COMPONENT:
              USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
            else:
              USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]

# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#

# FT_TRACE_DEF( xxx ) declares a component; strip the macro opening and
# anything after the component name.
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )

for f in TRACE_DEF_FILES:
  line_num = 0
  for hdr_line in open( f, 'r' ):
    line_num = line_num + 1
    hdr_line = hdr_line.strip()
    if trace_def_pat_opn.match( hdr_line ) != None:
      component_name = trace_def_pat_opn.sub( '', hdr_line )
      component_name = trace_def_pat_cls.sub( '', component_name )
      if component_name in KNOWN_COMPONENT:
        print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
          ( component_name, KNOWN_COMPONENT[component_name], line_num )
      else:
        KNOWN_COMPONENT[component_name] = "%s:%d" % \
          ( os.path.basename( f ), line_num )

# --------------------------------------------------------------
# Compare the used and defined trace macros.
#

print "# Trace component used in the implementations but not defined in fttrace.h."
cmpnt = USED_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
  if c not in KNOWN_COMPONENT:
    print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )

print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
  if c not in USED_COMPONENT:
    # "any" is a catch-all component and is legitimately never used directly.
    if c != "any":
      print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
| bsd-3-clause |
nikhilprathapani/python-for-android | python3-alpha/python3-src/Lib/test/test_bufio.py | 64 | 2654 | import unittest
from test import support
import io # C implementation.
import _pyio as pyio # Python implementation.
# Simple test to ensure that optimizations in the IO library deliver the
# expected results. For best testing, run this under a debug-build Python too
# (to exercise asserts in the C code).
# Buffer sizes to exercise: every small length plus powers of two and a few
# large values, chosen to straddle likely stdio buffer boundaries.
lengths = list(range(1, 257)) + [512, 1000, 1024, 2048, 4096, 8192, 10000,
                                 16384, 32768, 65536, 1000000]
class BufferSizeTest(unittest.TestCase):
    """Exercise buffered I/O around buffer-size boundaries.

    Subclasses supply the `open` implementation under test (the C `io`
    module or the pure-Python `_pyio`).
    """

    def try_one(self, s):
        # Write s + "\n" + s to file, then open it and ensure that successive
        # .readline()s deliver what we wrote.

        # Ensure we can open TESTFN for writing.
        support.unlink(support.TESTFN)

        # Since C doesn't guarantee we can write/read arbitrary bytes in text
        # files, use binary mode.
        f = self.open(support.TESTFN, "wb")
        try:
            # write once with \n and once without
            f.write(s)
            f.write(b"\n")
            f.write(s)
            f.close()
            # Bug fix: read back through the implementation under test
            # (self.open) rather than the builtin open, so PyBufferSizeTest
            # actually exercises _pyio on the read path as well.
            f = self.open(support.TESTFN, "rb")
            line = f.readline()
            self.assertEqual(line, s + b"\n")
            line = f.readline()
            self.assertEqual(line, s)
            line = f.readline()
            self.assertFalse(line)  # Must be at EOF
            f.close()
        finally:
            support.unlink(support.TESTFN)

    def drive_one(self, pattern):
        """Run try_one() at every target length, plus/minus one byte."""
        for length in lengths:
            # Repeat string 'pattern' as often as needed to reach total length
            # 'length'. Then call try_one with that string, a string one larger
            # than that, and a string one smaller than that. Try this with all
            # small sizes and various powers of 2, so we exercise all likely
            # stdio buffer sizes, and "off by one" errors on both sides.
            q, r = divmod(length, len(pattern))
            teststring = pattern * q + pattern[:r]
            self.assertEqual(len(teststring), length)
            self.try_one(teststring)
            self.try_one(teststring + b"x")
            self.try_one(teststring[:-1])

    def test_primepat(self):
        # A pattern with prime length, to avoid simple relationships with
        # stdio buffer sizes.
        self.drive_one(b"1234567890\00\01\02\03\04\05\06")

    def test_nullpat(self):
        self.drive_one(bytes(1000))
class CBufferSizeTest(BufferSizeTest):
    # C implementation of the io machinery.
    open = io.open

class PyBufferSizeTest(BufferSizeTest):
    # Pure-Python implementation (_pyio); staticmethod so the function is
    # not bound as a method when looked up on the class.
    open = staticmethod(pyio.open)
def test_main():
    # Run both the C (io) and the pure-Python (_pyio) variants.
    support.run_unittest(CBufferSizeTest, PyBufferSizeTest)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
charris/numpy | numpy/distutils/fcompiler/compaq.py | 7 | 3903 |
#http://www.compaq.com/fortran/docs/
import os
import sys
from numpy.distutils.fcompiler import FCompiler
from distutils.errors import DistutilsPlatformError
# Names of the compiler classes this module exports to numpy.distutils.
compilers = ['CompaqFCompiler']
if os.name != 'posix' or sys.platform[:6] == 'cygwin' :
    # Otherwise we'd get a false positive on posix systems with
    # case-insensitive filesystems (like darwin), because we'll pick
    # up /bin/df
    compilers.append('CompaqVisualFCompiler')
class CompaqFCompiler(FCompiler):
    # Wrapper for the Compaq (DEC) Fortran compiler on Unix-like systems.
    compiler_type = 'compaq'
    description = 'Compaq Fortran Compiler'
    version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'

    if sys.platform[:5]=='linux':
        # On Linux the driver is installed as `fort`; elsewhere it is `f90`.
        fc_exe = 'fort'
    else:
        fc_exe = 'f90'

    executables = {
        'version_cmd'  : ['<F90>', "-version"],
        'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
        'compiler_fix' : [fc_exe, "-fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    module_dir_switch = '-module ' # not tested
    module_include_switch = '-I'

    def get_flags(self):
        return ['-assume no2underscore', '-nomixed_str_len_arg']
    def get_flags_debug(self):
        return ['-g', '-check bounds']
    def get_flags_opt(self):
        return ['-O4', '-align dcommons', '-assume bigarrays',
                '-assume nozsize', '-math_library fast']
    def get_flags_arch(self):
        return ['-arch host', '-tune host']
    def get_flags_linker_so(self):
        if sys.platform[:5]=='linux':
            return ['-shared']
        # Tru64/OSF1 style shared-library link flags.
        return ['-shared', '-Wl,-expect_unresolved,*']
class CompaqVisualFCompiler(FCompiler):
    # Wrapper for DIGITAL/Compaq Visual Fortran on Windows (MSVC toolchain).
    compiler_type = 'compaqv'
    description = 'DIGITAL or Compaq Visual Fortran Compiler'
    version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
                       r' Version (?P<version>[^\s]*).*')

    compile_switch = '/compile_only'
    object_switch = '/object:'
    library_switch = '/OUT:' #No space after /OUT:!

    static_lib_extension = ".lib"
    static_lib_format = "%s%s"
    module_dir_switch = '/module:'
    module_include_switch = '/I'

    ar_exe = 'lib.exe'
    fc_exe = 'DF'

    if sys.platform=='win32':
        from numpy.distutils.msvccompiler import MSVCCompiler

        # Probe MSVC at class-creation time to locate lib.exe; each failure
        # mode below is tolerated so importing this module never breaks.
        try:
            m = MSVCCompiler()
            m.initialize()
            ar_exe = m.lib
        except DistutilsPlatformError:
            pass
        except AttributeError as e:
            if '_MSVCCompiler__root' in str(e):
                print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e))
            else:
                raise
        except IOError as e:
            if not "vcvarsall.bat" in str(e):
                print("Unexpected IOError in", __file__)
                raise
        except ValueError as e:
            if not "'path'" in str(e):
                print("Unexpected ValueError in", __file__)
                raise

    executables = {
        'version_cmd'  : ['<F90>', "/what"],
        'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
        'compiler_fix' : [fc_exe, "/fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : [ar_exe, "/OUT:"],
        'ranlib'       : None
        }

    def get_flags(self):
        return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
                '/names:lowercase', '/assume:underscore']
    def get_flags_opt(self):
        return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
    def get_flags_arch(self):
        return ['/threads']
    def get_flags_debug(self):
        return ['/debug']
if __name__ == '__main__':
    # Manual smoke test: report the version of the detected Compaq compiler.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='compaq').get_version())
| bsd-3-clause |
YACOWS/opps-polls | opps/polls/templatetags/polls_tags.py | 1 | 3645 | # -*- coding: utf-8 -*-
from django import template
from django.conf import settings
from django.utils import timezone
from opps.polls.models import Poll, PollBox, PollConfig
register = template.Library()
@register.simple_tag(takes_context=True)
def get_poll(context, slug, relation='channel', template_name=None):
    """
    Render the latest published poll related to a channel or a post.

    {% get_poll 'channel_slug' relation='channel' %}
    {% get_poll 'post_slug' relation='post' %}
    """
    poll = None
    t = template.loader.get_template('polls/poll_detail_ajax.html')
    if template_name:
        t = template.loader.get_template(template_name)

    if relation == 'channel':
        # A specific poll can be pinned to a channel through PollConfig:
        #   key: poll_slug / value: the-poll-slug / channel: Channel (object)
        poll_slug = PollConfig.get_value('poll_slug', channel__slug=slug)
        if poll_slug:
            # A configured poll takes precedence; if it does not match a
            # published poll, render nothing (no fallback -- matches the
            # original override behavior).
            try:
                poll = Poll.objects.filter(
                    slug=poll_slug,
                    channel__slug=slug,
                    published=True,
                    date_available__lte=timezone.now()
                ).latest('date_insert')
            except Poll.DoesNotExist:
                poll = []
        else:
            # No configured poll: fall back to the channel's most recent
            # published poll.  (Previously this query also ran when
            # poll_slug was set, costing an extra DB hit whose result was
            # immediately discarded.)
            try:
                poll = Poll.objects.filter(
                    channel__slug=slug,
                    published=True,
                    date_available__lte=timezone.now()
                ).latest('date_insert')
            except Poll.DoesNotExist:
                poll = []
    elif relation == 'post':
        try:
            poll = Poll.objects.filter(
                posts__slug=slug,
                published=True,
                date_available__lte=timezone.now()
            ).latest('date_insert')
        except Poll.DoesNotExist:
            poll = []
    return t.render(template.Context({'poll': poll, 'context': context}))
@register.simple_tag
def get_active_polls(number=5, channel_slug=None,
                     template_name='polls/actives.html'):
    """Render up to `number` currently open polls, optionally per channel."""
    queryset = Poll.objects.all_opened()
    if channel_slug:
        queryset = queryset.filter(channel__slug=channel_slug)
    render_context = {
        'active_polls': queryset[:number],
        'channel_slug': channel_slug,
        'number': number,
    }
    chosen_template = template.loader.get_template(template_name)
    return chosen_template.render(template.Context(render_context))
@register.simple_tag
def get_pollbox(slug, channel_slug=None, template_name=None):
    """Render the published ``PollBox`` identified by ``slug``.

    When ``channel_slug`` is given, the lookup slug becomes
    ``"<slug>-<channel_slug>"``.  If no matching box exists, ``None`` is
    passed to the template as ``pollbox``.
    """
    if channel_slug:
        slug = u"{0}-{1}".format(slug, channel_slug)
    try:
        box = PollBox.objects.get(site=settings.SITE_ID, slug=slug,
                                  date_available__lte=timezone.now(),
                                  published=True)
    except PollBox.DoesNotExist:
        box = None
    # Default template; replaced below when template_name is given.
    t = template.loader.get_template('polls/pollbox_detail.html')
    if template_name:
        t = template.loader.get_template(template_name)
    return t.render(template.Context({'pollbox': box, 'slug': slug}))
@register.simple_tag
def get_all_pollbox(channel_slug, template_name=None):
    """Render every published ``PollBox`` of the given channel."""
    boxes = PollBox.objects.filter(site=settings.SITE_ID,
                                   date_available__lte=timezone.now(),
                                   published=True,
                                   channel__slug=channel_slug)
    # Default template; replaced below when template_name is given.
    t = template.loader.get_template('polls/pollbox_list.html')
    if template_name:
        t = template.loader.get_template(template_name)
    return t.render(template.Context({'pollboxes': boxes}))
| mit |
vijayanandnandam/youtube-dl | devscripts/show-downloads-statistics.py | 15 | 1349 | #!/usr/bin/env python
from __future__ import unicode_literals
import itertools
import json
import os
import re
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.compat import (
compat_print,
compat_urllib_request,
)
from youtube_dl.utils import format_bytes
def format_size(bytes):
    """Return ``'<human readable> (<n> bytes)'`` for a byte count."""
    # NOTE: `bytes` shadows the builtin; kept to preserve the signature.
    return '%s (%d bytes)' % (format_bytes(bytes), bytes)
total_bytes = 0
# Walk the paginated GitHub releases API until an empty page is returned.
for page in itertools.count(1):
    releases = json.loads(compat_urllib_request.urlopen(
        'https://api.github.com/repos/ytdl-org/youtube-dl/releases?page=%s' % page
    ).read().decode('utf-8'))
    if not releases:
        break
    for release in releases:
        compat_print(release['name'])
        for asset in release['assets']:
            asset_name = asset['name']
            # Traffic is accumulated for *every* asset ...
            total_bytes += asset['download_count'] * asset['size']
            # ... but per-asset stats are only printed for the main
            # distributables (binary, dated tarball, Windows exe).
            if all(not re.match(p, asset_name) for p in (
                    r'^youtube-dl$',
                    r'^youtube-dl-\d{4}\.\d{2}\.\d{2}(?:\.\d+)?\.tar\.gz$',
                    r'^youtube-dl\.exe$')):
                continue
            compat_print(
                ' %s size: %s downloads: %d'
                % (asset_name, format_size(asset['size']), asset['download_count']))
compat_print('total downloads traffic: %s' % format_size(total_bytes))
| unlicense |
fabiobatalha/ratchet | ratchet/utils.py | 1 | 1682 | #coding: utf-8
import os
import weakref
from ConfigParser import SafeConfigParser
class SingletonMixin(object):
    """
    Adds a singleton behaviour to an existing class.

    Instances are cached per ``(class, args, kwargs)`` key, so calling
    the class twice with the same arguments returns the same object.
    weakrefs are used in order to keep a low memory footprint.
    As a result, args and kwargs passed to classes initializers
    must be of weakly refereable types, and a cached instance only
    lives while an external strong reference to it exists.
    """
    _instances = weakref.WeakValueDictionary()

    def __new__(cls, *args, **kwargs):
        # kwargs are flattened into a tuple so the key is hashable.
        key = (cls, args, tuple(kwargs.items()))
        if key in cls._instances:
            return cls._instances[key]
        # Cooperate with this mixin's MRO rather than the metaclass: the
        # previous `super(type(cls), cls)` resolved through `type`.  Do
        # not forward args either -- `object.__new__` rejects extra
        # arguments on Python 3; `__init__` still receives them.
        new_instance = super(SingletonMixin, cls).__new__(cls)
        cls._instances[key] = new_instance
        return new_instance
class Configuration(SingletonMixin):
    """
    Acts as a proxy to the ConfigParser module.
    """
    def __init__(self, fp, parser_dep=SafeConfigParser):
        # `parser_dep` is injectable to ease testing.
        self.conf = parser_dep()
        self.conf.readfp(fp)

    @classmethod
    def from_env(cls):
        """Build a Configuration from the RATCHET_SETTINGS_FILE env var."""
        try:
            filepath = os.environ['RATCHET_SETTINGS_FILE']
        except KeyError:
            raise ValueError('missing env variable RATCHET_SETTINGS_FILE')
        return cls.from_file(filepath)

    @classmethod
    def from_file(cls, filepath):
        """
        Returns an instance of Configuration
        ``filepath`` is a text string.
        """
        fp = open(filepath, 'rb')
        return cls(fp)

    def __getattr__(self, attr):
        # Delegate any unknown attribute to the underlying parser.
        return getattr(self.conf, attr)

    def items(self):
        """Settings as a list of (section, {option: value}) pairs."""
        # Fixed: the previous line was corrupted by stray trailing text
        # (a syntax error) and wrapped sections() in a redundant
        # comprehension.
        return [(section, dict(self.conf.items(section)))
                for section in self.conf.sections()]
pipermerriam/pyethereum | ethereum/utils.py | 1 | 10020 | try:
from Crypto.Hash import keccak
sha3_256 = lambda x: keccak.new(digest_bits=256, data=x).digest()
except:
import sha3 as _sha3
sha3_256 = lambda x: _sha3.sha3_256(x).digest()
from bitcoin import privtopub
import sys
import rlp
from rlp.sedes import big_endian_int, BigEndianInt, Binary
from rlp.utils import decode_hex, encode_hex, ascii_chr, str_to_bytes
import random
big_endian_to_int = lambda x: big_endian_int.deserialize(str_to_bytes(x).lstrip(b'\x00'))
int_to_big_endian = lambda x: big_endian_int.serialize(x)
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
TT255 = 2 ** 255
if sys.version_info.major == 2:
is_numeric = lambda x: isinstance(x, (int, long))
is_string = lambda x: isinstance(x, (str, unicode))
def to_string(value):
return str(value)
def int_to_bytes(value):
if isinstance(value, str):
return value
return int_to_big_endian(value)
def to_string_for_regexp(value):
return str(value)
unicode = unicode
else:
is_numeric = lambda x: isinstance(x, int)
is_string = lambda x: isinstance(x, bytes)
def to_string(value):
if isinstance(value, bytes):
return value
if isinstance(value, str):
return bytes(value, 'utf-8')
if isinstance(value, int):
return bytes(str(value), 'utf-8')
def int_to_bytes(value):
if isinstance(value, bytes):
return value
return int_to_big_endian(value)
def to_string_for_regexp(value):
return str(to_string(value), 'utf-8')
unicode = str
isnumeric = is_numeric
def mk_contract_address(sender, nonce):
return sha3(rlp.encode([normalize_address(sender), nonce]))[12:]
def safe_ord(value):
    """Return ``value`` unchanged if it is already an int, else ``ord(value)``.

    Lets callers treat elements of py2 strings (1-char strings) and py3
    bytes (ints) uniformly.
    """
    return value if isinstance(value, int) else ord(value)
# decorator
def debug(label):
def deb(f):
def inner(*args, **kwargs):
i = random.randrange(1000000)
print(label, i, 'start', args)
x = f(*args, **kwargs)
print(label, i, 'end', x)
return x
return inner
return deb
def flatten(li):
    """Concatenate the iterables in ``li`` into a single flat list."""
    return [item for sub in li for item in sub]
def bytearray_to_int(arr):
    """Interpret ``arr`` (an iterable of byte values) as a big-endian int."""
    result = 0
    for byte in arr:
        result = result * 256 + byte
    return result
def int_to_32bytearray(i):
    """Return ``i`` as a big-endian list of 32 byte values (low 256 bits)."""
    out = []
    for _ in range(32):
        out.append(i & 0xff)
        i >>= 8
    out.reverse()
    return out
sha3_count = [0]
def sha3(seed):
sha3_count[0] += 1
return sha3_256(to_string(seed))
assert sha3('').encode('hex') == 'c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470'
def privtoaddr(x, extended=False):
if len(x) > 32:
x = decode_hex(x)
o = sha3(privtopub(x)[1:])[12:]
return add_checksum(o) if extended else o
def add_checksum(x):
if len(x) in (40, 48):
x = decode_hex(x)
if len(x) == 24:
return x
return x + sha3(x)[:4]
def check_and_strip_checksum(x):
if len(x) in (40, 48):
x = decode_hex(x)
assert len(x) == 24 and sha3(x[:20])[:4] == x[-4:]
return x[:20]
def normalize_address(x, allow_blank=False):
    """Coerce an address given as hex (optionally '0x'-prefixed, optionally
    carrying a 4-byte sha3 checksum suffix) or as raw bytes into the raw
    20-byte form.

    Raises an Exception (or fails the checksum assert) when the input
    cannot be reduced to a valid 20-byte address.
    """
    if allow_blank and x == '':
        return ''
    # 42/50 = '0x' + 40/48 hex chars; strip the prefix first.
    if len(x) in (42, 50) and x[:2] == '0x':
        x = x[2:]
    if len(x) in (40, 48):
        x = decode_hex(x)
    if len(x) == 24:
        # last 4 bytes are a sha3 checksum of the 20-byte address
        assert len(x) == 24 and sha3(x[:20])[:4] == x[-4:]
        x = x[:20]
    if len(x) != 20:
        raise Exception("Invalid address format: %r" % x)
    return x
def zpad(x, l):
    """Left-pad byte string ``x`` with zero bytes to length ``l``.

    Inputs already at least ``l`` bytes long come back unchanged.
    """
    pad_len = l - len(x)
    if pad_len <= 0:
        return x
    return b'\x00' * pad_len + x
def zunpad(x):
    """Strip leading zero bytes (py3 ints or py2 ``'\\x00'`` chars) from ``x``."""
    start = 0
    length = len(x)
    while start < length and x[start] in (0, '\x00'):
        start += 1
    return x[start:]
def int_to_addr(x):
o = [''] * 20
for i in range(20):
o[19 - i] = ascii_chr(x & 0xff)
x >>= 8
return b''.join(o)
def coerce_addr_to_bin(x):
if is_numeric(x):
return encode_hex(zpad(big_endian_int.serialize(x), 20))
elif len(x) == 40 or len(x) == 0:
return decode_hex(x)
else:
return zpad(x, 20)[-20:]
def coerce_addr_to_hex(x):
if is_numeric(x):
return encode_hex(zpad(big_endian_int.serialize(x), 20))
elif len(x) == 40 or len(x) == 0:
return x
else:
return encode_hex(zpad(x, 20)[-20:])
def coerce_to_int(x):
if is_numeric(x):
return x
elif len(x) == 40:
return big_endian_to_int(decode_hex(x))
else:
return big_endian_to_int(x)
def coerce_to_bytes(x):
if is_numeric(x):
return big_endian_int.serialize(x)
elif len(x) == 40:
return decode_hex(x)
else:
return x
def parse_int_or_hex(s):
if is_numeric(s):
return s
elif s[:2] in (b'0x', '0x'):
s = to_string(s)
tail = (b'0' if len(s) % 2 else b'') + s[2:]
return big_endian_to_int(decode_hex(tail))
else:
return int(s)
def ceil32(x):
    """Round ``x`` up to the next multiple of 32."""
    remainder = x % 32
    if remainder == 0:
        return x
    return x + (32 - remainder)
def to_signed(i):
    """Map an unsigned 256-bit word to its two's-complement signed value."""
    # 2**255 is the sign threshold; values at or above it wrap negative.
    if i < 2 ** 255:
        return i
    return i - 2 ** 256
def sha3rlp(x):
return sha3(rlp.encode(x))
# Format encoders/decoders for bin, addr, int
def decode_bin(v):
'''decodes a bytearray from serialization'''
if not is_string(v):
raise Exception("Value must be binary, not RLP array")
return v
def decode_addr(v):
'''decodes an address from serialization'''
if len(v) not in [0, 20]:
raise Exception("Serialized addresses must be empty or 20 bytes long!")
return encode_hex(v)
def decode_int(v):
'''decodes and integer from serialization'''
if len(v) > 0 and (v[0] == '\x00' or v[0] == 0):
raise Exception("No leading zero bytes allowed for integers")
return big_endian_to_int(v)
def decode_int256(v):
return big_endian_to_int(v)
def encode_bin(v):
'''encodes a bytearray into serialization'''
return v
def encode_root(v):
'''encodes a trie root into serialization'''
return v
def encode_int(v):
'''encodes an integer into serialization'''
if not is_numeric(v) or v < 0 or v >= TT256:
raise Exception("Integer invalid or out of range: %r" % v)
return int_to_big_endian(v)
def encode_int256(v):
return zpad(int_to_big_endian(v), 256)
def scan_bin(v):
if v[:2] in ('0x', b'0x'):
return decode_hex(v[2:])
else:
return decode_hex(v)
def scan_int(v):
if v[:2] in ('0x', b'0x'):
return big_endian_to_int(decode_hex(v[2:]))
else:
return int(v)
# Decoding from RLP serialization
decoders = {
"bin": decode_bin,
"addr": decode_addr,
"int": decode_int,
"int256b": decode_int256,
}
# Encoding to RLP serialization
encoders = {
"bin": encode_bin,
"int": encode_int,
"trie_root": encode_root,
"int256b": encode_int256,
}
# Encoding to printable format
printers = {
"bin": lambda v: b'0x' + encode_hex(v),
"addr": lambda v: v,
"int": lambda v: to_string(v),
"trie_root": lambda v: encode_hex(v),
"int256b": lambda x: encode_hex(zpad(encode_int256(x), 256))
}
# Decoding from printable format
scanners = {
"bin": scan_bin,
"addr": lambda x: x[2:] if x[:2] == b'0x' else x,
"int": scan_int,
"trie_root": lambda x: scan_bin,
"int256b": lambda x: big_endian_to_int(decode_hex(x))
}
def int_to_hex(x):
o = encode_hex(encode_int(x))
return '0x' + (o[1:] if (len(o) > 0 and o[0] == '0') else o)
def remove_0x_head(s):
return s[2:] if s[:2] == b'0x' else s
def print_func_call(ignore_first_arg=False, max_call_number=100):
    ''' utility function to facilitate debug, it will print input args before
    function call, and print return value after function call

    usage:

        @print_func_call
        def some_func_to_be_debu():
            pass

    :param ignore_first_arg: whether print the first arg or not.
    useful when ignore the `self` parameter of an object method call

    :param max_call_number: raise after this many traced calls, as a
    guard against runaway tracing.
    '''
    from functools import wraps

    def display(x):
        # Render a value for printing; non-ASCII-decodable values get a
        # placeholder instead of raising.
        x = to_string(x)
        try:
            x.decode('ascii')
        except Exception:
            return 'NON_PRINTABLE'
        return x

    # Shared mutable counter across all calls of the wrapped function.
    local = {'call_number': 0}

    def inner(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            local['call_number'] += 1
            tmp_args = args[1:] if ignore_first_arg and len(args) else args
            this_call_number = local['call_number']
            print(('{0}#{1} args: {2}, {3}'.format(
                f.__name__,
                this_call_number,
                ', '.join([display(x) for x in tmp_args]),
                ', '.join(display(key) + '=' + to_string(value)
                          for key, value in kwargs.items())
            )))
            res = f(*args, **kwargs)
            print(('{0}#{1} return: {2}'.format(
                f.__name__,
                this_call_number,
                display(res))))
            # Honor the configured limit; the previous hard-coded 100
            # silently ignored `max_call_number`.
            if local['call_number'] > max_call_number:
                raise Exception("Touch max call number!")
            return res
        return wrapper
    return inner
def dump_state(trie):
res = ''
for k, v in list(trie.to_dict().items()):
res += '%r:%r\n' % (encode_hex(k), encode_hex(v))
return res
class Denoms():
    """Ether denominations, each expressed as a number of wei."""
    def __init__(self):
        units = {
            'wei': 1,
            'babbage': 10 ** 3,
            'lovelace': 10 ** 6,
            'shannon': 10 ** 9,
            'szabo': 10 ** 12,
            'finney': 10 ** 15,
            'ether': 10 ** 18,
            'turing': 2 ** 256,
        }
        for name, value in units.items():
            setattr(self, name, value)
address = Binary.fixed_length(20, allow_empty=True)
int20 = BigEndianInt(20)
int32 = BigEndianInt(32)
int256 = BigEndianInt(256)
hash32 = Binary.fixed_length(32)
trie_root = Binary.fixed_length(32, allow_empty=True)
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[91m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def DEBUG(msg, *args, **kwargs):
from ethereum import slogging
slogging.DEBUG(msg, *args, **kwargs)
| mit |
StackStorm/st2contrib | archive/packs/alertlogic/actions/scan_get_results.py | 6 | 1238 | #!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2actions.runners.pythonrunner import Action
from lib.get_scan_results import GetScanResults
class ScanResults(Action):
    """StackStorm action wrapper around Alert Logic scan-result retrieval."""

    def run(self, scan_exec_id=None, new_vulns=False, new_ports=False):
        """
        Build a ``GetScanResults`` helper for the given scan execution.

        NOTE(review): this returns the ``GetScanResults`` object itself,
        not a plain dict as the old docstring claimed -- presumably the
        helper's constructor does the fetching; confirm against
        ``lib.get_scan_results``.

        Raises:
            ValueError: On lack of key in config (per the original
                docstring; raised inside the helper -- confirm).
        """
        return GetScanResults(self.config, scan_exec_id, new_vulns, new_ports)
| apache-2.0 |
tilacog/rows | tests/tests_utils.py | 1 | 2168 | # coding: utf-8
from __future__ import unicode_literals
import types
import unittest
from collections import OrderedDict
import rows.fields as fields
from rows.utils import create_table, ipartition, slug
class UtilsTestCase(unittest.TestCase):
def test_slug(self):
self.assertEqual(slug('Álvaro Justen'), 'alvaro_justen')
self.assertEqual(slug("Moe's Bar"), 'moes_bar')
self.assertEqual(slug("-----te-----st------"), 'te_st')
def test_ipartition(self):
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
result = ipartition(iterable, 3)
self.assertEqual(type(result), types.GeneratorType)
self.assertEqual(list(result), [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]])
def test_create_table_skip_header(self):
field_types = OrderedDict([('integer', fields.IntegerField),
('string', fields.UnicodeField),])
data = [['1', 'Álvaro'], ['2', 'turicas'], ['3', 'Justen']]
table_1 = create_table(data, fields=field_types, skip_header=True)
table_2 = create_table(data, fields=field_types, skip_header=False)
self.assertEqual(field_types, table_1.fields)
self.assertEqual(table_1.fields, table_2.fields)
self.assertEqual(len(table_1), 2)
self.assertEqual(len(table_2), 3)
first_row = {'integer': 1, 'string': 'Álvaro'}
second_row = {'integer': 2, 'string': 'turicas'}
third_row = {'integer': 3, 'string': 'Justen'}
self.assertEqual(dict(table_1[0]._asdict()), second_row)
self.assertEqual(dict(table_2[0]._asdict()), first_row)
self.assertEqual(dict(table_1[1]._asdict()), third_row)
self.assertEqual(dict(table_2[1]._asdict()), second_row)
self.assertEqual(dict(table_2[2]._asdict()), third_row)
# TODO: test make_header
# TODO: test all features of create_table
# TODO: test if error is raised if len(row) != len(fields)
# TODO: test get_fobj_and_filename (BytesIO should return filename = None)
# TODO: test download_file
# TODO: test get_uri_information
# TODO: test import_from_uri
# TODO: test export_to_uri
| gpl-3.0 |
jr-garcia/Engendro3D | e3d/scene_management/ScenesManagerClass.py | 1 | 2253 | from __future__ import print_function
from .SceneClass import Scene
from ..backends.base_backend import DrawingData
class ScenesManager(object):
    """Owns every Scene of the engine and tracks which one is current."""

    def __init__(self):
        self._scenes = {}
        self._models = None
        self.sounds = None
        self._engine = None
        self.currentSceneID = ""
        # Holds the Scene *class* until initialize() replaces it with a
        # real default Scene instance.
        self._defaultScene = Scene

    def initialize(self, engine):
        """Bind the engine and build/register the default scene."""
        self._models = engine.models
        self.sounds = engine.sounds
        self._engine = engine
        self._defaultScene = Scene('DefaultScene', engine, resolution=160, gravity=-9.8)
        self._scenes[self._defaultScene.ID] = self._defaultScene

    def _get_idExists(self, ID):
        # True when a scene with this ID is registered.
        return ID in self._scenes.keys()
    idExists = property(fget=_get_idExists)

    def addScene(self, ID, resolution=160, gravity=-9.8):
        """Create, register and return a new Scene; ``ID`` must be unused."""
        if ID in self._scenes.keys():
            raise KeyError("The ID exists already.")
        else:
            ns = Scene(ID, self._engine, gravity, resolution)
            self._scenes[ID] = ns
            return ns

    def removeScene(self, id):
        # Clear the current selection first if the removed scene is current.
        if self.currentSceneID == id:
            self.currentSceneID = ""
        self._scenes.pop(id)

    def getScene(self, id):
        """
        Return the scene with this ID, or the default scene if unknown.

        @rtype : Scene
        """
        return self._scenes.get(id, self._defaultScene)

    def setCurrentSceneID(self, ID):
        # Only registered IDs may become current.
        if ID not in self._scenes.keys():
            raise KeyError("The ID does not exist")
        else:
            self.currentSceneID = ID

    def setCurrentScene(self, scene):
        # A falsy scene (e.g. None) selects the default scene.
        if not scene:
            scene = self._defaultScene
        self.currentSceneID = scene.ID

    def getCurrentScene(self):
        """
        @rtype : Scene
        """
        return self.getScene(self.currentSceneID)
    currentScene = property(getCurrentScene, setCurrentScene)

    def update(self, netElapsedTime, windowSize):
        """Advance the current scene and return its drawing data."""
        currentScene = self.currentScene
        if currentScene:
            return currentScene.update(netElapsedTime, windowSize)
        else:
            # No current scene: fall back to the default and return
            # empty drawing data for this frame.
            self.currentScene = self._defaultScene
            return DrawingData()

    def terminate(self):
        print('scenes man terminate not implemented.')
        # for scene in self._scenes ... terminate?
| mit |
jit/pyew | plugins/threatexpert.py | 16 | 1054 | #!/usr/bin/env python
"""
This file is part of Pyew
Copyright (C) 2009, 2010 Joxean Koret
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import webbrowser
from hashlib import md5
def threatExpertSearch(pyew, args=None):
    """ Search in Threat Expert for the behavior's report """
    # ThreatExpert reports are addressed by the sample's MD5 hash.
    baseurl = "http://www.threatexpert.com/report.aspx?md5="
    buf = pyew.getBuffer()
    url = baseurl + md5(buf).hexdigest()
    # Open the report in the user's default web browser.
    webbrowser.open(url)

# Plugin registration: command name -> handler (presumably picked up by
# Pyew's plugin loader -- confirm against the host application).
functions={"threat":threatExpertSearch}
| gpl-2.0 |
MirkoRossini/django-altauth | src/main/python/altauth/views.py | 1 | 4972 | from django.shortcuts import render
from django.utils.crypto import get_random_string
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, make_password
from django.contrib.auth import login
from django.conf import settings
from altauth.models import AlternativePassword, PublicKey
from altauth.forms import (SetAlternativePasswordForm,
AlternativePasswordLoginForm, SetPublicKeyForm,
PublicKeyLoginForm)
from django.http import (HttpResponseRedirect, HttpResponse,
HttpResponseForbidden)
from django.utils.translation import ugettext_lazy as _
@login_required
def set_alternative_password(request):
"""
View to set the alternative password on a user
"""
context = {}
if request.method == 'POST':
form = SetAlternativePasswordForm(request.POST)
if form.is_valid():
form.cleaned_data['passphrase'] = \
form.cleaned_data['passphrase'] or \
get_random_string(100)
passphrase = form.cleaned_data['passphrase']
salt = form.cleaned_data['salt'] or None
alternative_password = make_password(passphrase, salt)
ap_entry, created = AlternativePassword.objects.get_or_create(
user=request.user,
)
ap_entry.alternative_password = alternative_password
ap_entry.save()
context['alternative_password'] = passphrase
else:
form = SetAlternativePasswordForm() # An unbound form
context['form'] = form
return render(request, 'altauth/set_alternative_password.html', context)
def alternative_password_login(request):
    """
    This view can be used to log in with the alternative
    password.

    POST: validate the form; on success log the user in and redirect to
    ``LOGIN_REDIRECT_URL``.  Otherwise (and on GET) render the login
    page with the form.
    """
    context = {}
    if request.method == 'POST':
        form = AlternativePasswordLoginForm(request.POST)
        if form.is_valid():
            # User authenticated by the form (cached as `user_cache`).
            user = form.user_cache
            if user is not None:
                if user.is_active:
                    login(request, user)
                    redirect_to = settings.LOGIN_REDIRECT_URL
                    return HttpResponseRedirect(redirect_to)
        # NOTE(review): a valid form with a missing/inactive user falls
        # through and re-renders the page without an explicit error.
    else:
        form = AlternativePasswordLoginForm()
    context['form'] = form
    return render(request, 'altauth/alternative_password_login.html', context)
@login_required
def set_public_key(request):
"""
View to set the alternative password on a user
"""
context = {}
if request.method == 'POST':
form = SetPublicKeyForm(request.POST)
if form.is_valid():
pb_entry, created = PublicKey.objects.get_or_create(
user=request.user,
pubkey_type=form.cleaned_data['pubkey_type'],
)
pb_entry.public_key = form.cleaned_data['public_key']
pb_entry.save()
context['success'] = True
else:
form = SetPublicKeyForm() # An unbound form
context['form'] = form
return render(request, 'altauth/set_public_key.html', context)
def get_public_key_token(request, pubkey_type='RSA'):
"""
Returns a pubkey encripted token to allow log in through pubkey
authentication.
The returned value is a text/plain content encripted with the user's public key:
<2-digits token length > <token> <server public key>
"""
if request.method == 'POST' and request.POST['username']:
try:
user = User.objects.get(username=request.POST['username'])
public_key = PublicKey.objects.get(user=user,
pubkey_type=pubkey_type)
except PublicKey.DoesNotExist:
return HttpResponseForbidden(
_(('no public key available for '
'pubkey type %(pubkey_type)s')) %
{'pubkey_type': pubkey_type}
)
except User.DoesNotExist:
return HttpResponseForbidden(_("Invalid username: %(username)s") %
{'username': request.POST['username']})
login_token = public_key.generate_login_token()
return HttpResponse(login_token, content_type='text/plain')
else:
return HttpResponseForbidden(_("username not specified"))
def public_key_login(request):
    """
    This view can be used to log in with a public key.

    POST: validate ``PublicKeyLoginForm``; on success log the user in
    and redirect to ``LOGIN_REDIRECT_URL``.  Otherwise (and on GET)
    render the login page with the form.
    """
    context = {}
    if request.method == 'POST':
        form = PublicKeyLoginForm(request.POST)
        if form.is_valid():
            # User authenticated by the form (cached as `user_cache`).
            user = form.user_cache
            if user is not None:
                if user.is_active:
                    login(request, user)
                    redirect_to = settings.LOGIN_REDIRECT_URL
                    return HttpResponseRedirect(redirect_to)
    else:
        # Bug fix: this previously built AlternativePasswordLoginForm
        # (copy-paste from alternative_password_login), so a GET request
        # rendered the wrong form for this view.
        form = PublicKeyLoginForm()
    context['form'] = form
    return render(request, 'altauth/public_key_login.html', context)
| mit |
YelpArchive/pushmanager | pushmanager/tests/test_template_newrequest.py | 2 | 2211 | import testify as T
from pushmanager.testing.testservlet import TemplateTestCase
class NewRequestTemplateTest(TemplateTestCase):
authenticated = True
newrequest_page = 'modules/newrequest.html'
form_elements = ['title', 'tags', 'review', 'repo', 'branch', 'description', 'comments', 'watchers', 'takeover']
def test_request_form_labels(self):
tree = self.render_etree(self.newrequest_page)
form_attr = ['request-form-%s' % elem for elem in self.form_elements]
form_attr_with_id = ['takeover']
found_labels = []
for label in tree.iter('label'):
found_labels.append(label.attrib['for'])
if label.attrib['for'] in form_attr_with_id:
T.assert_equal(label.attrib['id'], '%s-label' % label.attrib['for'])
T.assert_sorted_equal(form_attr, found_labels)
def test_request_form_input(self):
tree = self.render_etree(self.newrequest_page)
id_attr = ['request-form-%s' % elem for elem in self.form_elements]
name_attr = ['request-%s' % elem for elem in self.form_elements]
found_id = []
found_name = []
for field in tree.iter('input'):
if 'type' not in field.attrib or field.attrib['type'] in ['checkbox']: # ignore hidden/submit
found_id.append(field.attrib['id'])
found_name.append(field.attrib['name'])
for textarea in tree.iter('textarea'):
found_id.append(textarea.attrib['id'])
found_name.append(textarea.attrib['name'])
T.assert_sorted_equal(id_attr, found_id)
T.assert_sorted_equal(name_attr, found_name)
tags = ['feature', 'fix', 'cleanup', 'buildbot', 'caches', 'pushplans',
'seagull', 'special', 'urgent', 'submodule-bump', 'hoods', 'stagea',
'stageb', 'no-verify']
def test_request_quicktags(self):
tree = self.render_etree(self.newrequest_page)
found_tags = []
for span in tree.iter('span'):
if span.attrib['class'] == 'tag-suggestion':
found_tags.append(span.text)
T.assert_sorted_equal(self.tags, found_tags)
if __name__ == '__main__':
T.run()
| apache-2.0 |
wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/sqlalchemy/testing/plugin/pytestplugin.py | 43 | 6224 | try:
# installed by bootstrap.py
import sqla_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import pytest
import argparse
import inspect
import collections
import os
try:
import xdist # noqa
has_xdist = True
except ImportError:
has_xdist = False
def pytest_addoption(parser):
group = parser.getgroup("sqlalchemy")
def make_option(name, **kw):
callback_ = kw.pop("callback", None)
if callback_:
class CallableAction(argparse.Action):
def __call__(self, parser, namespace,
values, option_string=None):
callback_(option_string, values, parser)
kw["action"] = CallableAction
group.addoption(name, **kw)
plugin_base.setup_options(make_option)
plugin_base.read_config()
def pytest_configure(config):
if hasattr(config, "slaveinput"):
plugin_base.restore_important_follower_config(config.slaveinput)
plugin_base.configure_follower(
config.slaveinput["follower_ident"]
)
if config.option.write_idents:
with open(config.option.write_idents, "a") as file_:
file_.write(config.slaveinput["follower_ident"] + "\n")
else:
if config.option.write_idents and \
os.path.exists(config.option.write_idents):
os.remove(config.option.write_idents)
plugin_base.pre_begin(config.option)
plugin_base.set_coverage_flag(bool(getattr(config.option,
"cov_source", False)))
plugin_base.set_skip_test(pytest.skip.Exception)
def pytest_sessionstart(session):
plugin_base.post_begin()
def pytest_sessionfinish(session):
plugin_base.final_process_cleanup()
if has_xdist:
import uuid
def pytest_configure_node(node):
# the master for each node fills slaveinput dictionary
# which pytest-xdist will transfer to the subprocess
plugin_base.memoize_important_follower_config(node.slaveinput)
node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]
from sqlalchemy.testing import provision
provision.create_follower_db(node.slaveinput["follower_ident"])
def pytest_testnodedown(node, error):
from sqlalchemy.testing import provision
provision.drop_follower_db(node.slaveinput["follower_ident"])
def pytest_collection_modifyitems(session, config, items):
# look for all those classes that specify __backend__ and
# expand them out into per-database test cases.
# this is much easier to do within pytest_pycollect_makeitem, however
# pytest is iterating through cls.__dict__ as makeitem is
# called which causes a "dictionary changed size" error on py3k.
# I'd submit a pullreq for them to turn it into a list first, but
# it's to suit the rather odd use case here which is that we are adding
# new classes to a module on the fly.
rebuilt_items = collections.defaultdict(list)
items[:] = [
item for item in
items if isinstance(item.parent, pytest.Instance)
and not item.parent.parent.name.startswith("_")]
test_classes = set(item.parent for item in items)
for test_class in test_classes:
for sub_cls in plugin_base.generate_sub_tests(
test_class.cls, test_class.parent.module):
if sub_cls is not test_class.cls:
list_ = rebuilt_items[test_class.cls]
for inst in pytest.Class(
sub_cls.__name__,
parent=test_class.parent.parent).collect():
list_.extend(inst.collect())
newitems = []
for item in items:
if item.parent.cls in rebuilt_items:
newitems.extend(rebuilt_items[item.parent.cls])
rebuilt_items[item.parent.cls][:] = []
else:
newitems.append(item)
# seems like the functions attached to a test class aren't sorted already?
# is that true and why's that? (when using unittest, they're sorted)
items[:] = sorted(newitems, key=lambda item: (
item.parent.parent.parent.name,
item.parent.parent.name,
item.name
))
def pytest_pycollect_makeitem(collector, name, obj):
if inspect.isclass(obj) and plugin_base.want_class(obj):
return pytest.Class(name, parent=collector)
elif inspect.isfunction(obj) and \
isinstance(collector, pytest.Instance) and \
plugin_base.want_method(collector.cls, obj):
return pytest.Function(name, parent=collector)
else:
return []
_current_class = None
def pytest_runtest_setup(item):
# here we seem to get called only based on what we collected
# in pytest_collection_modifyitems. So to do class-based stuff
# we have to tear that out.
global _current_class
if not isinstance(item, pytest.Function):
return
# ... so we're doing a little dance here to figure it out...
if _current_class is None:
class_setup(item.parent.parent)
_current_class = item.parent.parent
# this is needed for the class-level, to ensure that the
# teardown runs after the class is completed with its own
# class-level teardown...
def finalize():
global _current_class
class_teardown(item.parent.parent)
_current_class = None
item.parent.parent.addfinalizer(finalize)
test_setup(item)
def pytest_runtest_teardown(item):
# ...but this works better as the hook here rather than
# using a finalizer, as the finalizer seems to get in the way
# of the test reporting failures correctly (you get a bunch of
# py.test assertion stuff instead)
test_teardown(item)
def test_setup(item):
plugin_base.before_test(item, item.parent.module.__name__,
item.parent.cls, item.name)
def test_teardown(item):
plugin_base.after_test(item)
def class_setup(item):
plugin_base.start_test_class(item.cls)
def class_teardown(item):
plugin_base.stop_test_class(item.cls)
| gpl-3.0 |
manuelm/pyload | module/plugins/hoster/FileuploadNet.py | 5 | 1338 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster
class FileuploadNet(SimpleHoster):
    __name__ = "FileuploadNet"
    __type__ = "hoster"
    __version__ = "0.06"
    __status__ = "testing"
    # URLs this hoster plugin claims responsibility for.
    __pattern__ = r'https?://(?:www\.)?(en\.)?file-upload\.net/download-\d+/.+'
    __config__ = [("activated" , "bool", "Activated" , True),
                  ("use_premium" , "bool", "Use premium account if available" , True),
                  ("fallback" , "bool", "Fallback to free download if premium fails" , True),
                  ("chk_filesize", "bool", "Check file size" , True),
                  ("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )]
    __description__ = """File-upload.net hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de")]
    # Page-parsing patterns consumed by the SimpleHoster base class:
    # file name/size, offline marker, and the free-download link.
    NAME_PATTERN = r'<title>File-Upload.net - (?P<N>.+?)<'
    SIZE_PATTERN = r'</label><span>(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
    OFFLINE_PATTERN = r'Datei existiert nicht'
    LINK_FREE_PATTERN = r"<a href='(.+?)' title='download' onclick"

    def setup(self):
        # Per-download tuning: parallel downloads allowed, single chunk.
        self.multiDL = True
        self.chunk_limit = 1
| gpl-3.0 |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_grp.py | 47 | 3165 | """Test script for the grp module."""
import unittest
from test import test_support
grp = test_support.import_module('grp')
class GroupDatabaseTestCase(unittest.TestCase):
    """Sanity checks for the grp module against the live group database."""

    def check_value(self, value):
        # check that a grp tuple has the entries and
        # attributes promised by the docs
        self.assertEqual(len(value), 4)
        self.assertEqual(value[0], value.gr_name)
        self.assertIsInstance(value.gr_name, basestring)
        self.assertEqual(value[1], value.gr_passwd)
        self.assertIsInstance(value.gr_passwd, basestring)
        self.assertEqual(value[2], value.gr_gid)
        self.assertIsInstance(value.gr_gid, int)
        self.assertEqual(value[3], value.gr_mem)
        self.assertIsInstance(value.gr_mem, list)

    def test_values(self):
        # Every entry returned by getgrall() must be well-formed and
        # round-trip through getgrgid()/getgrnam().
        entries = grp.getgrall()
        for e in entries:
            self.check_value(e)
        if len(entries) > 1000:  # Huge group file (NIS?) -- skip the rest
            return
        for e in entries:
            e2 = grp.getgrgid(e.gr_gid)
            self.check_value(e2)
            self.assertEqual(e2.gr_gid, e.gr_gid)
            name = e.gr_name
            if name.startswith('+') or name.startswith('-'):
                # NIS-related entry
                continue
            e2 = grp.getgrnam(name)
            self.check_value(e2)
            # There are instances where getgrall() returns group names in
            # lowercase while getgrgid() returns proper casing.
            # Discovered on Ubuntu 5.04 (custom).
            self.assertEqual(e2.gr_name.lower(), name.lower())

    def test_errors(self):
        # Missing/extra arguments must raise TypeError.
        self.assertRaises(TypeError, grp.getgrgid)
        self.assertRaises(TypeError, grp.getgrnam)
        self.assertRaises(TypeError, grp.getgrall, 42)
        # try to get some errors
        bynames = {}
        bygids = {}
        for (n, p, g, mem) in grp.getgrall():
            if not n or n == '+':
                continue  # skip NIS entries etc.
            bynames[n] = g
            bygids[g] = n
        # Mutate a real group name, odometer-style, until we find one that
        # is not in the database; getgrnam() on it must then raise KeyError.
        allnames = bynames.keys()
        namei = 0
        fakename = allnames[namei]
        while fakename in bynames:
            chars = list(fakename)
            for i in xrange(len(chars)):
                if chars[i] == 'z':
                    chars[i] = 'A'  # wrap this position and stop
                    break
                elif chars[i] == 'Z':
                    continue  # cannot bump past 'Z'; try the next position
                else:
                    chars[i] = chr(ord(chars[i]) + 1)  # bump and stop
                    break
            else:
                # Every position was 'Z': fall back to the next real name.
                namei = namei + 1
                try:
                    fakename = allnames[namei]
                except IndexError:
                    # should never happen... if so, just forget it
                    break
            fakename = ''.join(chars)
        self.assertRaises(KeyError, grp.getgrnam, fakename)
        # Choose a non-existent gid.
        fakegid = 4127
        while fakegid in bygids:
            fakegid = (fakegid * 3) % 0x10000
        self.assertRaises(KeyError, grp.getgrgid, fakegid)
def test_main():
    # Drive the suite through the stdlib regression-test harness.
    test_support.run_unittest(GroupDatabaseTestCase)

if __name__ == "__main__":
    test_main()
| gpl-2.0 |
citrix-openstack-build/tempest | tempest/scenario/test_stamp_pattern.py | 3 | 7871 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from cinderclient import exceptions as cinder_exceptions
import testtools
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.scenario import manager
import tempest.test
LOG = logging.getLogger(__name__)


class TestStampPattern(manager.OfficialClientTest):
    """
    This test is for snapshotting an instance/volume and attaching the volume
    created from snapshot to the instance booted from snapshot.
    The following is the scenario outline:
    1. Boot an instance "instance1"
    2. Create a volume "volume1"
    3. Attach volume1 to instance1
    4. Create a filesystem on volume1
    5. Mount volume1
    6. Create a file which timestamp is written in volume1
    7. Unmount volume1
    8. Detach volume1 from instance1
    9. Get a snapshot "snapshot_from_volume" of volume1
    10. Get a snapshot "snapshot_from_instance" of instance1
    11. Boot an instance "instance2" from snapshot_from_instance
    12. Create a volume "volume2" from snapshot_from_volume
    13. Attach volume2 to instance2
    14. Check the existence of a file which created at 6. in volume2
    """

    def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
        """Block until the volume snapshot reaches `status` (or time out)."""
        self.status_timeout(self.volume_client.volume_snapshots,
                            volume_snapshot.id, status)

    def _boot_image(self, image_id):
        """Boot a server from `image_id` using the test keypair."""
        create_kwargs = {
            'key_name': self.keypair.name
        }
        return self.create_server(image=image_id, create_kwargs=create_kwargs)

    def _add_keypair(self):
        self.keypair = self.create_keypair()

    def _create_floating_ip(self):
        """Allocate a floating IP and schedule its release on cleanup."""
        floating_ip = self.compute_client.floating_ips.create()
        self.addCleanup(floating_ip.delete)
        return floating_ip

    def _add_floating_ip(self, server, floating_ip):
        server.add_floating_ip(floating_ip)

    def _ssh_to_server(self, server_or_ip):
        linux_client = self.get_remote_client(server_or_ip)
        return linux_client.ssh_client

    def _create_volume_snapshot(self, volume):
        """Snapshot `volume` and register a cleanup that waits for the
        snapshot to actually disappear before the test finishes."""
        snapshot_name = rand_name('scenario-snapshot-')
        volume_snapshots = self.volume_client.volume_snapshots
        snapshot = volume_snapshots.create(
            volume.id, display_name=snapshot_name)

        def cleaner():
            # Deletion is asynchronous: poll until the snapshot is gone.
            volume_snapshots.delete(snapshot)
            try:
                while volume_snapshots.get(snapshot.id):
                    time.sleep(1)
            except cinder_exceptions.NotFound:
                pass
        self.addCleanup(cleaner)
        self._wait_for_volume_status(volume, 'available')
        self._wait_for_volume_snapshot_status(snapshot, 'available')
        self.assertEqual(snapshot_name, snapshot.display_name)
        return snapshot

    def _wait_for_volume_status(self, volume, status):
        """Block until the volume reaches `status` (or time out)."""
        self.status_timeout(
            self.volume_client.volumes, volume.id, status)

    def _create_volume(self, snapshot_id=None):
        return self.create_volume(snapshot_id=snapshot_id)

    def _attach_volume(self, server, volume):
        """Attach `volume` to `server` as /dev/vdb and wait for in-use."""
        attach_volume_client = self.compute_client.volumes.create_server_volume
        attached_volume = attach_volume_client(server.id,
                                               volume.id,
                                               '/dev/vdb')
        self.assertEqual(volume.id, attached_volume.id)
        self._wait_for_volume_status(attached_volume, 'in-use')

    def _detach_volume(self, server, volume):
        detach_volume_client = self.compute_client.volumes.delete_server_volume
        detach_volume_client(server.id, volume.id)
        self._wait_for_volume_status(volume, 'available')

    def _wait_for_volume_availible_on_the_system(self, server_or_ip):
        """Poll over SSH until the guest OS actually sees the vdb device."""
        ssh = self.get_remote_client(server_or_ip)
        conf = self.config

        def _func():
            part = ssh.get_partitions()
            LOG.debug("Partitions:%s" % part)
            return 'vdb' in part

        if not tempest.test.call_until_true(_func,
                                            conf.compute.build_timeout,
                                            conf.compute.build_interval):
            raise exceptions.TimeoutException

    def _create_timestamp(self, server_or_ip):
        # Format the attached volume, write the current date into it and
        # remember the value for the later comparison.
        ssh_client = self._ssh_to_server(server_or_ip)
        ssh_client.exec_command('sudo /usr/sbin/mkfs.ext4 /dev/vdb')
        ssh_client.exec_command('sudo mount /dev/vdb /mnt')
        ssh_client.exec_command('sudo sh -c "date > /mnt/timestamp;sync"')
        self.timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
        ssh_client.exec_command('sudo umount /mnt')

    def _check_timestamp(self, server_or_ip):
        # Mount the (re-created) volume and compare its timestamp file
        # against the value captured in _create_timestamp().
        ssh_client = self._ssh_to_server(server_or_ip)
        ssh_client.exec_command('sudo mount /dev/vdb /mnt')
        got_timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
        self.assertEqual(self.timestamp, got_timestamp)

    @testtools.skip("Skipped until the Bug #1205344 is resolved.")
    @tempest.test.services('compute', 'network', 'volume', 'image')
    def test_stamp_pattern(self):
        # prepare for booting a instance
        self._add_keypair()
        self.create_loginable_secgroup_rule()
        # boot an instance and create a timestamp file in it
        volume = self._create_volume()
        server = self._boot_image(self.config.compute.image_ref)
        # create and add floating IP to server1
        if self.config.compute.use_floatingip_for_ssh:
            floating_ip_for_server = self._create_floating_ip()
            self._add_floating_ip(server, floating_ip_for_server)
            ip_for_server = floating_ip_for_server.ip
        else:
            ip_for_server = server
        self._attach_volume(server, volume)
        self._wait_for_volume_availible_on_the_system(ip_for_server)
        self._create_timestamp(ip_for_server)
        self._detach_volume(server, volume)
        # snapshot the volume
        volume_snapshot = self._create_volume_snapshot(volume)
        # snapshot the instance
        snapshot_image = self.create_server_snapshot(server=server)
        # create second volume from the snapshot(volume2)
        volume_from_snapshot = self._create_volume(
            snapshot_id=volume_snapshot.id)
        # boot second instance from the snapshot(instance2)
        server_from_snapshot = self._boot_image(snapshot_image.id)
        # create and add floating IP to server_from_snapshot
        if self.config.compute.use_floatingip_for_ssh:
            floating_ip_for_snapshot = self._create_floating_ip()
            self._add_floating_ip(server_from_snapshot,
                                  floating_ip_for_snapshot)
            ip_for_snapshot = floating_ip_for_snapshot.ip
        else:
            ip_for_snapshot = server_from_snapshot
        # attach volume2 to instance2
        self._attach_volume(server_from_snapshot, volume_from_snapshot)
        self._wait_for_volume_availible_on_the_system(ip_for_snapshot)
        # check the existence of the timestamp file in the volume2
        self._check_timestamp(ip_for_snapshot)
| apache-2.0 |
double12gzh/nova | nova/api/openstack/compute/plugins/v3/flavors.py | 40 | 4993 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
import webob
from nova.api.openstack import common
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova import utils
# URL collection name for this resource.
ALIAS = 'flavors'


class FlavorsController(wsgi.Controller):
    """Flavor controller for the OpenStack API."""

    _view_builder_class = flavors_view.V3ViewBuilder

    @extensions.expected_errors(400)
    def index(self, req):
        """Return all flavors in brief."""
        limited_flavors = self._get_flavors(req)
        return self._view_builder.index(req, limited_flavors)

    @extensions.expected_errors(400)
    def detail(self, req):
        """Return all flavors in detail."""
        limited_flavors = self._get_flavors(req)
        req.cache_db_flavors(limited_flavors)
        return self._view_builder.detail(req, limited_flavors)

    @extensions.expected_errors(404)
    def show(self, req, id):
        """Return data about the given flavor id.

        Raises HTTPNotFound if the flavor does not exist.
        """
        context = req.environ['nova.context']
        try:
            flavor = flavors.get_flavor_by_flavor_id(id, ctxt=context)
            req.cache_db_flavor(flavor)
        except exception.FlavorNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        return self._view_builder.show(req, flavor)

    def _parse_is_public(self, is_public):
        """Parse is_public into something usable.

        Returns True (public only) when absent, None (no filtering) for a
        "none" string, otherwise a strict boolean; bad values -> 400.
        """
        if is_public is None:
            # preserve default value of showing only public flavors
            return True
        elif utils.is_none_string(is_public):
            return None
        else:
            try:
                return strutils.bool_from_string(is_public, strict=True)
            except ValueError:
                msg = _('Invalid is_public filter [%s]') % is_public
                raise webob.exc.HTTPBadRequest(explanation=msg)

    def _get_flavors(self, req):
        """Helper function that returns a list of flavor dicts."""
        filters = {}
        sort_key = req.params.get('sort_key') or 'flavorid'
        sort_dir = req.params.get('sort_dir') or 'asc'
        limit, marker = common.get_limit_and_marker(req)
        context = req.environ['nova.context']
        if context.is_admin:
            # Only admin has query access to all flavor types
            filters['is_public'] = self._parse_is_public(
                req.params.get('is_public', None))
        else:
            # Non-admin users only ever see public, enabled flavors.
            filters['is_public'] = True
            filters['disabled'] = False
        if 'minRam' in req.params:
            try:
                filters['min_memory_mb'] = int(req.params['minRam'])
            except ValueError:
                msg = _('Invalid min_ram filter [%s]') % req.params['minRam']
                raise webob.exc.HTTPBadRequest(explanation=msg)
        if 'minDisk' in req.params:
            try:
                filters['min_root_gb'] = int(req.params['minDisk'])
            except ValueError:
                msg = (_('Invalid minDisk filter [%s]') %
                       req.params['minDisk'])
                raise webob.exc.HTTPBadRequest(explanation=msg)
        try:
            limited_flavors = flavors.get_all_flavors_sorted_list(context,
                filters=filters, sort_key=sort_key, sort_dir=sort_dir,
                limit=limit, marker=marker)
        except exception.MarkerNotFound:
            # An unknown pagination marker is a client error.
            msg = _('marker [%s] not found') % marker
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return limited_flavors
class Flavors(extensions.V3APIExtensionBase):
    """Flavors Extension."""
    name = "Flavors"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the flavors collection as a v3 API resource."""
        flavor_resource = extensions.ResourceExtension(
            ALIAS,
            FlavorsController(),
            member_name='flavor',
            collection_actions={'detail': 'GET'},
            member_actions={'action': 'POST'})
        return [flavor_resource]

    def get_controller_extensions(self):
        """This extension contributes no controller extensions."""
        return []
| apache-2.0 |
Aetet/react-calendar | node_modules/browserify/node_modules/insert-module-globals/node_modules/lexical-scope/node_modules/astw/node_modules/esprima-six/tools/generate-unicode-regex.py | 341 | 5096 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# By Yusuke Suzuki <utatane.tea@gmail.com>
# Modified by Mathias Bynens <http://mathiasbynens.be/>
# http://code.google.com/p/esprima/issues/detail?id=110
import sys
import string
import re
class RegExpGenerator(object):
    """Builds regex character classes from a Detector over the BMP.

    Each `generate_*` method scans code points (U+0000..U+FFFF, or the
    non-ASCII subrange) and collapses the matching ones into a compact
    `[\\uXXXX-\\uYYYY...]` class string via `_generate_range`.
    """

    def __init__(self, detector):
        self.detector = detector

    def generate_identifier_start(self):
        r = [ch for ch in range(0xFFFF + 1)
             if self.detector.is_identifier_start(ch)]
        return self._generate_range(r)

    def generate_identifier_part(self):
        r = [ch for ch in range(0xFFFF + 1)
             if self.detector.is_identifier_part(ch)]
        return self._generate_range(r)

    def generate_non_ascii_identifier_start(self):
        # Consistency fix: this method used `xrange` while every sibling
        # used `range` (and `xrange` breaks on Python 3); `range` behaves
        # identically here.
        r = [ch for ch in range(0x0080, 0xFFFF + 1)
             if self.detector.is_identifier_start(ch)]
        return self._generate_range(r)

    def generate_non_ascii_identifier_part(self):
        r = [ch for ch in range(0x0080, 0xFFFF + 1)
             if self.detector.is_identifier_part(ch)]
        return self._generate_range(r)

    def generate_non_ascii_separator_space(self):
        r = [ch for ch in range(0x0080, 0xFFFF + 1)
             if self.detector.is_separator_space(ch)]
        return self._generate_range(r)

    def _generate_range(self, r):
        """Collapse an ascending list of code points into a character class.

        Contiguous runs become `\\uXXXX-\\uYYYY`; runs of two become two
        adjacent escapes; singletons a single escape. Returns '[]' for an
        empty input.
        """
        if len(r) == 0:
            return '[]'
        buf = []
        start = r[0]
        end = r[0]
        predict = start + 1
        r = r[1:]
        for code in r:
            if predict == code:
                # Still contiguous: extend the current run.
                end = code
                predict = code + 1
                continue
            else:
                # Flush the finished run in its shortest escaped form.
                if start == end:
                    buf.append("\\u%04X" % start)
                elif end == start + 1:
                    buf.append("\\u%04X\\u%04X" % (start, end))
                else:
                    buf.append("\\u%04X-\\u%04X" % (start, end))
                start = code
                end = code
                predict = code + 1
        # Flush the final run.
        if start == end:
            buf.append("\\u%04X" % start)
        else:
            buf.append("\\u%04X-\\u%04X" % (start, end))
        return '[' + ''.join(buf) + ']'
class Detector(object):
    """Classifies BMP code points.

    `data` is a list indexed by code point whose entries are Unicode
    general-category names (e.g. 'Lu', 'Nd'), as built by analyze().
    """

    def __init__(self, data):
        self.data = data

    def is_ascii(self, ch):
        return ch < 0x80

    def is_ascii_alpha(self, ch):
        # Fold to lowercase with the 0x20 bit, then test 'a'..'z'.
        v = ch | 0x20
        return v >= ord('a') and v <= ord('z')

    def is_decimal_digit(self, ch):
        return ch >= ord('0') and ch <= ord('9')

    def is_octal_digit(self, ch):
        return ch >= ord('0') and ch <= ord('7')

    def is_hex_digit(self, ch):
        v = ch | 0x20
        # BUG FIX: the original called self.is_decimal_digit(c) with an
        # undefined name `c`, raising NameError on any call; it must test `ch`.
        return self.is_decimal_digit(ch) or (v >= ord('a') and v <= ord('f'))

    def is_digit(self, ch):
        return self.is_decimal_digit(ch) or self.data[ch] == 'Nd'

    def is_ascii_alphanumeric(self, ch):
        return self.is_decimal_digit(ch) or self.is_ascii_alpha(ch)

    def _is_non_ascii_identifier_start(self, ch):
        c = self.data[ch]
        return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl'

    def _is_non_ascii_identifier_part(self, ch):
        # Identifier-part additionally allows marks, digits, connector
        # punctuation and the ZWNJ/ZWJ joiners.
        c = self.data[ch]
        return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl' or c == 'Mn' or c == 'Mc' or c == 'Nd' or c == 'Pc' or ch == 0x200C or ch == 0x200D

    def is_separator_space(self, ch):
        return self.data[ch] == 'Zs'

    def is_white_space(self, ch):
        return ch == ord(' ') or ch == ord("\t") or ch == 0xB or ch == 0xC or ch == 0x00A0 or ch == 0xFEFF or self.is_separator_space(ch)

    def is_line_terminator(self, ch):
        return ch == 0x000D or ch == 0x000A or self.is_line_or_paragraph_terminator(ch)

    def is_line_or_paragraph_terminator(self, ch):
        return ch == 0x2028 or ch == 0x2029

    def is_identifier_start(self, ch):
        if self.is_ascii(ch):
            return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alpha(ch)
        return self._is_non_ascii_identifier_start(ch)

    def is_identifier_part(self, ch):
        if self.is_ascii(ch):
            return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alphanumeric(ch)
        return self._is_non_ascii_identifier_part(ch)
def analyze(source):
    """Parse a UnicodeData.txt file at path `source` into a RegExpGenerator.

    Builds a table of general-category names for U+0000..U+FFFF, expanding
    `<..., First>` / `<..., Last>` range pairs, and filling unassigned code
    points with the placeholder "Un".

    Raises ValueError if a `<..., First>` entry is not immediately followed
    by its matching `<..., Last>` entry.
    """
    data = []
    dictionary = {}
    with open(source) as uni:
        flag = False  # inside a <..., First>/<..., Last> range pair
        first = 0     # code point that opened the current range
        for line in uni:
            # Fields: 0 = code point (hex), 1 = name, 2 = general category.
            d = line.strip().split(";")
            val = int(d[0], 16)
            if flag:
                if re.compile("<.+, Last>").match(d[1]):
                    # print "%s : u%X" % (d[1], val)
                    flag = False
                    for t in range(first, val + 1):
                        dictionary[t] = str(d[2])
                else:
                    # BUG FIX: the original `raise "Database Exception"`
                    # string-raise is invalid (it raises a TypeError itself);
                    # raise a real exception for corrupt input instead.
                    raise ValueError("Database Exception")
            else:
                if re.compile("<.+, First>").match(d[1]):
                    # print "%s : u%X" % (d[1], val)
                    flag = True
                    first = val
                else:
                    dictionary[val] = str(d[2])
    for i in range(0xFFFF + 1):
        if dictionary.get(i) is None:
            data.append("Un")  # unassigned code point
        else:
            data.append(dictionary[i])
    return RegExpGenerator(Detector(data))
def main(source):
    # Print the three generated non-ASCII character-class regexes for the
    # UnicodeData.txt file at path `source`.
    generator = analyze(source)
    print generator.generate_non_ascii_identifier_start()
    print generator.generate_non_ascii_identifier_part()
    print generator.generate_non_ascii_separator_space()

if __name__ == '__main__':
    main(sys.argv[1])
| mit |
dpetzold/django | django/db/__init__.py | 376 | 2322 | from django.core import signals
from django.db.utils import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, ConnectionHandler,
ConnectionRouter, DatabaseError, DataError, Error, IntegrityError,
InterfaceError, InternalError, NotSupportedError, OperationalError,
ProgrammingError,
)
# Public names re-exported from django.db.utils for convenience.
__all__ = [
    'backend', 'connection', 'connections', 'router', 'DatabaseError',
    'IntegrityError', 'InternalError', 'ProgrammingError', 'DataError',
    'NotSupportedError', 'Error', 'InterfaceError', 'OperationalError',
    'DEFAULT_DB_ALIAS', 'DJANGO_VERSION_PICKLE_KEY'
]

# Process-wide registry of configured database connections.
connections = ConnectionHandler()

# Routes ORM operations to a database alias via the installed routers.
router = ConnectionRouter()
# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so we
# manually create the dictionary from the settings, passing only the settings
# that the database backends care about.
# We load all these up for backwards compatibility, you should use
# connections['default'] instead.
class DefaultConnectionProxy(object):
    """
    Proxy for accessing the default DatabaseWrapper object's attributes. If you
    need to access the DatabaseWrapper object itself, use
    connections[DEFAULT_DB_ALIAS] instead.
    """
    def __getattr__(self, item):
        return getattr(connections[DEFAULT_DB_ALIAS], item)

    def __setattr__(self, name, value):
        return setattr(connections[DEFAULT_DB_ALIAS], name, value)

    def __delattr__(self, name):
        return delattr(connections[DEFAULT_DB_ALIAS], name)

    def __eq__(self, other):
        return connections[DEFAULT_DB_ALIAS] == other

    def __ne__(self, other):
        return connections[DEFAULT_DB_ALIAS] != other

    # Defining __eq__ sets __hash__ to None on Python 3, which would make the
    # module-level `connection` proxy unhashable and silently diverge from
    # the Python 2 behavior (default identity hash). Restore identity hashing.
    __hash__ = object.__hash__

connection = DefaultConnectionProxy()
# Register an event to reset saved queries when a Django request is started.
def reset_queries(**kwargs):
    # Clear the per-connection debug query log on every connection.
    for conn in connections.all():
        conn.queries_log.clear()

signals.request_started.connect(reset_queries)
# Register an event to reset transaction state and close connections past
# their lifetime.
def close_old_connections(**kwargs):
    # Runs at both ends of the request cycle so stale connections never
    # survive into (or out of) a request.
    for conn in connections.all():
        conn.close_if_unusable_or_obsolete()

signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)
boundary/wireshark | tools/dftestlib/string_type.py | 1 | 4979 | # Copyright (c) 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
from dftestlib import dftest
class testString(dftest.DFTest):
    """Display-filter tests for string-typed fields (against http.cap)."""

    trace_file = "http.cap"

    # -- equality ---------------------------------------------------------
    def test_eq_1(self):
        dfilter = 'http.request.method == "HEAD"'
        self.assertDFilterCount(dfilter, 1)

    def test_eq_2(self):
        dfilter = 'http.request.method == "POST"'
        self.assertDFilterCount(dfilter, 0)

    # -- ordering (lexicographic comparison around "HEAD") ----------------
    def test_gt_1(self):
        dfilter = 'http.request.method > "HEAC"'
        self.assertDFilterCount(dfilter, 1)

    def test_gt_2(self):
        dfilter = 'http.request.method > "HEAD"'
        self.assertDFilterCount(dfilter, 0)

    def test_gt_3(self):
        dfilter = 'http.request.method > "HEAE"'
        self.assertDFilterCount(dfilter, 0)

    def test_ge_1(self):
        dfilter = 'http.request.method >= "HEAC"'
        self.assertDFilterCount(dfilter, 1)

    def test_ge_2(self):
        dfilter = 'http.request.method >= "HEAD"'
        self.assertDFilterCount(dfilter, 1)

    def test_ge_3(self):
        dfilter = 'http.request.method >= "HEAE"'
        self.assertDFilterCount(dfilter, 0)

    def test_lt_1(self):
        dfilter = 'http.request.method < "HEAC"'
        self.assertDFilterCount(dfilter, 0)

    def test_lt_2(self):
        dfilter = 'http.request.method < "HEAD"'
        self.assertDFilterCount(dfilter, 0)

    def test_lt_3(self):
        dfilter = 'http.request.method < "HEAE"'
        self.assertDFilterCount(dfilter, 1)

    def test_le_1(self):
        dfilter = 'http.request.method <= "HEAC"'
        self.assertDFilterCount(dfilter, 0)

    def test_le_2(self):
        dfilter = 'http.request.method <= "HEAD"'
        self.assertDFilterCount(dfilter, 1)

    def test_le_3(self):
        dfilter = 'http.request.method <= "HEAE"'
        self.assertDFilterCount(dfilter, 1)

    # -- slicing (positive, range and negative indexes) -------------------
    def test_slice_1(self):
        dfilter = 'http.request.method[0] == "H"'
        self.assertDFilterCount(dfilter, 1)

    def test_slice_2(self):
        dfilter = 'http.request.method[0] == "P"'
        self.assertDFilterCount(dfilter, 0)

    def test_slice_3(self):
        dfilter = 'http.request.method[0:4] == "HEAD"'
        self.assertDFilterCount(dfilter, 1)

    def test_slice_4(self):
        dfilter = 'http.request.method[0:4] != "HEAD"'
        self.assertDFilterCount(dfilter, 0)

    def test_slice_5(self):
        dfilter = 'http.request.method[1:2] == "EA"'
        self.assertDFilterCount(dfilter, 1)

    def test_slice_6(self):
        dfilter = 'http.request.method[1:2] > "EA"'
        self.assertDFilterCount(dfilter, 0)

    def test_slice_7(self):
        dfilter = 'http.request.method[-1] == "D"'
        self.assertDFilterCount(dfilter, 1)

    def test_slice_8(self):
        dfilter = 'http.request.method[-2] == "D"'
        self.assertDFilterCount(dfilter, 0)

    # -- stringz fields (disabled: need a tftp capture, hence the xxx) ----
    def xxxtest_stringz_1(self):
        return self.DFilterCount(pkt_tftp,
            'tftp.type == "octet"', 1)

    def xxxtest_stringz_2(self):
        return self.DFilterCount(pkt_tftp,
            'tftp.type == "junk"', 0)

    # -- "contains", with both string and byte-sequence operands ----------
    def test_contains_1(self):
        dfilter = 'http.request.method contains "E"'
        self.assertDFilterCount(dfilter, 1)

    def test_contains_2(self):
        dfilter = 'http.request.method contains "EA"'
        self.assertDFilterCount(dfilter, 1)

    def test_contains_3(self):
        dfilter = 'http.request.method contains "HEAD"'
        self.assertDFilterCount(dfilter, 1)

    def test_contains_4(self):
        dfilter = 'http.request.method contains "POST"'
        self.assertDFilterCount(dfilter, 0)

    def test_contains_5(self):
        dfilter = 'http.request.method contains 50:4f:53:54' # "POST"
        self.assertDFilterCount(dfilter, 0)

    def test_contains_6(self):
        dfilter = 'http.request.method contains 48:45:41:44' # "HEAD"
        self.assertDFilterCount(dfilter, 1)

    def test_contains_fail_0(self):
        dfilter = 'http.user_agent contains "update"'
        self.assertDFilterCount(dfilter, 0)

    def test_contains_fail_1(self):
        dfilter = 'http.user_agent contains "UPDATE"'
        self.assertDFilterCount(dfilter, 0)

    # -- upper()/lower() conversion functions; non-string args must fail --
    def test_contains_upper_0(self):
        dfilter = 'upper(http.user_agent) contains "UPDATE"'
        self.assertDFilterCount(dfilter, 1)

    def test_contains_upper_1(self):
        dfilter = 'upper(http.user_agent) contains "update"'
        self.assertDFilterCount(dfilter, 0)

    def test_contains_upper_2(self):
        dfilter = 'upper(tcp.seq) == 4'
        self.assertDFilterFail(dfilter)

    def test_contains_lower_0(self):
        dfilter = 'lower(http.user_agent) contains "UPDATE"'
        self.assertDFilterCount(dfilter, 0)

    def test_contains_lower_1(self):
        dfilter = 'lower(http.user_agent) contains "update"'
        self.assertDFilterCount(dfilter, 1)

    def test_contains_lower_2(self):
        dfilter = 'lower(tcp.seq) == 4'
        self.assertDFilterFail(dfilter)
| gpl-2.0 |
yuanagain/seniorthesis | venv/lib/python3.5/site-packages/setuptools/command/bdist_rpm.py | 1049 | 1508 | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
    """
    Override the default bdist_rpm behavior to do the following:

    1. Run egg_info to ensure the name and version are properly calculated.
    2. Always run 'install' using --single-version-externally-managed to
       disable eggs in RPM distributions.
    3. Replace dash with underscore in the version numbers for better RPM
       compatibility.
    """

    def run(self):
        # ensure distro name is up-to-date
        self.run_command('egg_info')

        orig.bdist_rpm.run(self)

    def _make_spec_file(self):
        # RPM does not accept dashes in the Version tag, so the spec uses a
        # mangled version, while source paths keep the original
        # ("unmangled") version via a new %{unmangled_version} macro.
        version = self.distribution.get_version()
        rpmversion = version.replace('-', '_')
        spec = orig.bdist_rpm._make_spec_file(self)
        line23 = '%define version ' + version
        line24 = '%define version ' + rpmversion
        spec = [
            line.replace(
                "Source0: %{name}-%{version}.tar",
                "Source0: %{name}-%{unmangled_version}.tar"
            ).replace(
                "setup.py install ",
                "setup.py install --single-version-externally-managed "
            ).replace(
                "%setup",
                "%setup -n %{name}-%{unmangled_version}"
            ).replace(line23, line24)
            for line in spec
        ]
        # Insert the %{unmangled_version} definition right after the
        # (now mangled) %define version line.
        insert_loc = spec.index(line24) + 1
        unmangled_version = "%define unmangled_version " + version
        spec.insert(insert_loc, unmangled_version)
        return spec
| mit |
NotKit/sonic-rush-tools | lz77.py | 1 | 6904 | # From https://github.com/magical/nlzss
# used http://code.google.com/p/u-lzss/source/browse/trunk/js/lib/ulzss.js as
# a guide
from sys import stderr
from collections import defaultdict
from operator import itemgetter
from struct import pack, unpack
class SlidingWindow:
    """LZSS-style sliding search window over a byte buffer.

    Maintains, per byte value, the list of positions where that value
    currently occurs inside the window (``self.hash``), so ``search`` can
    find the longest earlier match for the data at the current ``index``.
    Subclasses set the class attributes below to describe a concrete
    compressed format.
    """

    # The size of the sliding window
    size = 4096

    # The minimum displacement.
    disp_min = 2

    # The hard minimum - a disp less than this can't be represented in the
    # compressed stream.
    disp_start = 1

    # The minimum length for a successful match in the window
    match_min = 1

    # The maximum length of a successful match, inclusive.
    match_max = None

    def __init__(self, buf):
        self.data = buf
        self.hash = defaultdict(list)  # byte value -> positions, oldest first
        self.full = False              # has the window reached `size` bytes?
        self.start = 0                 # oldest position still in the window
        self.stop = 0                  # first position not yet entered
        #self.index = self.disp_min - 1
        self.index = 0                 # current position being encoded
        assert self.match_max is not None

    def next(self):
        """Slide the window forward by one byte."""
        if self.index < self.disp_start - 1:
            self.index += 1
            return
        if self.full:
            # Evict the oldest position from the per-byte index.
            olditem = self.data[self.start]
            assert self.hash[olditem][0] == self.start
            self.hash[olditem].pop(0)
        item = self.data[self.stop]
        self.hash[item].append(self.stop)
        self.stop += 1
        self.index += 1
        if self.full:
            self.start += 1
        else:
            if self.size <= self.stop:
                self.full = True

    def advance(self, n=1):
        """Advance the window by n bytes"""
        for _ in range(n):
            self.next()

    def search(self):
        """Return the best (matchlen, -disp) pair at `index`, or None.

        Only candidates with displacement >= disp_min are considered; the
        scan stops early as soon as a match of match_max length is found.
        """
        match_max = self.match_max
        match_min = self.match_min
        counts = []
        # Candidate start positions: everywhere the current byte occurs.
        indices = self.hash[self.data[self.index]]
        for i in indices:
            matchlen = self.match(i, self.index)
            if matchlen >= match_min:
                disp = self.index - i
                #assert self.index - disp >= 0
                #assert self.disp_min <= disp < self.size + self.disp_min
                if self.disp_min <= disp:
                    counts.append((matchlen, -disp))
                    if matchlen >= match_max:
                        #assert matchlen == match_max
                        return counts[-1]  # cannot do better; stop early
        if counts:
            match = max(counts, key=itemgetter(0))
            return match
        return None

    def match(self, start, bufstart):
        """Length of the match between window position `start` and `bufstart`.

        The source is read modulo the gap size, so a short earlier sequence
        can match a longer repeating run (overlapping copies).
        """
        size = self.index - start
        if size == 0:
            return 0
        matchlen = 0
        it = range(min(len(self.data) - bufstart, self.match_max))
        for i in it:
            if self.data[start + (i % size)] == self.data[bufstart + i]:
                matchlen += 1
            else:
                break
        return matchlen
class NLZ10Window(SlidingWindow):
    # LZ10 parameters: 4-bit length field, so matches span 3..18 bytes.
    size = 4096
    match_min = 3
    match_max = 3 + 0xf
class NLZ11Window(SlidingWindow):
    # LZ11 parameters: extended length encodings allow matches up to
    # 0x111 + 0xFFFF bytes (see compress_nlz11 for the three encodings).
    size = 4096
    match_min = 3
    match_max = 0x111 + 0xFFFF
class NOverlayWindow(NLZ10Window):
    # Overlay variant of LZ10 with a stricter minimum displacement;
    # presumably this rules out closely-overlapping copies — verify against
    # the corresponding decompressor before relying on it.
    disp_min = 3
def _compress(input, windowclass=NLZ10Window):
    """Generates a stream of tokens. Either a byte (int) or a tuple of (count,
    displacement)."""
    window = windowclass(input)
    i = 0
    while True:
        if len(input) <= i:
            break
        match = window.search()
        if match:
            yield match
            #if match[1] == -283:
            #    raise Exception(match, i)
            window.advance(match[0])  # consume the whole matched run
            i += match[0]
        else:
            yield input[i]  # no usable match: emit a literal byte
            window.next()
            i += 1
def packflags(flags):
    """Pack up to 8 truthy/falsy values into one byte, MSB first.

    Missing trailing entries (len(flags) < 8) count as 0 bits, matching the
    behavior of the final, partial chunk emitted by chunkit().
    """
    # Replaced the original try/except IndexError control flow with an
    # explicit bounds check: same result, no exception per missing index.
    n = 0
    count = len(flags)
    for i in range(8):
        n <<= 1
        if i < count and flags[i]:
            n |= 1
    return n
def chunkit(it, n):
    """Yield successive lists of at most *n* items from iterable *it*.

    The final list may be shorter than *n*; nothing is yielded for an
    empty input.
    """
    chunk = []
    for item in it:
        chunk.append(item)
        if len(chunk) >= n:
            yield chunk
            chunk = []
    if chunk:
        yield chunk
def compress(input, out):
    """LZ10-compress `input` and write the result to the file-like `out`.

    Layout: a 4-byte little-endian header ((uncompressed_size << 8) | 0x10),
    then groups of 8 tokens, each preceded by one flag byte (bit set => the
    token is a 2-byte (count, displacement) pair, clear => a literal byte).
    The body is padded with 0xFF to a 4-byte boundary.
    """
    # header
    out.write(pack("<L", (len(input) << 8) + 0x10))

    # body
    length = 0  # bytes written after the header, used for padding
    for tokens in chunkit(_compress(input), 8):
        flags = [type(t) == tuple for t in tokens]
        out.write(pack(">B", packflags(flags)))
        length += 1  # the flag byte itself
        for t in tokens:
            if type(t) == tuple:
                count, disp = t
                count -= 3
                disp = (-disp) - 1
                assert 0 <= disp < 4096
                sh = (count << 12) | disp
                out.write(pack(">H", sh))
            else:
                if isinstance(t, str):
                    out.write(t)
                else:
                    out.write(pack(">B", t))
        # One byte per literal, two per (count, disp) pair.
        # BUG FIX: the original also did `length += 1` per literal *and*
        # added this sum (double-counting literals) while never counting the
        # flag byte, so the 4-byte padding could come out wrong; this now
        # matches the byte accounting used by compress_nlz11().
        length += sum(2 if f else 1 for f in flags)

    # padding to a 4-byte boundary
    padding = 4 - (length % 4 or 4)
    if padding:
        out.write(b'\xff' * padding)
def compress_nlz11(input, out):
    """LZ11-compress `input` and write the result to the file-like `out`.

    Same flag-byte framing as LZ10, but (count, disp) pairs use one of
    three encodings (2, 3 or 4 bytes) depending on the match length.
    """
    # header: (uncompressed size << 8) | type byte 0x11, little-endian
    out.write(pack("<L", (len(input) << 8) + 0x11))

    # body
    length = 0  # bytes written after the header, used for padding
    for tokens in chunkit(_compress(input, windowclass=NLZ11Window), 8):
        flags = [type(t) == tuple for t in tokens]
        out.write(pack(">B", packflags(flags)))
        length += 1  # the flag byte
        for t in tokens:
            if type(t) == tuple:
                count, disp = t
                disp = (-disp) - 1
                #if disp == 282:
                #    raise Exception
                assert 0 <= disp <= 0xFFF
                if count <= 1 + 0xF:
                    # 2-byte form: 4-bit length (2..0xF, biased by 1)
                    count -= 1
                    assert 2 <= count <= 0xF
                    sh = (count << 12) | disp
                    out.write(pack(">H", sh))
                    length += 2
                elif count <= 0x11 + 0xFF:
                    # 3-byte form: 8-bit length, biased by 0x11
                    count -= 0x11
                    assert 0 <= count <= 0xFF
                    b = count >> 4
                    sh = ((count & 0xF) << 12) | disp
                    out.write(pack(">BH", b, sh))
                    length += 3
                elif count <= 0x111 + 0xFFFF:
                    # 4-byte form: 16-bit length, biased by 0x111, tag bit 28
                    count -= 0x111
                    assert 0 <= count <= 0xFFFF
                    l = (1 << 28) | (count << 12) | disp
                    out.write(pack(">L", l))
                    length += 4
                else:
                    raise ValueError(count)
            else:
                out.write(pack(">B", t))
                length += 1

    # padding to a 4-byte boundary
    padding = 4 - (length % 4 or 4)
    if padding:
        out.write(b'\xff' * padding)
def dump_compress_nlz11(input, out):
    """Debug helper: pretty-print the (count, disp) pairs the LZ11
    tokenizer would emit for `input`. `out` and `length` are unused."""
    # body
    length = 0
    def dump():
        # Yield only the match tuples, skipping literal bytes.
        for t in _compress(input, windowclass=NLZ11Window):
            if type(t) == tuple:
                yield t
    from pprint import pprint
    pprint(list(dump()))
if __name__ == '__main__':
    # CLI: compress the file named by argv[1] to stdout (raw bytes).
    from sys import stdout, argv
    data = open(argv[1], "rb").read()
    stdout = stdout.detach()  # switch to the underlying binary stream
    #compress(data, stdout)
    compress_nlz11(data, stdout)
    #dump_compress_nlz11(data, stdout)
| mit |
faith0811/zerorpc-python | tests/test_buffered_channel.py | 14 | 16294 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2015 François-Xavier Bourlet (bombela+zerorpc@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import assert_raises
import gevent
import sys
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint, TIME_FACTOR
def test_close_server_bufchan():
    # Closing the server end of a buffered channel must surface as
    # LostRemote on the client side.
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)

    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)

    # Stack heartbeat + buffering on a fresh client channel.
    client_channel = client.channel()
    client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=TIME_FACTOR * 2)
    client_bufchan = zerorpc.BufferedChannel(client_hbchan)
    client_bufchan.emit('openthat', None)  # first event opens the channel

    # Server accepts the channel and mirrors the same stack.
    event = server.recv()
    server_channel = server.channel(event)
    server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=TIME_FACTOR * 2)
    server_bufchan = zerorpc.BufferedChannel(server_hbchan)
    server_bufchan.recv()

    # Sleep longer than the heartbeat period so the link is actively beating.
    gevent.sleep(TIME_FACTOR * 3)
    print 'CLOSE SERVER SOCKET!!!'
    server_bufchan.close()
    if sys.version_info < (2, 7):
        assert_raises(zerorpc.LostRemote, client_bufchan.recv)
    else:
        with assert_raises(zerorpc.LostRemote):
            client_bufchan.recv()
    print 'CLIENT LOST SERVER :)'
    client_bufchan.close()
    server.close()
    client.close()
def test_close_client_bufchan():
    """Closing the client's buffered channel must surface zerorpc.LostRemote."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    # Client stack: raw channel -> heartbeat -> buffered channel.
    client_channel = client.channel()
    client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=TIME_FACTOR * 2)
    client_bufchan = zerorpc.BufferedChannel(client_hbchan)
    client_bufchan.emit('openthat', None)
    # Server accepts the pending event and mirrors the same channel stack.
    event = server.recv()
    server_channel = server.channel(event)
    server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=TIME_FACTOR * 2)
    server_bufchan = zerorpc.BufferedChannel(server_hbchan)
    server_bufchan.recv()
    gevent.sleep(TIME_FACTOR * 3)
    print 'CLOSE CLIENT SOCKET!!!'
    client_bufchan.close()
    # nose's assert_raises only works as a context manager on Python >= 2.7.
    # NOTE(review): despite the test name and the print below, this recv()s on
    # the *client* channel that was just closed -- confirm whether
    # server_bufchan.recv was the intended call here.
    if sys.version_info < (2, 7):
        assert_raises(zerorpc.LostRemote, client_bufchan.recv)
    else:
        with assert_raises(zerorpc.LostRemote):
            client_bufchan.recv()
    print 'SERVER LOST CLIENT :)'
    server_bufchan.close()
    server.close()
    client.close()
def test_heartbeat_can_open_channel_server_close():
    """The heartbeat alone (no user emit) must be enough to open the channel;
    closing the server side must then raise zerorpc.LostRemote client-side."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    client_channel = client.channel()
    client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=TIME_FACTOR * 2)
    client_bufchan = zerorpc.BufferedChannel(client_hbchan)
    # No explicit emit: the event the server receives is the first heartbeat.
    event = server.recv()
    server_channel = server.channel(event)
    server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=TIME_FACTOR * 2)
    server_bufchan = zerorpc.BufferedChannel(server_hbchan)
    gevent.sleep(TIME_FACTOR * 3)
    print 'CLOSE SERVER SOCKET!!!'
    server_bufchan.close()
    # nose's assert_raises only works as a context manager on Python >= 2.7.
    if sys.version_info < (2, 7):
        assert_raises(zerorpc.LostRemote, client_bufchan.recv)
    else:
        with assert_raises(zerorpc.LostRemote):
            client_bufchan.recv()
    print 'CLIENT LOST SERVER :)'
    client_bufchan.close()
    server.close()
    client.close()
def test_heartbeat_can_open_channel_client_close():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.ROUTER)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=TIME_FACTOR * 2)
client_bufchan = zerorpc.BufferedChannel(client_hbchan)
def server_fn():
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=TIME_FACTOR * 2)
server_bufchan = zerorpc.BufferedChannel(server_hbchan)
try:
while True:
gevent.sleep(1)
finally:
server_bufchan.close()
server_coro = gevent.spawn(server_fn)
gevent.sleep(TIME_FACTOR * 3)
print 'CLOSE CLIENT SOCKET!!!'
client_bufchan.close()
client.close()
if sys.version_info < (2, 7):
assert_raises(zerorpc.LostRemote, server_coro.get())
else:
with assert_raises(zerorpc.LostRemote):
server_coro.get()
print 'SERVER LOST CLIENT :)'
server.close()
def test_do_some_req_rep():
    """20 request/reply round trips over buffered channels: the client sends
    ('add', (x, x*x)) and the server answers ('OK', (x + x*x,))."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    def client_do():
        # Client coroutine: emit a request, then block on the matching reply.
        client_channel = client.channel()
        client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=TIME_FACTOR * 2)
        client_bufchan = zerorpc.BufferedChannel(client_hbchan)
        for x in xrange(20):
            client_bufchan.emit('add', (x, x * x))
            event = client_bufchan.recv()
            assert event.name == 'OK'
            assert list(event.args) == [x + x * x]
        client_bufchan.close()
    coro_pool = gevent.pool.Pool()
    coro_pool.spawn(client_do)
    def server_do():
        # Server coroutine: answer each 'add' with the sum of its args.
        event = server.recv()
        server_channel = server.channel(event)
        server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=TIME_FACTOR * 2)
        server_bufchan = zerorpc.BufferedChannel(server_hbchan)
        for x in xrange(20):
            event = server_bufchan.recv()
            assert event.name == 'add'
            server_bufchan.emit('OK', (sum(event.args),))
        server_bufchan.close()
    coro_pool.spawn(server_do)
    coro_pool.join()
    client.close()
    server.close()
def test_do_some_req_rep_lost_server():
    """After 10 successful round trips the server closes; the client's 11th
    request must end in zerorpc.LostRemote."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    def client_do():
        print 'running'
        client_channel = client.channel()
        client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=TIME_FACTOR * 2)
        client_bufchan = zerorpc.BufferedChannel(client_hbchan)
        for x in xrange(10):
            client_bufchan.emit('add', (x, x * x))
            event = client_bufchan.recv()
            assert event.name == 'OK'
            assert list(event.args) == [x + x * x]
        # One extra request after the server quits (x leaks from the loop,
        # a deliberate Python 2 idiom here); recv() must then fail.
        client_bufchan.emit('add', (x, x * x))
        # nose's assert_raises only works as a context manager on >= 2.7.
        if sys.version_info < (2, 7):
            assert_raises(zerorpc.LostRemote, client_bufchan.recv)
        else:
            with assert_raises(zerorpc.LostRemote):
                client_bufchan.recv()
        client_bufchan.close()
    coro_pool = gevent.pool.Pool()
    coro_pool.spawn(client_do)
    def server_do():
        # Server answers exactly 10 requests, then closes its channel.
        event = server.recv()
        server_channel = server.channel(event)
        server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=TIME_FACTOR * 2)
        server_bufchan = zerorpc.BufferedChannel(server_hbchan)
        for x in xrange(10):
            event = server_bufchan.recv()
            assert event.name == 'add'
            server_bufchan.emit('OK', (sum(event.args),))
        server_bufchan.close()
    coro_pool.spawn(server_do)
    coro_pool.join()
    client.close()
    server.close()
def test_do_some_req_rep_lost_client():
    """After 10 round trips the client closes; the server's 11th recv() must
    end in zerorpc.LostRemote."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    def client_do():
        # Client completes its 10 exchanges and goes away cleanly.
        client_channel = client.channel()
        client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=TIME_FACTOR * 2)
        client_bufchan = zerorpc.BufferedChannel(client_hbchan)
        for x in xrange(10):
            client_bufchan.emit('add', (x, x * x))
            event = client_bufchan.recv()
            assert event.name == 'OK'
            assert list(event.args) == [x + x * x]
        client_bufchan.close()
    coro_pool = gevent.pool.Pool()
    coro_pool.spawn(client_do)
    def server_do():
        event = server.recv()
        server_channel = server.channel(event)
        server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=TIME_FACTOR * 2)
        server_bufchan = zerorpc.BufferedChannel(server_hbchan)
        for x in xrange(10):
            event = server_bufchan.recv()
            assert event.name == 'add'
            server_bufchan.emit('OK', (sum(event.args),))
        # The client is gone by now: the extra recv() must raise.
        # nose's assert_raises only works as a context manager on >= 2.7.
        if sys.version_info < (2, 7):
            assert_raises(zerorpc.LostRemote, server_bufchan.recv)
        else:
            with assert_raises(zerorpc.LostRemote):
                server_bufchan.recv()
        server_bufchan.close()
    coro_pool.spawn(server_do)
    coro_pool.join()
    client.close()
    server.close()
def test_do_some_req_rep_client_timeout():
    """The server sleeps TIME_FACTOR*x before each reply while the client only
    waits TIME_FACTOR*3: the client must hit TimeoutExpired once the sleep
    outgrows its recv timeout, and the abandoned server must then see
    zerorpc.LostRemote."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    def client_do():
        client_channel = client.channel()
        client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=TIME_FACTOR * 2)
        client_bufchan = zerorpc.BufferedChannel(client_hbchan)
        # nose's assert_raises only works as a context manager on >= 2.7,
        # hence the wrapped-function variant for older interpreters.
        if sys.version_info < (2, 7):
            def _do_with_assert_raises():
                for x in xrange(10):
                    client_bufchan.emit('sleep', (x,))
                    event = client_bufchan.recv(timeout=TIME_FACTOR * 3)
                    assert event.name == 'OK'
                    assert list(event.args) == [x]
            assert_raises(zerorpc.TimeoutExpired, _do_with_assert_raises)
        else:
            with assert_raises(zerorpc.TimeoutExpired):
                for x in xrange(10):
                    client_bufchan.emit('sleep', (x,))
                    event = client_bufchan.recv(timeout=TIME_FACTOR * 3)
                    assert event.name == 'OK'
                    assert list(event.args) == [x]
        client_bufchan.close()
    coro_pool = gevent.pool.Pool()
    coro_pool.spawn(client_do)
    def server_do():
        event = server.recv()
        server_channel = server.channel(event)
        server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=TIME_FACTOR * 2)
        server_bufchan = zerorpc.BufferedChannel(server_hbchan)
        # The server would happily serve 20 requests, but the client times
        # out and disappears first, so this loop must die with LostRemote.
        if sys.version_info < (2, 7):
            def _do_with_assert_raises():
                for x in xrange(20):
                    event = server_bufchan.recv()
                    assert event.name == 'sleep'
                    gevent.sleep(TIME_FACTOR * event.args[0])
                    server_bufchan.emit('OK', event.args)
            assert_raises(zerorpc.LostRemote, _do_with_assert_raises)
        else:
            with assert_raises(zerorpc.LostRemote):
                for x in xrange(20):
                    event = server_bufchan.recv()
                    assert event.name == 'sleep'
                    gevent.sleep(TIME_FACTOR * event.args[0])
                    server_bufchan.emit('OK', event.args)
        server_bufchan.close()
    coro_pool.spawn(server_do)
    coro_pool.join()
    client.close()
    server.close()
def test_congestion_control_server_pushing():
    """Server pushes 200 events through a BufferedChannel with a bounded input
    queue (100): emits with timeout=0 must fail with TimeoutExpired once the
    queue is full, and a blocking emit must resume the flow."""
    endpoint = random_ipc_endpoint()
    server_events = zerorpc.Events(zmq.ROUTER)
    server_events.bind(endpoint)
    server = zerorpc.ChannelMultiplexer(server_events)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    # BUGFIX: the original used ``global read_cnt`` inside client_do, which
    # targets a (non-existent) module-level name instead of this local --
    # raising NameError on the first increment -- while server_do kept seeing
    # the stale closure value 0.  Python 2 has no ``nonlocal``, so a shared
    # one-element list carries the count between the two coroutines instead.
    read_cnt = [0]
    def client_do():
        client_channel = client.channel()
        client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=TIME_FACTOR * 2)
        client_bufchan = zerorpc.BufferedChannel(client_hbchan, inqueue_size=100)
        for x in xrange(200):
            event = client_bufchan.recv()
            assert event.name == 'coucou'
            assert event.args == x
            read_cnt[0] += 1
        client_bufchan.close()
    coro_pool = gevent.pool.Pool()
    coro_pool.spawn(client_do)
    def server_do():
        event = server.recv()
        server_channel = server.channel(event)
        server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=TIME_FACTOR * 2)
        server_bufchan = zerorpc.BufferedChannel(server_hbchan, inqueue_size=100)
        # nose's assert_raises only works as a context manager on >= 2.7.
        if sys.version_info < (2, 7):
            def _do_with_assert_raises():
                for x in xrange(200):
                    server_bufchan.emit('coucou', x, timeout=0) # will fail when x == 1
            assert_raises(zerorpc.TimeoutExpired, _do_with_assert_raises)
        else:
            with assert_raises(zerorpc.TimeoutExpired):
                for x in xrange(200):
                    server_bufchan.emit('coucou', x, timeout=0) # will fail when x == 1
        server_bufchan.emit('coucou', 1) # block until receiver is ready
        if sys.version_info < (2, 7):
            def _do_with_assert_raises():
                for x in xrange(2, 200):
                    server_bufchan.emit('coucou', x, timeout=0) # will fail when x == 100
            assert_raises(zerorpc.TimeoutExpired, _do_with_assert_raises)
        else:
            with assert_raises(zerorpc.TimeoutExpired):
                for x in xrange(2, 200):
                    server_bufchan.emit('coucou', x, timeout=0) # will fail when x == 100
        # Resume from whatever the client actually consumed so far.
        for x in xrange(read_cnt[0], 200):
            server_bufchan.emit('coucou', x) # block until receiver is ready
        server_bufchan.close()
    coro_pool.spawn(server_do)
    try:
        coro_pool.join()
    except zerorpc.LostRemote:
        pass
    finally:
        client.close()
        server.close()
| mit |
duducosmos/pgs4a | python-install/lib/python2.7/encodings/zlib_codec.py | 533 | 3015 | """ Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input, errors='strict'):
    """Compress *input* with zlib.

    Returns a ``(compressed_data, length_consumed)`` pair as required by the
    codec API.  Only 'strict' error handling is supported by this codec.
    """
    assert errors == 'strict'
    compressed = zlib.compress(input)
    return (compressed, len(input))
def zlib_decode(input, errors='strict'):
    """Decompress *input* with zlib.

    *input* must provide the buffer interface (strings, buffer objects and
    memory-mapped files all qualify).  Returns a
    ``(decompressed_data, length_consumed)`` pair as required by the codec
    API.  Only 'strict' error handling is supported by this codec.
    """
    assert errors == 'strict'
    expanded = zlib.decompress(input)
    return (expanded, len(input))
class Codec(codecs.Codec):
    """Stateless codec interface; delegates to the module-level
    zlib_encode/zlib_decode helpers."""
    def encode(self, input, errors='strict'):
        return zlib_encode(input, errors)
    def decode(self, input, errors='strict'):
        return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Feed data piecewise into a zlib compressor.

    Only 'strict' error handling is supported; the compressor is flushed when
    ``final`` is true and recreated by reset().
    """
    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.compressobj = zlib.compressobj()

    def encode(self, input, final=False):
        chunk = self.compressobj.compress(input)
        if not final:
            return chunk
        # Last chunk: drain whatever the compressor still buffers.
        return chunk + self.compressobj.flush()

    def reset(self):
        self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Feed compressed data piecewise into a zlib decompressor.

    Only 'strict' error handling is supported; the decompressor is flushed
    when ``final`` is true and recreated by reset().
    """
    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.decompressobj = zlib.decompressobj()

    def decode(self, input, final=False):
        chunk = self.decompressobj.decompress(input)
        if not final:
            return chunk
        # Last chunk: drain whatever the decompressor still buffers.
        return chunk + self.decompressobj.flush()

    def reset(self):
        self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream wrapper; all real work happens in the inherited Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream wrapper; all real work happens in the inherited Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register the 'zlib' codec."""
    entry = dict(
        name='zlib',
        encode=zlib_encode,
        decode=zlib_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
    return codecs.CodecInfo(**entry)
NicCOConnor/ansible-modules-core | cloud/amazon/cloudformation.py | 10 | 14498 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cloudformation
short_description: Create or delete an AWS CloudFormation stack
description:
- Launches an AWS CloudFormation stack and waits for it complete.
version_added: "1.1"
options:
stack_name:
description:
- name of the cloudformation stack
required: true
default: null
aliases: []
disable_rollback:
description:
- If a stacks fails to form, rollback will remove the stack
required: false
default: "false"
choices: [ "true", "false" ]
aliases: []
template_parameters:
description:
- a list of hashes of all the template variables for the stack
required: false
default: {}
aliases: []
state:
description:
- If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated.
If state is "absent", stack will be removed.
required: true
default: null
aliases: []
template:
description:
- The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present"
Must give full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json"
required: false
default: null
aliases: []
notification_arns:
description:
- The Simple Notification Service (SNS) topic ARNs to publish stack related events.
required: false
default: null
version_added: "2.0"
stack_policy:
description:
- the path of the cloudformation stack policy
required: false
default: null
aliases: []
version_added: "x.x"
tags:
description:
- Dictionary of tags to associate with stack and it's resources during stack creation. Cannot be updated later.
Requires at least Boto version 2.6.0.
required: false
default: null
aliases: []
version_added: "1.4"
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: ['aws_region', 'ec2_region']
version_added: "1.5"
template_url:
description:
- Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region as the stack. This parameter is mutually exclusive with 'template'. Either one of them is required if "state" parameter is "present"
required: false
version_added: "2.0"
template_format:
description: For local templates, allows specification of json or yaml format
default: json
choices: [ json, yaml ]
required: false
version_added: "2.0"
author: "James S. Martin (@jsmartin)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic task example
- name: launch ansible cloudformation example
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "files/cloudformation-example.json"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: "ansible-cloudformation"
# Basic role example
- name: launch ansible cloudformation example
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "roles/cloudformation/files/cloudformation-example.json"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: "ansible-cloudformation"
# Removal example
- name: tear down old deployment
cloudformation:
stack_name: "ansible-cloudformation-old"
state: "absent"
# Use a template from a URL
- name: launch ansible cloudformation example
cloudformation:
stack_name="ansible-cloudformation" state=present
region=us-east-1 disable_rollback=true
template_url=https://s3.amazonaws.com/my-bucket/cloudformation.template
args:
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
'''
import json
import time
import yaml
try:
    import boto
    import boto.cloudformation.connection
    HAS_BOTO = True
except ImportError:
    # boto stays optional at import time; main() reports a clean failure
    # via fail_json() when HAS_BOTO is False.
    HAS_BOTO = False
def boto_exception(err):
    '''Extract the most useful human-readable message from a boto error.

    Prefers the rich ``error_message`` attribute, falls back to ``message``,
    and finally stringifies the object itself.
    '''
    if hasattr(err, 'error_message'):
        return err.error_message
    if hasattr(err, 'message'):
        return err.message
    return '%s: %s' % (Exception, err)
def boto_version_required(version_tuple):
    '''Return True when the installed boto is at least *version_tuple*.

    Non-numeric version components (e.g. the 'b1' in '2.6b1') abort the
    parse: a single -1 is recorded in their place and comparison proceeds
    with whatever was parsed so far.
    '''
    parsed = []
    try:
        for piece in boto.Version.split('.'):
            parsed.append(int(piece))
    except:
        parsed.append(-1)
    return tuple(parsed) >= tuple(version_tuple)
def stack_operation(cfn, stack_name, operation):
    '''Poll the stack status every 5 seconds until the given CREATE/UPDATE/
    DELETE *operation* finishes, and return the result dict handed to
    module.exit_json().'''
    existed = []
    result = {}
    operation_complete = False
    while operation_complete == False:
        try:
            stack = invoke_with_throttling_retries(cfn.describe_stacks, stack_name)[0]
            existed.append('yes')
        except:
            # describe_stacks raises once the stack is gone: if we ever saw
            # it, this was a successful delete, otherwise it never existed.
            # NOTE(review): 'stack' here still holds the value from the
            # previous loop iteration; presumably that is intended so the
            # final events of the deleted stack can be reported -- confirm.
            if 'yes' in existed:
                result = dict(changed=True,
                              output='Stack Deleted',
                              events=map(str, list(stack.describe_events())))
            else:
                result = dict(changed= True, output='Stack Not Found')
            break
        if '%s_COMPLETE' % operation == stack.stack_status:
            result = dict(changed=True,
                          events = map(str, list(stack.describe_events())),
                          output = 'Stack %s complete' % operation)
            break
        if 'ROLLBACK_COMPLETE' == stack.stack_status or '%s_ROLLBACK_COMPLETE' % operation == stack.stack_status:
            result = dict(changed=True, failed=True,
                          events = map(str, list(stack.describe_events())),
                          output = 'Problem with %s. Rollback complete' % operation)
            break
        elif '%s_FAILED' % operation == stack.stack_status:
            result = dict(changed=True, failed=True,
                          events = map(str, list(stack.describe_events())),
                          output = 'Stack %s failed' % operation)
            break
        elif '%s_ROLLBACK_FAILED' % operation == stack.stack_status:
            result = dict(changed=True, failed=True,
                          events = map(str, list(stack.describe_events())),
                          output = 'Stack %s rollback failed' % operation)
            break
        else:
            # Operation still in progress; poll again shortly.
            time.sleep(5)
    return result
# AWS error code that is retried with backoff instead of raised immediately.
IGNORE_CODE = 'Throttling'
MAX_RETRIES=3
def invoke_with_throttling_retries(function_ref, *argv):
    # Call *function_ref*(*argv), retrying up to MAX_RETRIES times with
    # exponential backoff (5s, 10s, 20s) when AWS answers with 'Throttling'.
    # Any other BotoServerError, or exhausting the retries, re-raises.
    retries=0
    while True:
        try:
            retval=function_ref(*argv)
            return retval
        except boto.exception.BotoServerError, e:
            if e.code != IGNORE_CODE or retries==MAX_RETRIES:
                raise e
            time.sleep(5 * (2**retries))
            retries += 1
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
stack_name=dict(required=True),
template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=False),
notification_arns=dict(default=None, required=False),
stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'),
template_url=dict(default=None, required=False),
template_format=dict(default='json', choices=['json', 'yaml'], required=False),
tags=dict(default=None)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['template_url', 'template']],
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
if module.params['template'] is None and module.params['template_url'] is None:
module.fail_json(msg='Either template or template_url expected')
state = module.params['state']
stack_name = module.params['stack_name']
if module.params['template'] is None and module.params['template_url'] is None:
if state == 'present':
module.fail_json('Module parameter "template" or "template_url" is required if "state" is "present"')
if module.params['template'] is not None:
template_body = open(module.params['template'], 'r').read()
else:
template_body = None
if module.params['template_format'] == 'yaml':
if template_body is None:
module.fail_json(msg='yaml format only supported for local templates')
else:
template_body = json.dumps(yaml.load(template_body), indent=2)
notification_arns = module.params['notification_arns']
if module.params['stack_policy'] is not None:
stack_policy_body = open(module.params['stack_policy'], 'r').read()
else:
stack_policy_body = None
disable_rollback = module.params['disable_rollback']
template_parameters = module.params['template_parameters']
tags = module.params['tags']
template_url = module.params['template_url']
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
kwargs = dict()
if tags is not None:
if not boto_version_required((2,6,0)):
module.fail_json(msg='Module parameter "tags" requires at least Boto version 2.6.0')
kwargs['tags'] = tags
# convert the template parameters ansible passes into a tuple for boto
template_parameters_tup = [(k, v) for k, v in template_parameters.items()]
stack_outputs = {}
try:
cfn = boto.cloudformation.connect_to_region(
region,
**aws_connect_kwargs
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
update = False
result = {}
operation = None
# if state is present we are going to ensure that the stack is either
# created or updated
if state == 'present':
try:
cfn.create_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body,
notification_arns=notification_arns,
stack_policy_body=stack_policy_body,
template_url=template_url,
disable_rollback=disable_rollback,
capabilities=['CAPABILITY_IAM'],
**kwargs)
operation = 'CREATE'
except Exception, err:
error_msg = boto_exception(err)
if 'AlreadyExistsException' in error_msg or 'already exists' in error_msg:
update = True
else:
module.fail_json(msg=error_msg)
if not update:
result = stack_operation(cfn, stack_name, operation)
# if the state is present and the stack already exists, we try to update it
# AWS will tell us if the stack template and parameters are the same and
# don't need to be updated.
if update:
try:
cfn.update_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body,
notification_arns=notification_arns,
stack_policy_body=stack_policy_body,
disable_rollback=disable_rollback,
template_url=template_url,
capabilities=['CAPABILITY_IAM'])
operation = 'UPDATE'
except Exception, err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
result = dict(changed=False, output='Stack is already up-to-date.')
else:
module.fail_json(msg=error_msg)
if operation == 'UPDATE':
result = stack_operation(cfn, stack_name, operation)
# check the status of the stack while we are creating/updating it.
# and get the outputs of the stack
if state == 'present' or update:
stack = invoke_with_throttling_retries(cfn.describe_stacks,stack_name)[0]
for output in stack.outputs:
stack_outputs[output.key] = output.value
result['stack_outputs'] = stack_outputs
# absent state is different because of the way delete_stack works.
# problem is it it doesn't give an error if stack isn't found
# so must describe the stack first
if state == 'absent':
try:
invoke_with_throttling_retries(cfn.describe_stacks,stack_name)
operation = 'DELETE'
except Exception, err:
error_msg = boto_exception(err)
if 'Stack:%s does not exist' % stack_name in error_msg:
result = dict(changed=False, output='Stack not found.')
else:
module.fail_json(msg=error_msg)
if operation == 'DELETE':
cfn.delete_stack(stack_name)
result = stack_operation(cfn, stack_name, operation)
module.exit_json(**result)
# import module snippets
# (star imports are mandatory pre-Ansible-2.0 boilerplate: the framework
# inlines these helper modules into the final module file at build time)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
vvv1559/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/pgen2/tokenize.py | 115 | 19125 | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
# Re-export every public constant of the token module plus this module's own
# entry points, then drop the 'token' name from the namespace.
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
           "generate_tokens", "untokenize"]
del token

try:
    bytes
except NameError:
    # Support bytes type in Python <= 2.5, so 2to3 turns itself into
    # valid Python 3 code.
    bytes = str
def group(*choices):
    """Join the alternatives into one capturing regex group: (a|b|...)."""
    return '(%s)' % '|'.join(choices)

def any(*choices):  # intentionally shadows builtin any(); kept for the regexes below
    """Zero-or-more repetitions of a group of alternatives."""
    return group(*choices) + '*'

def maybe(*choices):
    """Zero-or-one occurrence of a group of alternatives."""
    return group(*choices) + '?'
# Regex fragments describing Python 2 lexical structure; combined below into
# the master Token/PseudoToken patterns used by generate_tokens().
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
# Numeric literals (Python 2: optional trailing l/L on integers).
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
# Map each opening string prefix/quote to the compiled pattern that matches
# the rest of the string; None marks prefixes that need one more character.
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None,
            'u': None, 'U': None,
            'b': None, 'B': None}
# All prefix+quote combinations that open triple-quoted strings.
triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',):
    triple_quoted[t] = t
# All prefix+quote combinations that open single-quoted strings.
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "b'", 'b"', "B'", 'B"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"', ):
    single_quoted[t] = t
# Tab stops are every 8 columns when computing indentation depth.
tabsize = 8
# Raised when EOF is hit inside an unterminated construct (multi-line
# string or a backslash continuation line).
class TokenError(Exception): pass
# Raised by a tokeneater callback to abort tokenize() early.
class StopTokenizing(Exception): pass
def printtoken(type, token, start, end, line): # for testing
    # Default tokeneater: dump one "srow,scol-erow,ecol: NAME 'text'" line
    # per token (the 'line' argument is deliberately unused).
    (srow, scol) = start
    (erow, ecol) = end
    print "%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        # The tokeneater may raise StopTokenizing to abort early; that is
        # a normal termination, not an error.
        pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    # Drive the generator and hand each 5-tuple to the callback.
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)
class Untokenizer:
    """Rebuild source text from an iterable of token tuples.

    Full 5-tuples (type, string, start, end, line) are re-spaced exactly via
    their positions; position-less 2-tuples fall back to the heuristic
    compat() mode.
    """

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0

    def add_whitespace(self, start):
        """Append padding spaces so the next token lands at column *start*."""
        row, col = start
        assert row <= self.prev_row
        gap = col - self.prev_col
        if gap:
            self.tokens.append(" " * gap)

    def untokenize(self, iterable):
        """Return the source string reconstructed from *iterable*."""
        for tok in iterable:
            if len(tok) == 2:
                # No position info: hand everything to the compat path.
                self.compat(tok, iterable)
                break
            kind, text, start, end, line = tok
            self.add_whitespace(start)
            self.tokens.append(text)
            self.prev_row, self.prev_col = end
            if kind in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        """Heuristic mode for (type, string) pairs: pad names/numbers with a
        trailing space and replay INDENT/DEDENT bookkeeping by hand."""
        startline = False
        indents = []
        append = self.tokens.append

        def padded(num, val):
            # NAME/NUMBER tokens need a separating space to stay distinct.
            return val + ' ' if num in (NAME, NUMBER) else val

        toknum, tokval = token
        tokval = padded(toknum, tokval)
        if toknum in (NEWLINE, NL):
            startline = True
        for tok in iterable:
            toknum, tokval = tok[:2]
            tokval = padded(toknum, tokval)
            if toknum == INDENT:
                indents.append(tokval)
                continue
            if toknum == DEDENT:
                indents.pop()
                continue
            if toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                append(indents[-1])
                startline = False
            append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.
    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read
    in.
    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present, but
    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
    charset, raise a SyntaxError. Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.
    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        # Treat end-of-input as an empty line so callers always see bytes.
        try:
            return readline()
        except StopIteration:
            return bytes()
    def find_cookie(line):
        # Return the encoding named by a PEP 263 cookie on `line`, or None.
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            # A cookie line must be pure ASCII; anything else cannot
            # contain a valid declaration.
            return None
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)
        if bom_found:
            # A BOM was seen, so the cookie must agree with utf-8.
            if codec.name != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding
    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]  # strip the 3-byte BOM before returning the line
        default = 'utf-8-sig'
    if not first:
        return default, []
    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    # PEP 263 allows the cookie on the second line as well.
    second = read_or_stop()
    if not second:
        return default, [first]
    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]
    return default, [first, second]
def untokenize(iterable):
    """Transform tokens back into Python source code.
    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value. If
    only two tokens are passed, the resulting output is poor.
    Round-trip invariant for full input:
        Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize the back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    # Thin convenience wrapper; all the work happens in Untokenizer.
    ut = Untokenizer()
    return ut.untokenize(iterable)
def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline
    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    # Tokenizer state:
    #   lnum      -- current 1-based line number
    #   parenlev  -- nesting depth of (), [], {} (newlines inside become NL)
    #   continued -- true after a backslash line continuation
    #   contstr/contline/needcont -- accumulators for a string token that
    #                spans several physical lines
    #   indents   -- stack of indentation column widths
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]
    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum = lnum + 1
        pos, max = 0, len(line)
        if contstr:                            # continued string
            if not line:
                raise TokenError, ("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                # Closing quote found on this line: emit the whole string.
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string with a missing backslash
                # continuation: report it as an error token.
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                # Still inside the string: keep accumulating.
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break
            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
        else:                                  # continued statement
            if not line:
                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
            continued = 0
        while pos < max:
            # Scan the rest of the line token by token.
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                    # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]
                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    newline = NEWLINE
                    if parenlev > 0:
                        # Newline inside brackets is non-logical (NL).
                        newline = NL
                    yield (newline, token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    # This yield is new; needed for better idempotency:
                    yield (NL, token, spos, (lnum, pos), line)
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    yield (OP, token, spos, epos, line)
            else:
                # No token matched: emit one character as an error token.
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1
    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1:
        # Use a with-block so the file is closed even if tokenize() raises
        # (the original open() call leaked the file handle on error).
        with open(sys.argv[1]) as source_file:
            tokenize(source_file.readline)
    else:
        tokenize(sys.stdin.readline)
| apache-2.0 |
xiaowei942/YouCompleteMe | python/ycm/tests/vimsupport_test.py | 17 | 21475 | #!/usr/bin/env python
#
# Copyright (C) 2015 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm import vimsupport
from nose.tools import eq_
def ReplaceChunk_SingleLine_Repl_1_test():
  # Replace with a longer string, then keep editing the same line while
  # feeding the accumulated offsets back into ReplaceChunk.
  result_buffer = [ "This is a string" ]
  start, end = _BuildLocations( 1, 1, 1, 5 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'How long', 0, 0, result_buffer )
  eq_( [ "How long is a string" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 4 )

  # Second edit on the same line, applying the delta from the first.
  start, end = _BuildLocations( 1, 10, 1, 11 )
  new_line_offset, new_char_offset = vimsupport.ReplaceChunk(
    start, end, ' piece of ', line_offset, char_offset, result_buffer )
  line_offset += new_line_offset
  char_offset += new_char_offset
  eq_( [ 'How long is a piece of string' ], result_buffer )
  eq_( new_line_offset, 0 )
  eq_( new_char_offset, 9 )
  eq_( line_offset, 0 )
  eq_( char_offset, 13 )

  # And once more, for luck.
  start, end = _BuildLocations( 1, 11, 1, 17 )
  new_line_offset, new_char_offset = vimsupport.ReplaceChunk(
    start, end, 'pie', line_offset, char_offset, result_buffer )
  line_offset += new_line_offset
  char_offset += new_char_offset
  eq_( [ 'How long is a piece of pie' ], result_buffer )
  eq_( new_line_offset, 0 )
  eq_( new_char_offset, -3 )
  eq_( line_offset, 0 )
  eq_( char_offset, 10 )


def ReplaceChunk_SingleLine_Repl_2_test():
  # Replace with a shorter string.
  result_buffer = [ "This is a string" ]
  start, end = _BuildLocations( 1, 11, 1, 17 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'test', 0, 0, result_buffer )
  eq_( [ "This is a test" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, -2 )


def ReplaceChunk_SingleLine_Repl_3_test():
  # Replace with a string of equal length.
  result_buffer = [ "This is a string" ]
  start, end = _BuildLocations( 1, 6, 1, 8 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'be', 0, 0, result_buffer )
  eq_( [ "This be a string" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 0 )
def ReplaceChunk_SingleLine_Add_1_test():
  # Insert at the start of the line.
  result_buffer = [ "is a string" ]
  start, end = _BuildLocations( 1, 1, 1, 1 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'This ', 0, 0, result_buffer )
  eq_( [ "This is a string" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 5 )


def ReplaceChunk_SingleLine_Add_2_test():
  # Insert at the end of the line.
  result_buffer = [ "This is a " ]
  start, end = _BuildLocations( 1, 11, 1, 11 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'string', 0, 0, result_buffer )
  eq_( [ "This is a string" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 6 )


def ReplaceChunk_SingleLine_Add_3_test():
  # Insert in the middle of the line.
  result_buffer = [ "This is a string" ]
  start, end = _BuildLocations( 1, 8, 1, 8 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, ' not', 0, 0, result_buffer )
  eq_( [ "This is not a string" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 4 )


def ReplaceChunk_SingleLine_Del_1_test():
  # Delete from the start of the line.
  result_buffer = [ "This is a string" ]
  start, end = _BuildLocations( 1, 1, 1, 6 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, '', 0, 0, result_buffer )
  eq_( [ "is a string" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, -5 )


def ReplaceChunk_SingleLine_Del_2_test():
  # Delete up to (and past) the end of the line.
  result_buffer = [ "This is a string" ]
  start, end = _BuildLocations( 1, 10, 1, 18 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, '', 0, 0, result_buffer )
  eq_( [ "This is a" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, -8 )


def ReplaceChunk_SingleLine_Del_3_test():
  # Delete from the middle of the line.
  result_buffer = [ "This is not a string" ]
  start, end = _BuildLocations( 1, 9, 1, 13 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, '', 0, 0, result_buffer )
  eq_( [ "This is a string" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, -4 )
def ReplaceChunk_RemoveSingleLine_test():
  # Removing a whole line shifts subsequent lines up by one.
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 2, 1, 3, 1 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, '', 0, 0, result_buffer )
  eq_( [ "aAa", "aCa" ], result_buffer )
  eq_( line_offset, -1 )
  eq_( char_offset, 0 )


def ReplaceChunk_SingleToMultipleLines_test():
  # Insert a two-line replacement, then edit the (moved) line using the
  # offsets returned by the first call.
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 2, 2, 2, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Eb\nbF', 0, 0, result_buffer )
  eq_( [ "aAa", "aEb", "bFBa", "aCa" ], result_buffer )
  eq_( line_offset, 1 )
  eq_( char_offset, 1 )

  # Now make another change to the "2nd" line.
  start, end = _BuildLocations( 2, 3, 2, 4 )
  new_line_offset, new_char_offset = vimsupport.ReplaceChunk(
    start, end, 'cccc', line_offset, char_offset, result_buffer )
  line_offset += new_line_offset
  char_offset += new_char_offset
  eq_( [ "aAa", "aEb", "bFBcccc", "aCa" ], result_buffer )
  eq_( line_offset, 1 )
  eq_( char_offset, 4 )


def ReplaceChunk_SingleToMultipleLines2_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 2, 2, 2, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Eb\nbFb\nG', 0, 0, result_buffer )
  eq_( [ "aAa", "aEb", "bFb", "GBa", "aCa" ], result_buffer )
  eq_( line_offset, 2 )
  eq_( char_offset, 0 )


def ReplaceChunk_SingleToMultipleLines3_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 2, 2, 2, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Eb\nbFb\nbGb', 0, 0, result_buffer )
  eq_( [ "aAa", "aEb", "bFb", "bGbBa", "aCa" ], result_buffer )
  eq_( line_offset, 2 )
  eq_( char_offset, 2 )


def ReplaceChunk_SingleToMultipleLinesReplace_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 1, 2, 1, 4 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Eb\nbFb\nbGb', 0, 0, result_buffer )
  eq_( [ "aEb", "bFb", "bGb", "aBa", "aCa" ], result_buffer )
  eq_( line_offset, 2 )
  eq_( char_offset, 0 )


def ReplaceChunk_SingleToMultipleLinesReplace_2_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 1, 2, 1, 4 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Eb\nbFb\nbGb', 0, 0, result_buffer )
  eq_( [ "aEb", "bFb", "bGb", "aBa", "aCa" ], result_buffer )
  eq_( line_offset, 2 )
  eq_( char_offset, 0 )

  # Now do a subsequent change (insert at end of line "1").
  start, end = _BuildLocations( 1, 4, 1, 4 )
  new_line_offset, new_char_offset = vimsupport.ReplaceChunk(
    start, end, 'cccc', line_offset, char_offset, result_buffer )
  line_offset += new_line_offset
  char_offset += new_char_offset
  eq_( [ "aEb", "bFb", "bGbcccc", "aBa", "aCa" ], result_buffer )
  eq_( line_offset, 2 )
  eq_( char_offset, 4 )
def ReplaceChunk_MultipleLinesToSingleLine_test():
  # Collapse two lines into one, then keep editing with the accumulated
  # line/char offsets applied.
  result_buffer = [ "aAa", "aBa", "aCaaaa" ]
  start, end = _BuildLocations( 2, 2, 3, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'E', 0, 0, result_buffer )
  eq_( [ "aAa", "aECaaaa" ], result_buffer )
  eq_( line_offset, -1 )
  eq_( char_offset, 1 )

  # Make another modification applying offsets.
  start, end = _BuildLocations( 3, 3, 3, 4 )
  new_line_offset, new_char_offset = vimsupport.ReplaceChunk(
    start, end, 'cccc', line_offset, char_offset, result_buffer )
  line_offset += new_line_offset
  char_offset += new_char_offset
  eq_( [ "aAa", "aECccccaaa" ], result_buffer )
  eq_( line_offset, -1 )
  eq_( char_offset, 4 )

  # And another, for luck.
  start, end = _BuildLocations( 3, 4, 3, 5 )
  new_line_offset, new_char_offset = vimsupport.ReplaceChunk(
    start, end, 'dd\ndd', line_offset, char_offset, result_buffer )
  line_offset += new_line_offset
  char_offset += new_char_offset
  eq_( [ "aAa", "aECccccdd", "ddaa" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, -2 )


def ReplaceChunk_MultipleLinesToSameMultipleLines_test():
  result_buffer = [ "aAa", "aBa", "aCa", "aDe" ]
  start, end = _BuildLocations( 2, 2, 3, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Eb\nbF', 0, 0, result_buffer )
  eq_( [ "aAa", "aEb", "bFCa", "aDe" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 1 )


def ReplaceChunk_MultipleLinesToMoreMultipleLines_test():
  result_buffer = [ "aAa", "aBa", "aCa", "aDe" ]
  start, end = _BuildLocations( 2, 2, 3, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Eb\nbFb\nbG', 0, 0, result_buffer )
  eq_( [ "aAa", "aEb", "bFb", "bGCa", "aDe" ], result_buffer )
  eq_( line_offset, 1 )
  eq_( char_offset, 1 )


def ReplaceChunk_MultipleLinesToLessMultipleLines_test():
  result_buffer = [ "aAa", "aBa", "aCa", "aDe" ]
  start, end = _BuildLocations( 1, 2, 3, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Eb\nbF', 0, 0, result_buffer )
  eq_( [ "aEb", "bFCa", "aDe" ], result_buffer )
  eq_( line_offset, -1 )
  eq_( char_offset, 1 )


def ReplaceChunk_MultipleLinesToEvenLessMultipleLines_test():
  result_buffer = [ "aAa", "aBa", "aCa", "aDe" ]
  start, end = _BuildLocations( 1, 2, 4, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Eb\nbF', 0, 0, result_buffer )
  eq_( [ "aEb", "bFDe" ], result_buffer )
  eq_( line_offset, -2 )
  eq_( char_offset, 1 )


def ReplaceChunk_SpanBufferEdge_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 1, 1, 1, 3 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'bDb', 0, 0, result_buffer )
  eq_( [ "bDba", "aBa", "aCa" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 1 )


def ReplaceChunk_DeleteTextInLine_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 2, 2, 2, 3 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, '', 0, 0, result_buffer )
  eq_( [ "aAa", "aa", "aCa" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, -1 )


def ReplaceChunk_AddTextInLine_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 2, 2, 2, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'bDb', 0, 0, result_buffer )
  eq_( [ "aAa", "abDbBa", "aCa" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 3 )


def ReplaceChunk_ReplaceTextInLine_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 2, 2, 2, 3 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'bDb', 0, 0, result_buffer )
  eq_( [ "aAa", "abDba", "aCa" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 2 )
def ReplaceChunk_SingleLineOffsetWorks_test():
  # Non-zero starting offsets shift the requested range before applying.
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 1, 1, 1, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'bDb', 1, 1, result_buffer )
  eq_( [ "aAa", "abDba", "aCa" ], result_buffer )
  eq_( line_offset, 0 )
  eq_( char_offset, 2 )


def ReplaceChunk_SingleLineToMultipleLinesOffsetWorks_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 1, 1, 1, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'Db\nE', 1, 1, result_buffer )
  eq_( [ "aAa", "aDb", "Ea", "aCa" ], result_buffer )
  eq_( line_offset, 1 )
  eq_( char_offset, -1 )


def ReplaceChunk_MultipleLinesToSingleLineOffsetWorks_test():
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 1, 1, 2, 2 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'bDb', 1, 1, result_buffer )
  eq_( [ "aAa", "abDbCa" ], result_buffer )
  eq_( line_offset, -1 )
  eq_( char_offset, 3 )


def ReplaceChunk_MultipleLineOffsetWorks_test():
  # Negative line offsets are honoured too.
  result_buffer = [ "aAa", "aBa", "aCa" ]
  start, end = _BuildLocations( 3, 1, 4, 3 )
  line_offset, char_offset = vimsupport.ReplaceChunk(
    start, end, 'bDb\nbEb\nbFb', -1, 1, result_buffer )
  eq_( [ "aAa", "abDb", "bEb", "bFba" ], result_buffer )
  eq_( line_offset, 1 )
  eq_( char_offset, 1 )
def _BuildLocations( start_line, start_column, end_line, end_column ):
return {
'line_num' : start_line,
'column_num': start_column,
}, {
'line_num' : end_line,
'column_num': end_column,
}
| gpl-3.0 |
DavidPurcell/murano_temp | murano/dsl/principal_objects/garbage_collector.py | 2 | 3152 | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from yaql.language import specs
from yaql.language import yaqltypes
from murano.dsl import dsl
from murano.dsl import helpers
@dsl.name('io.murano.system.GC')
class GarbageCollector(object):
    """MuranoPL-facing helpers for object destruction bookkeeping.

    Exposed to MuranoPL as ``io.murano.system.GC``.  Lets one object
    subscribe to another object's destruction (optionally naming a handler
    method), and provides queries about garbage-collection state.
    """

    @staticmethod
    @specs.parameter('publisher', dsl.MuranoObjectParameter(decorate=False))
    @specs.parameter('subscriber', dsl.MuranoObjectParameter(decorate=False))
    @specs.parameter('handler', yaqltypes.String(nullable=True))
    def subscribe_destruction(publisher, subscriber, handler=None):
        """Register `subscriber` for notification when `publisher` dies.

        The subscriber is held as a weak reference so the subscription
        does not keep it alive.  Registering the same (subscriber,
        handler) pair twice is a no-op.
        """
        publisher_this = publisher.real_this
        subscriber_this = subscriber.real_this
        if handler:
            # Validate that the handler method exists on the subscriber's
            # type; raises if it does not.
            subscriber.type.find_single_method(handler)
        dependency = GarbageCollector._find_dependency(
            publisher_this, subscriber_this, handler)
        if not dependency:
            dependency = {'subscriber': helpers.weak_ref(subscriber_this),
                          'handler': handler}
            publisher_this.destruction_dependencies.append(dependency)

    @staticmethod
    @specs.parameter('publisher', dsl.MuranoObjectParameter(decorate=False))
    @specs.parameter('subscriber', dsl.MuranoObjectParameter(decorate=False))
    @specs.parameter('handler', yaqltypes.String(nullable=True))
    def unsubscribe_destruction(publisher, subscriber, handler=None):
        """Remove a previously registered destruction subscription.

        Silently does nothing when no matching subscription exists.
        """
        publisher_this = publisher.real_this
        subscriber_this = subscriber.real_this
        if handler:
            # Same validation as subscribe_destruction().
            subscriber.type.find_single_method(handler)
        dds = publisher_this.destruction_dependencies
        dependency = GarbageCollector._find_dependency(
            publisher_this, subscriber_this, handler)
        if dependency:
            dds.remove(dependency)

    @staticmethod
    def _find_dependency(publisher, subscriber, handler):
        # Return the dependency dict matching (subscriber, handler), or
        # None (implicitly) when there is no match.
        dds = publisher.destruction_dependencies
        for dd in dds:
            if dd['handler'] != handler:
                continue
            d_subscriber = dd['subscriber']
            if d_subscriber:
                # Stored value is a weak reference; dereference it.
                d_subscriber = d_subscriber()
            if d_subscriber == subscriber:
                return dd

    @staticmethod
    def collect():
        """Run a cleanup pass over the executor's object store."""
        helpers.get_executor().object_store.cleanup()

    @staticmethod
    @specs.parameter('object_', dsl.MuranoObjectParameter(decorate=False))
    def is_doomed(object_):
        """Return whether `object_` is scheduled for destruction."""
        return helpers.get_object_store().is_doomed(object_)

    @staticmethod
    @specs.parameter('object_', dsl.MuranoObjectParameter(decorate=False))
    def is_destroyed(object_):
        """Return whether `object_` has already been destroyed."""
        return object_.destroyed
| apache-2.0 |
CptLemming/libsaas | libsaas/services/desk/users.py | 4 | 2749 | from libsaas import http, parsers
from libsaas.services import base
from . import resource
class ResourceMixin(object):
    """Mixin that marks a Desk resource as read-only.

    All mutating operations raise MethodNotSupported; only the GET-style
    methods of the concrete resource classes remain usable.
    """

    def create(self, *args, **kwargs):
        raise base.MethodNotSupported()

    def update(self, *args, **kwargs):
        raise base.MethodNotSupported()

    def delete(self, *args, **kwargs):
        raise base.MethodNotSupported()
class Groups(ResourceMixin, resource.PaginatedDeskResource):
    # Read-only paginated collection endpoint: GET /groups
    path = 'groups'
class Group(ResourceMixin, resource.DeskResource):
    """Read-only single-group endpoint (GET /groups/<id>/...)."""

    path = 'groups'

    @base.apimethod
    def group_filters(self, per_page=None, page=None):
        """
        Retrieve a paginated list of all filters for the given group.

        Upstream documentation: http://dev.desk.com/API/groups#list-filters
        """
        # base.get_params builds the query string from this function's
        # arguments via locals() -- parameter names are significant.
        params = base.get_params(None, locals())
        url = '{0}/{1}'.format(self.get_url(), 'filters')
        return http.Request('GET', url, params), parsers.parse_json

    @base.apimethod
    def users(self, per_page=None, page=None):
        """
        Retrieve a paginated list of all users for the given group.

        Upstream documentation: http://dev.desk.com/API/groups#list-users
        """
        params = base.get_params(None, locals())
        url = '{0}/{1}'.format(self.get_url(), 'users')
        return http.Request('GET', url, params), parsers.parse_json
class Users(ResourceMixin, resource.PaginatedDeskResource):
    # Read-only paginated collection endpoint: GET /users
    path = 'users'
class User(ResourceMixin, resource.DeskResource):
    """Single-user endpoint; the user record itself is read-only, but
    individual user preferences may be read and updated."""

    path = 'users'

    @base.apimethod
    def preferences(self, per_page=None, page=None):
        """
        List all of the user's preferences.

        Upstream documentation: http://dev.desk.com/API/users/#preferences-list
        """
        # base.get_params builds the query string from this function's
        # arguments via locals() -- parameter names are significant.
        params = base.get_params(None, locals())
        url = '{0}/{1}'.format(self.get_url(), 'preferences')
        return http.Request('GET', url, params), parsers.parse_json

    @base.apimethod
    def preference(self, preference_id):
        """
        Show a single user preference

        Upstream documentation: http://dev.desk.com/API/users/#preferences-show
        """
        url = '{0}/{1}/{2}'.format(self.get_url(), 'preferences', preference_id)
        return http.Request('GET', url), parsers.parse_json

    @base.apimethod
    def update_preference(self, preference_id, obj):
        """
        Update a user preference

        Upstream documentation: http://dev.desk.com/API/users/#preferences
        """
        url = '{0}/{1}/{2}'.format(self.get_url(), 'preferences', preference_id)
        request = http.Request('PATCH', url, self.wrap_object(obj))
        return request, parsers.parse_json
class SiteSettings(ResourceMixin, resource.PaginatedDeskResource):
    # Read-only paginated collection endpoint: GET /site_settings
    path = 'site_settings'
| mit |
40223101/w17test | static/Brython3.1.0-20150301-090019/Lib/multiprocessing/pool.py | 694 | 23263 | #
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Pool']
#
# Imports
#
import threading
import queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0        # pool accepts new tasks
CLOSE = 1      # close() called: no new tasks, finish outstanding work
TERMINATE = 2  # terminate() called: stop as soon as possible
#
# Miscellaneous
#
# Monotonically increasing ids identifying submitted jobs in self._cache.
job_counter = itertools.count()
def mapstar(args):
    """Unpack a packed (func, iterable, ...) task and run map() over it."""
    return [value for value in map(*args)]
def starmapstar(args):
    """Run itertools.starmap for a packed (func, arg_tuples) task."""
    func, argument_tuples = args[0], args[1]
    return list(itertools.starmap(func, argument_tuples))
#
# Code run by worker processes
#
class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        # Keep only the reprs: the original objects may not pickle, and
        # these strings always can.
        self.exc = repr(exc)
        self.value = repr(value)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
                                                             self.exc)

    def __repr__(self):
        return "<MaybeEncodingError: %s>" % str(self)
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    """Main loop of every pool worker process.

    Repeatedly takes (job, i, func, args, kwds) tasks from `inqueue`, runs
    them, and puts (job, i, (success, value)) on `outqueue`.  Exits on a
    None sentinel, on a broken queue, or after `maxtasks` completed tasks
    (the maxtasksperchild feature).
    """
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        # Process-based SimpleQueues: close the unused pipe ends in the
        # child so EOF propagates correctly.
        inqueue._writer.close()
        outqueue._reader.close()
    if initializer is not None:
        initializer(*initargs)
    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break
        if task is None:
            debug('worker got sentinel -- exiting')
            break
        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            # Report the failure back to the parent instead of dying.
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            # The result itself may be unpicklable; wrap its repr so the
            # error can still travel back through the pipe.
            wrapped = MaybeEncodingError(e, result[1])
            debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))
        completed += 1
    debug('worker exiting after %d tasks' % completed)
#
# Class representing a process pool
#
class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    # Overridable in subclasses (e.g. a thread pool) to change the worker
    # type spawned by _repopulate_pool().
    Process = Process
    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None):
        """Start the worker processes and the three management threads.

        processes defaults to cpu_count() (1 when undeterminable).
        initializer(*initargs) runs once in each worker at startup.
        maxtasksperchild makes each worker exit, and be replaced, after
        completing that many tasks.
        """
        self._setup_queues()
        self._taskqueue = queue.Queue()
        self._cache = {}  # job id -> result object awaiting completion
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        # Replaces workers that exited (maxtasksperchild support).
        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()

        # Moves tasks from _taskqueue into the workers' input queue.
        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        # Dispatches worker results to the waiting result objects.
        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        # Registered cleanup for interpreter exit / explicit terminate().
        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )
    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime. Returns True if any workers were cleaned up.
        """
        cleaned = False
        # Iterate in reverse so that del by index stays valid.
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned
    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild)
                            )
            self._pool.append(w)
            # Rename so process listings show pool membership.
            w.name = w.name.replace('Process', 'PoolWorker')
            # Daemonic, so leftover workers cannot outlive the parent.
            w.daemon = True
            w.start()
            debug('added worker')
    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        # Called periodically by the worker-handler thread.
        if self._join_exited_workers():
            self._repopulate_pool()
    def _setup_queues(self):
        # Process pools exchange tasks/results over pipe-backed SimpleQueues.
        # The raw send/recv endpoints are bound directly so the hot paths
        # avoid an extra method lookup per task.
        from .queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `func(*args, **kwds)`.
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.  Blocks until the whole result is ready.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()
    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments.  Hence an
        iterable of [(1, 2), (3, 4)] yields [func(1, 2), func(3, 4)].
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()
    def starmap_async(self, func, iterable, chunksize=None, callback=None,
                      error_callback=None):
        '''
        Asynchronous version of `starmap()` method; returns a result object
        immediately instead of blocking.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)
def imap(self, func, iterable, chunksize=1):
'''
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
if self._state != RUN:
raise ValueError("Pool not running")
if chunksize == 1:
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1):
'''
Like `imap()` method but ordering of results is arbitrary.
'''
if self._state != RUN:
raise ValueError("Pool not running")
if chunksize == 1:
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={}, callback=None,
error_callback=None):
'''
Asynchronous version of `apply()` method.
'''
if self._state != RUN:
raise ValueError("Pool not running")
result = ApplyResult(self._cache, callback, error_callback)
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
    def map_async(self, func, iterable, chunksize=None, callback=None,
                  error_callback=None):
        '''
        Asynchronous version of `map()` method; returns a result object
        immediately instead of blocking.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
                               error_callback)
def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
error_callback=None):
'''
Helper function to implement map, starmap and their async counterparts.
'''
if self._state != RUN:
raise ValueError("Pool not running")
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback,
error_callback=error_callback)
self._taskqueue.put((((result._job, i, mapper, (x,), {})
for i, x in enumerate(task_batches)), None))
return result
    @staticmethod
    def _handle_workers(pool):
        """Thread target: keep the worker pool populated until shutdown."""
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        debug('worker handler exiting')
    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool):
        """Thread target: feed tasks from `taskqueue` to the workers.

        Each taskqueue entry is (iterable_of_tasks, set_length_callback or
        None); a bare None entry is the shutdown sentinel.
        """
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    # State changed away from RUN: stop feeding entirely.
                    debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except IOError:
                    debug('could not put task on queue')
                    break
            else:
                # Whole sequence was fed; report the final length to the
                # iterator (imap/imap_unordered) if requested.
                if set_length:
                    debug('doing set_length()')
                    set_length(i+1)
                continue
            # Inner loop broke out -> abort the outer loop as well.
            break
        else:
            debug('task handler got sentinel')

        try:
            # tell result handler to finish when cache is empty
            debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except IOError:
            debug('task handler got IOError when sending sentinels')

        debug('task handler exiting')
    @staticmethod
    def _handle_results(outqueue, get, cache):
        """Thread target: read (job, i, obj) results from `outqueue` and
        route them to the matching result object in `cache`."""
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                # Result for a job that was already discarded; drop it.
                pass

        # Drain results that are still owed to live result objects.
        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if task is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        if hasattr(outqueue, '_reader'):
            debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block. There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (IOError, EOFError):
                pass

        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
    def __reduce__(self):
        # Pools own threads, pipes and processes; refuse pickling outright.
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )
    def close(self):
        """Prevent any more tasks from being submitted; workers exit once
        all pending tasks have completed."""
        debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE
    def terminate(self):
        """Stop the pool immediately without waiting for pending tasks."""
        debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()  # Finalize callback registered in __init__
    def join(self):
        """Wait for the helper threads and all workers to exit.  close() or
        terminate() must have been called beforehand."""
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()
    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        debug('removing tasks from inqueue until task handler finished')
        # NOTE(review): the reader lock is acquired and apparently never
        # released on purpose -- the pool is being torn down, so workers
        # must not consume any further tasks.  Confirm against upstream.
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)
    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        """Finalizer (registered via Finalize in __init__): tear down the
        helper threads and worker processes in a safe order."""
        # this is guaranteed to only be called once
        debug('finalizing pool')

        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE

        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        assert result_handler.is_alive() or len(cache) == 0

        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()

        debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    debug('cleaning up worker %d' % p.pid)
                    p.join()
    def __enter__(self):
        # Context-manager support: `with Pool(...) as p:`.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Leaving the `with` block terminates (not merely closes) the pool.
        self.terminate()
#
# Class whose instances are returned by `Pool.apply_async()`
#

class ApplyResult(object):
    """Result handle returned by `Pool.apply_async()`."""

    def __init__(self, cache, callback, error_callback):
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        # Register so the result-handler thread can route results here.
        cache[self._job] = self

    def ready(self):
        """True once the result (or an error) has been delivered."""
        return self._event.is_set()

    def successful(self):
        """True if the call completed without raising; requires ready()."""
        assert self.ready()
        return self._success

    def wait(self, timeout=None):
        """Block until the result arrives or `timeout` seconds elapse."""
        self._event.wait(timeout)

    def get(self, timeout=None):
        """Return the call's result, re-raising its exception on failure."""
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        raise self._value

    def _set(self, i, obj):
        # Called by the result-handler thread with (success, value).
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        if self._error_callback and not self._success:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]

AsyncResult = ApplyResult       # create alias -- see #17805
#
# Class whose instances are returned by `Pool.map_async()`
#

class MapResult(ApplyResult):
    """Result handle for `Pool.map_async()`; aggregates chunked results."""

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty input: the (empty) result is immediately ready.
            self._number_left = 0
            self._event.set()
            del cache[self._job]
        else:
            self._number_left = length // chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        success, result = success_result
        if not success:
            # First failure poisons the whole map result.
            self._success = False
            self._value = result
            if self._error_callback:
                self._error_callback(self._value)
            del self._cache[self._job]
            self._event.set()
            return
        # Splice this chunk's results into their slot of the output list.
        self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
        self._number_left -= 1
        if self._number_left == 0:
            if self._callback:
                self._callback(self._value)
            del self._cache[self._job]
            self._event.set()
#
# Class whose instances are returned by `Pool.imap()`
#

class IMapIterator(object):
    """Ordered result iterator returned by `Pool.imap()`."""

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()
        self._index = 0
        self._length = None
        self._unsorted = {}
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        """Return the next result, blocking up to `timeout` seconds."""
        with self._cond:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        # Called by the result handler; buffers out-of-order arrivals so
        # items are handed out strictly in submission order.
        with self._cond:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    self._items.append(self._unsorted.pop(self._index))
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]

    def _set_length(self, length):
        with self._cond:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
#
# Class whose instances are returned by `Pool.imap_unordered()`
#

class IMapUnorderedIterator(IMapIterator):
    """Iterator for `Pool.imap_unordered()`: yields results as they arrive."""

    def _set(self, i, obj):
        with self._cond:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
#
#
#

class ThreadPool(Pool):
    """Pool variant whose workers are threads rather than processes."""

    from .dummy import Process

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        # Plain thread-safe queues; no pipes or pickling needed for threads.
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # Flush pending tasks and put one sentinel per worker at the head
        # of inqueue so workers finish promptly.
        with inqueue.not_empty:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
| gpl-3.0 |
amith01994/intellij-community | python/helpers/pydev/pydevd_constants.py | 39 | 8413 | '''
This module holds the constants used for specifying the states of the debugger.
'''
# Debugger run-states exchanged between the debugger core and the IDE.
STATE_RUN = 1
STATE_SUSPEND = 2

PYTHON_SUSPEND = 1

try:
    __setFalse = False
except:
    # Very old interpreters lack the True/False builtins; define them as
    # ints so the rest of this module can use them unconditionally.
    import __builtin__
    setattr(__builtin__, 'True', 1)
    setattr(__builtin__, 'False', 0)
class DebugInfoHolder:
    """Mutable holder for the debugger's own debug/trace switches."""
    #we have to put it here because it can be set through the command line (so, the
    #already imported references would not have it).
    DEBUG_RECORD_SOCKET_READS = False  # log raw socket reads when True
    DEBUG_TRACE_LEVEL = -1  # -1 disables the debugger's own trace output
    DEBUG_TRACE_BREAKPOINTS = -1  # -1 disables breakpoint tracing
#Optimize with psyco? This gave a 50% speedup in the debugger in tests
USE_PSYCO_OPTIMIZATION = True

#Hold a reference to the original _getframe (because psyco will change that as soon as it's imported)
import sys #Note: the sys import must be here anyways (others depend on it)
try:
    GetFrame = sys._getframe
except AttributeError:
    # Some implementations (e.g. IronPython without -X:Frames) do not
    # provide sys._getframe at all.
    def GetFrame():
        raise AssertionError('sys._getframe not available (possible causes: enable -X:Frames on IronPython?)')

#Used to determine the maximum size of each variable passed to eclipse -- having a big value here may make
#the communication slower -- as the variables are being gathered lazily in the latest version of eclipse,
#this value was raised from 200 to 1000.
MAXIMUM_VARIABLE_REPRESENTATION_SIZE = 1000
import os

import pydevd_vm_type

# Detect the hosting VM; several compatibility shims below depend on it.
IS_JYTHON = pydevd_vm_type.GetVmType() == pydevd_vm_type.PydevdVmType.JYTHON
IS_JYTH_LESS25 = False

if IS_JYTHON:
    if sys.version_info[0] == 2 and sys.version_info[1] < 5:
        IS_JYTH_LESS25 = True
#=======================================================================================================================
# Python 3?
#=======================================================================================================================
# Interpreter-version flags used to pick compatible idioms further below.
IS_PY3K = False
IS_PY27 = False
IS_PY24 = False
try:
    if sys.version_info[0] >= 3:
        IS_PY3K = True
    elif sys.version_info[0] == 2 and sys.version_info[1] == 7:
        IS_PY27 = True
    elif sys.version_info[0] == 2 and sys.version_info[1] == 4:
        IS_PY24 = True
except AttributeError:
    pass #Not all versions have sys.version_info
try:
    IS_64_BITS = sys.maxsize > 2 ** 32
except AttributeError:
    # Python 2 has no sys.maxsize; fall back to the pointer size.
    try:
        import struct
        IS_64_BITS = struct.calcsize("P") * 8 > 32
    except:
        IS_64_BITS = False

# gevent support is opt-in through the GEVENT_SUPPORT environment variable.
SUPPORT_GEVENT = os.getenv('GEVENT_SUPPORT', 'False') == 'True'
USE_LIB_COPY = SUPPORT_GEVENT and not IS_PY3K and sys.version_info[1] >= 6

import _pydev_threading as threading
from _pydev_imps import _pydev_thread
_nextThreadIdLock = _pydev_thread.allocate_lock()  # guards id assignment in GetThreadId
#=======================================================================================================================
# Jython?
#=======================================================================================================================
# Portable `key in dict` helper across Python/Jython versions.
try:
    DictContains = dict.has_key
except:
    try:
        #Py3k does not have has_key anymore, and older versions don't have __contains__
        DictContains = dict.__contains__
    except:
        try:
            DictContains = dict.has_key
        except NameError:
            # Last resort: wrap the method call (very old interpreters).
            def DictContains(d, key):
                return d.has_key(key)
#=======================================================================================================================
# Jython?
#=======================================================================================================================
try:
    DictPop = dict.pop
except:
    # dict.pop is missing on some very old interpreters; emulate it.
    def DictPop(d, key, default=None):
        try:
            ret = d[key]
            del d[key]
            return ret
        except:
            return default
if IS_PY3K:
    # On Python 3 the dict views must be materialized to keep the list
    # semantics these helpers promise.
    def DictKeys(d):
        return list(d.keys())
    def DictValues(d):
        return list(d.values())
    DictIterValues = dict.values
    def DictIterItems(d):
        return d.items()
    def DictItems(d):
        return list(d.items())
else:
    DictKeys = dict.keys
    try:
        DictIterValues = dict.itervalues
    except:
        DictIterValues = dict.values #Older versions don't have the itervalues
    DictValues = dict.values
    def DictIterItems(d):
        return d.iteritems()
    def DictItems(d):
        return d.items()
try:
    xrange = xrange
except:
    #Python 3k does not have it
    xrange = range

try:
    import itertools
    izip = itertools.izip
except:
    # Python 3: the builtin zip is already lazy.
    izip = zip

try:
    object
except NameError:
    # Pre-2.2 interpreters: provide a minimal stand-in for `object`.
    class object:
        pass

try:
    enumerate
except:
    # enumerate() is missing on very old interpreters; eager fallback.
    def enumerate(lst):
        ret = []
        i = 0
        for element in lst:
            ret.append((i, element))
            i += 1
        return ret
#=======================================================================================================================
# StringIO
#=======================================================================================================================
try:
    from StringIO import StringIO
except:
    # Python 3 moved StringIO into the io module.
    from io import StringIO
#=======================================================================================================================
# NextId
#=======================================================================================================================
class NextId:
    """Callable producing sequential integer ids, starting at 1."""

    def __init__(self):
        self._current = 0

    def __call__(self):
        #No need to synchronize here
        self._current += 1
        return self._current

_nextThreadId = NextId()
#=======================================================================================================================
# GetThreadId
#=======================================================================================================================
def GetThreadId(thread):
    """Return a stable per-process id ('pid<pid>_seq<n>') for *thread*,
    creating and caching it on the thread object on first use."""
    try:
        return thread.__pydevd_id__
    except AttributeError:
        _nextThreadIdLock.acquire()
        try:
            #We do a new check with the lock in place just to be sure that nothing changed
            if not hasattr(thread, '__pydevd_id__'):
                try:
                    pid = os.getpid()
                except AttributeError:
                    try:
                        #Jython does not have it!
                        import java.lang.management.ManagementFactory #@UnresolvedImport -- just for jython
                        pid = java.lang.management.ManagementFactory.getRuntimeMXBean().getName()
                        pid = pid.replace('@', '_')
                    except:
                        #ok, no pid available (will be unable to debug multiple processes)
                        pid = '000001'
                thread.__pydevd_id__ = 'pid%s_seq%s' % (pid, _nextThreadId())
        finally:
            _nextThreadIdLock.release()

        return thread.__pydevd_id__
#===============================================================================
# Null
#===============================================================================
class Null:
    """
    Null-object pattern: absorbs attribute access, calls, item access and
    writes, always yielding itself or a falsy value.
    Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
    """

    def __init__(self, *args, **kwargs):
        return None

    def __call__(self, *args, **kwargs):
        return self

    def __getattr__(self, mname):
        if len(mname) > 4 and mname.startswith('__') and mname.endswith('__'):
            # Don't pretend to implement special method names.
            raise AttributeError(mname)
        return self

    def __setattr__(self, name, value):
        return self

    def __delattr__(self, name):
        return self

    def __repr__(self):
        return "<Null>"

    def __str__(self):
        return "Null"

    def __len__(self):
        return 0

    def __getitem__(self):
        return self

    def __setitem__(self, *args, **kwargs):
        pass

    def write(self, *args, **kwargs):
        pass

    def __nonzero__(self):
        # Python 2 truth-value hook; Python 3 falls back to __len__.
        return 0

    def __iter__(self):
        return iter(())
def call_only_once(func):
    '''
    To be used as a decorator: the wrapped function runs only on the first
    call; every later call is a no-op returning None.

    @call_only_once
    def func():
        print 'Calling func only this time'

    Actually, in PyDev it must be called as:

    func = call_only_once(func) to support older versions of Python.
    '''
    def new_func(*args, **kwargs):
        if new_func._called:
            return None
        new_func._called = True
        return func(*args, **kwargs)

    new_func._called = False
    return new_func
if __name__ == '__main__':
    # Smoke test: Null instances are falsy, so nothing should be printed.
    if Null():
        sys.stdout.write('here\n')
littlstar/chromium.src | tools/gyp-explain.py | 153 | 3035 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints paths between gyp targets.
"""
import json
import os
import sys
import time
from collections import deque
def usage():
    """Print command-line usage.  (Python 2 script: print statements.)"""
    print """\
Usage:
  tools/gyp-explain.py [--dot] chrome_dll# gtest#
"""
def GetPath(graph, fro, to):
    """Given a graph in (node -> list of successor nodes) dictionary format,
    yields all paths from |fro| to |to|, starting with the shortest."""
    # BFS over (node, path-so-far) pairs; storing full paths in the queue is
    # a bit wasteful, but good enough for this.
    pending = deque([(fro, [])])
    while pending:
        node, prefix = pending.popleft()
        extended = prefix + [node]
        if node == to:
            yield extended
        for successor in graph[node]:
            pending.append((successor, extended))
def MatchNode(graph, substring):
    """Given a dictionary, returns the key that matches |substring| best. Exits
    if there's not one single best match."""
    # Simple substring match; zero or multiple hits are fatal errors.
    candidates = []
    for target in graph:
        if substring in target:
            candidates.append(target)

    if not candidates:
        print 'No targets match "%s"' % substring
        sys.exit(1)
    if len(candidates) > 1:
        print 'More than one target matches "%s": %s' % (
            substring, ' '.join(candidates))
        sys.exit(1)
    return candidates[0]
def EscapeForDot(string):
    """Return |string| quoted for use as a graphviz dot node label, with a
    trailing '#target' suffix removed and backslashes escaped."""
    suffix = '#target'
    if string.endswith(suffix):
        string = string[:-len(suffix)]
    escaped = string.replace('\\', '\\\\')
    return '"%s"' % escaped
def GenerateDot(fro, to, paths):
    """Generates an input file for graphviz's dot program."""
    # Trim the common prefix off every node name so labels stay readable.
    prefixes = [os.path.commonprefix(path) for path in paths]
    prefix = os.path.commonprefix(prefixes)
    print '// Build with "dot -Tpng -ooutput.png this_file.dot"'
    # "strict" collapses common paths.
    print 'strict digraph {'
    for path in paths:
        print (' -> '.join(EscapeForDot(item[len(prefix):]) for item in path)), ';'
    print '}'
def Main(argv):
    """Entry point: validate dump.json freshness, resolve the two target
    arguments and print (or dot-render) all dependency paths between them."""
    # Check that dump.json exists and that it's not too old.
    dump_json_dirty = False
    try:
        st = os.stat('dump.json')
        file_age_s = time.time() - st.st_mtime
        if file_age_s > 2 * 60 * 60:
            # Older than two hours: likely stale relative to the checkout.
            print 'dump.json is more than 2 hours old.'
            dump_json_dirty = True
    except OSError:
        print 'dump.json not found.'
        dump_json_dirty = True

    if dump_json_dirty:
        print 'Run'
        print '    GYP_GENERATORS=dump_dependency_json build/gyp_chromium'
        print 'first, then try again.'
        sys.exit(1)

    g = json.load(open('dump.json'))

    if len(argv) not in (3, 4):
        usage()
        sys.exit(1)

    generate_dot = argv[1] == '--dot'
    if generate_dot:
        argv.pop(1)

    fro = MatchNode(g, argv[1])
    to = MatchNode(g, argv[2])

    paths = list(GetPath(g, fro, to))
    if len(paths) > 0:
        if generate_dot:
            GenerateDot(fro, to, paths)
        else:
            print 'These paths lead from %s to %s:' % (fro, to)
            for path in paths:
                print ' -> '.join(path)
    else:
        print 'No paths found from %s to %s.' % (fro, to)
if __name__ == '__main__':
    # Forward argv so Main can parse the optional --dot flag itself.
    Main(sys.argv)
| bsd-3-clause |
prataprc/tayra | tayra/test/stdttl/ref/empty.ttl.py | 1 | 1621 | import imp
from io import StringIO
from pluggdapps.plugin import Plugin, implements
from tayra import BaseTTLPlugin
def __traceback_decorator__( frames ):
    """Auto-generated by Tayra: rewrite a traceback's frames so that frames
    from this generated .ttl.py module also point back at the corresponding
    line of the originating .ttl template (module global `_ttlfile`)."""
    from copy import deepcopy
    from os.path import basename

    def _map2ttl( frame ):
        # Scan backwards from the failing line for a '# lineno:<n>' marker
        # emitted by the code generator; it names the source .ttl line.
        filename = frame.filename
        lineno = frame.lineno
        lines = open(filename).readlines()[:lineno]
        lines.reverse()
        rc = {}
        for l in lines :
            if l.strip().startswith('# lineno') :
                _, ttl_lineno = l.split(':', 1)
                ttl_lineno = int( ttl_lineno )
                ttl_text = open( _ttlfile ).readlines()[ ttl_lineno-1 ]
                return ttl_lineno, ttl_text
        return None, None

    newframes = []
    for frame in frames :
        newframes.append( frame )
        frameadded = getattr( frame, '_ttlframeadded', False )
        basen = basename( frame.filename )
        if basen.endswith( '.ttl.py' ) and basen == (basename( _ttlfile ) + '.py') and frameadded == False :
            # Insert a synthetic frame pointing at the .ttl source, exactly
            # once per original frame (_ttlframeadded guards re-entry).
            newframe = deepcopy( frame )
            frame._ttlframeadded = True
            try :
                newframe.lineno, newframe.linetext = _map2ttl( newframe )
                if newframe.lineno :
                    newframe.filename = _ttlfile
                    newframes.append( newframe )
            except :
                raise
            continue
    return newframes
def body( *args, **kwargs ) :
    """Render the (empty) template body and return the generated text.
    Auto-generated; `_m` is the Tayra runtime's buffer stack."""
    _m.pushbuf()

    return _m.popbuftext()
# ---- Global Functions
# ---- Interface functions
# ---- Footer
| gpl-3.0 |
jaredhasenklein/the-blue-alliance | tests/test_add_surrogates.py | 5 | 1559 | import unittest2
from appengine_fixture_loader.loader import load_fixture
from google.appengine.ext import testbed
from google.appengine.ext import ndb
from helpers.match_helper import MatchHelper
from models.event import Event
from models.match import Match
class TestAddSurrogates(unittest2.TestCase):
    """Regression test: MatchHelper.add_surrogates() should mark surrogate
    teams only in qual match 18 of the 2016cama fixture."""

    def setUp(self):
        # Stand up an in-memory App Engine environment (datastore, task
        # queue, memcache) so no real services are touched.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_taskqueue_stub(root_path=".")
        self.testbed.init_memcache_stub()
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests

        load_fixture('test_data/2016cama_no_surrogate.json',
                      kind={'Event': Event, 'Match': Match},
                      post_processor=self.eventKeyAdder)

        self.event = Event.get_by_id('2016cama')
        self.assertIsNotNone(self.event)

    def tearDown(self):
        self.testbed.deactivate()

    def eventKeyAdder(self, obj):
        # Fixture post-processor: attach each loaded entity to the event.
        obj.event = ndb.Key(Event, '2016cama')

    def test_event_winner(self):
        MatchHelper.add_surrogates(self.event)
        for match in self.event.matches:
            # Only qual match 18 should carry surrogates after processing.
            if match.comp_level != 'qm' or match.match_number != 18:
                for alliance_color in ['red', 'blue']:
                    self.assertEqual(match.alliances[alliance_color]['surrogates'], [])
            else:
                self.assertEqual(match.alliances['red']['surrogates'], ['frc5496'])
                self.assertEqual(match.alliances['blue']['surrogates'], ['frc1323'])
| mit |
hendradarwin/VTK | ThirdParty/Twisted/twisted/names/test/test_rootresolve.py | 22 | 29140 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for Twisted.names' root resolver.
"""
from random import randrange
from zope.interface import implementer
from zope.interface.verify import verifyClass
from twisted.python.log import msg
from twisted.trial import util
from twisted.trial.unittest import SynchronousTestCase, TestCase
from twisted.internet.defer import Deferred, succeed, gatherResults, TimeoutError
from twisted.internet.task import Clock
from twisted.internet.address import IPv4Address
from twisted.internet.interfaces import IReactorUDP, IUDPTransport, IResolverSimple
from twisted.names import client, root
from twisted.names.root import Resolver
from twisted.names.dns import (
IN, HS, A, NS, CNAME, OK, ENAME, Record_CNAME,
Name, Query, Message, RRHeader, Record_A, Record_NS)
from twisted.names.error import DNSNameError, ResolverError
def getOnePayload(results):
    """
    From the result of a L{Deferred} returned by L{IResolver.lookupAddress},
    return the payload of the first record in the answer section.
    """
    answers, authority, additional = results
    return answers[0].payload
def getOneAddress(results):
    """
    From the result of a L{Deferred} returned by L{IResolver.lookupAddress},
    return the first IPv4 address from the answer section, as a dotted-quad
    string.
    """
    payload = getOnePayload(results)
    return payload.dottedQuad()
@implementer(IUDPTransport)
class MemoryDatagramTransport(object):
    """
    An in-memory L{IUDPTransport}: it enforces the usual connect/write rules
    but, instead of touching the network, records outgoing datagrams so
    tests can inspect the traffic afterwards.

    @ivar _host: The (host, port) pair this transport is pretending to be
        bound to.
    @ivar _protocol: The protocol connected to this transport.
    @ivar _sentPackets: A C{list} of (datagram, address) two-tuples recorded
        by C{write}.
    @ivar _connectedTo: C{None} while unconnected, otherwise the address all
        traffic is supposedly sent to.
    @ivar _maxPacketSize: An C{int} giving the maximum datagram length
        C{write} will accept.
    """
    def __init__(self, host, protocol, maxPacketSize):
        self._host = host
        self._protocol = protocol
        self._sentPackets = []
        self._connectedTo = None
        self._maxPacketSize = maxPacketSize


    def getHost(self):
        """
        Return the address which this transport is pretending to be bound
        to.
        """
        host, port = self._host
        return IPv4Address('UDP', host, port)


    def connect(self, host, port):
        """
        Connect this transport to the given address; connecting twice is an
        error.
        """
        if self._connectedTo is not None:
            raise ValueError("Already connected")
        self._connectedTo = (host, port)


    def write(self, datagram, addr=None):
        """
        Record the given datagram instead of transmitting it.
        """
        destination = addr if addr is not None else self._connectedTo
        if destination is None:
            raise ValueError("Need an address")
        if len(datagram) > self._maxPacketSize:
            raise ValueError("Packet too big")
        self._sentPackets.append((datagram, destination))


    def stopListening(self):
        """
        Shut down this transport.
        """
        self._protocol.stopProtocol()
        return succeed(None)


    def setBroadcastAllowed(self, enabled):
        """
        Dummy implementation to satisfy L{IUDPTransport}.
        """


    def getBroadcastAllowed(self):
        """
        Dummy implementation to satisfy L{IUDPTransport}.
        """

verifyClass(IUDPTransport, MemoryDatagramTransport)
@implementer(IReactorUDP)
class MemoryReactor(Clock):
    """
    A deterministic L{IReactorTime} and L{IReactorUDP} provider.

    Time advances only through the inherited L{Clock} API; UDP is handled
    in-memory by connecting protocols to L{MemoryDatagramTransport}
    instances.

    @ivar udpPorts: A C{dict} mapping port numbers to instances of
        L{MemoryDatagramTransport}.
    """
    def __init__(self):
        Clock.__init__(self)
        self.udpPorts = {}


    def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
        """
        Pretend to bind a UDP port and connect the given protocol to it.
        """
        if port == 0:
            # Pick an unused "ephemeral" port at random.
            port = randrange(1, 2 ** 16)
            while port in self.udpPorts:
                port = randrange(1, 2 ** 16)
        if port in self.udpPorts:
            raise ValueError("Address in use")
        transport = MemoryDatagramTransport(
            (interface, port), protocol, maxPacketSize)
        self.udpPorts[port] = transport
        protocol.makeConnection(transport)
        return transport

verifyClass(IReactorUDP, MemoryReactor)
class RootResolverTests(TestCase):
"""
Tests for L{twisted.names.root.Resolver}.
"""
    def _queryTest(self, filter):
        """
        Invoke L{Resolver._query} and verify that it sends the correct DNS
        query.  Deliver a canned response to the query and return whatever the
        L{Deferred} returned by L{Resolver._query} fires with.

        @param filter: The value to pass for the C{filter} parameter to
            L{Resolver._query}.
        """
        reactor = MemoryReactor()
        resolver = Resolver([], reactor=reactor)
        d = resolver._query(
            Query(b'foo.example.com', A, IN), [('1.1.2.3', 1053)], (30,),
            filter)

        # A UDP port should have been started.
        portNumber, transport = reactor.udpPorts.popitem()

        # And a DNS packet sent.
        [(packet, address)] = transport._sentPackets

        message = Message()
        message.fromStr(packet)

        # It should be a query with the parameters used above.
        self.assertEqual(message.queries, [Query(b'foo.example.com', A, IN)])
        self.assertEqual(message.answers, [])
        self.assertEqual(message.authority, [])
        self.assertEqual(message.additional, [])

        response = []
        d.addCallback(response.append)
        # The Deferred must not fire before any reply arrives.
        self.assertEqual(response, [])

        # Once a reply is received, the Deferred should fire.
        del message.queries[:]
        message.answer = 1
        message.answers.append(RRHeader(
            b'foo.example.com', payload=Record_A('5.8.13.21')))
        transport._protocol.datagramReceived(
            message.toStr(), ('1.1.2.3', 1053))
        return response[0]
    def test_filteredQuery(self):
        """
        L{Resolver._query} accepts a L{Query} instance and an address, issues
        the query, and returns a L{Deferred} which fires with the response to
        the query.  If a true value is passed for the C{filter} parameter, the
        result is a three-tuple of lists of records.
        """
        answer, authority, additional = self._queryTest(True)
        # ttl=0 matches the canned reply built in _queryTest.
        self.assertEqual(
            answer,
            [RRHeader(b'foo.example.com', payload=Record_A('5.8.13.21', ttl=0))])
        self.assertEqual(authority, [])
        self.assertEqual(additional, [])
    def test_unfilteredQuery(self):
        """
        Similar to L{test_filteredQuery}, but for the case where a false value
        is passed for the C{filter} parameter.  In this case, the result is a
        L{Message} instance.
        """
        message = self._queryTest(False)
        self.assertIsInstance(message, Message)
        self.assertEqual(message.queries, [])
        # ttl=0 matches the canned reply built in _queryTest.
        self.assertEqual(
            message.answers,
            [RRHeader(b'foo.example.com', payload=Record_A('5.8.13.21', ttl=0))])
        self.assertEqual(message.authority, [])
        self.assertEqual(message.additional, [])
def _respond(self, answers=None, authority=None, additional=None, rCode=OK):
    """
    Create a L{Message} suitable for use as a response to a query.

    @param answers: A C{list} of two-tuples giving data for the answers
        section of the message.  The first element of each tuple is a name
        for the L{RRHeader}.  The second element is the payload.
    @param authority: A C{list} like C{answers}, but for the authority
        section of the response.
    @param additional: A C{list} like C{answers}, but for the
        additional section of the response.
    @param rCode: The response code the message will be created with.

    @return: A new L{Message} initialized with the given values.
    """
    # BUG FIX: the defaults used to be mutable list literals ([]), which
    # Python evaluates once at definition time; use None sentinels instead.
    response = Message(rCode=rCode)
    for (section, data) in [(response.answers, answers or []),
                            (response.authority, authority or []),
                            (response.additional, additional or [])]:
        section.extend([
            RRHeader(name, record.TYPE, getattr(record, 'CLASS', IN),
                     payload=record)
            for (name, record) in data])
    return response
def _getResolver(self, serverResponses, maximumQueries=10):
    """
    Create and return a new L{root.Resolver} modified to resolve queries
    against the record data represented by C{servers}.

    @param serverResponses: A mapping from dns server addresses to
        mappings.  The inner mappings are from query two-tuples (name,
        type) to dictionaries suitable for use as **arguments to
        L{_respond}.  See that method for details.
    @param maximumQueries: Maximum number of queries the resolver may
        issue before giving up; passed through to L{Resolver.__init__}.
    """
    roots = ['1.1.2.3']
    resolver = Resolver(roots, maximumQueries)

    def query(query, serverAddresses, timeout, filter):
        # In-memory replacement for Resolver._query: answer from the
        # serverResponses fixture instead of touching the network.
        msg("Query for QNAME %s at %r" % (query.name, serverAddresses))
        for addr in serverAddresses:
            try:
                server = serverResponses[addr]
            except KeyError:
                continue
            records = server[query.name.name, query.type]
            return succeed(self._respond(**records))
        # NOTE(review): if no address in serverAddresses is in the fixture
        # this falls through and returns None rather than a Deferred --
        # presumably never exercised by these tests; confirm before reuse.
    resolver._query = query
    return resolver
def test_lookupAddress(self):
    """
    L{root.Resolver.lookupAddress} looks up the I{A} records for the
    specified hostname by first querying one of the root servers the
    resolver was created with and then following the authority delegations
    until a result is received.
    """
    servers = {
        ('1.1.2.3', 53): {
            # Root delegates to ns1.example.com and supplies its address
            # as glue in the additional section.
            (b'foo.example.com', A): {
                'authority': [(b'foo.example.com', Record_NS(b'ns1.example.com'))],
                'additional': [(b'ns1.example.com', Record_A('34.55.89.144'))],
            },
        },
        ('34.55.89.144', 53): {
            (b'foo.example.com', A): {
                'answers': [(b'foo.example.com', Record_A('10.0.0.1'))],
            }
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress(b'foo.example.com')
    d.addCallback(getOneAddress)
    d.addCallback(self.assertEqual, '10.0.0.1')
    return d

def test_lookupChecksClass(self):
    """
    If a response includes a record with a class different from the one
    in the query, it is ignored and lookup continues until a record with
    the right class is found.
    """
    # An A record in the HS class must not satisfy the IN-class query.
    badClass = Record_A('10.0.0.1')
    badClass.CLASS = HS
    servers = {
        ('1.1.2.3', 53): {
            ('foo.example.com', A): {
                'answers': [('foo.example.com', badClass)],
                'authority': [('foo.example.com', Record_NS('ns1.example.com'))],
                'additional': [('ns1.example.com', Record_A('10.0.0.2'))],
            },
        },
        ('10.0.0.2', 53): {
            ('foo.example.com', A): {
                'answers': [('foo.example.com', Record_A('10.0.0.3'))],
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress('foo.example.com')
    d.addCallback(getOnePayload)
    d.addCallback(self.assertEqual, Record_A('10.0.0.3'))
    return d
def test_missingGlue(self):
    """
    If an intermediate response includes no glue records for the
    authorities, separate queries are made to find those addresses.
    """
    servers = {
        ('1.1.2.3', 53): {
            (b'foo.example.com', A): {
                'authority': [(b'foo.example.com', Record_NS(b'ns1.example.org'))],
                # Conspicuous lack of an additional section naming ns1.example.com
            },
            # The resolver must issue this extra A query itself to learn
            # the delegated nameserver's address.
            (b'ns1.example.org', A): {
                'answers': [(b'ns1.example.org', Record_A('10.0.0.1'))],
            },
        },
        ('10.0.0.1', 53): {
            (b'foo.example.com', A): {
                'answers': [(b'foo.example.com', Record_A('10.0.0.2'))],
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress(b'foo.example.com')
    d.addCallback(getOneAddress)
    d.addCallback(self.assertEqual, '10.0.0.2')
    return d

def test_missingName(self):
    """
    If a name is missing, L{Resolver.lookupAddress} returns a L{Deferred}
    which fails with L{DNSNameError}.
    """
    servers = {
        ('1.1.2.3', 53): {
            (b'foo.example.com', A): {
                # ENAME (NXDOMAIN) response code signals the missing name.
                'rCode': ENAME,
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress(b'foo.example.com')
    return self.assertFailure(d, DNSNameError)
def test_answerless(self):
    """
    If a query is responded to with no answers or nameserver records, the
    L{Deferred} returned by L{Resolver.lookupAddress} fires with
    L{ResolverError}.
    """
    servers = {
        ('1.1.2.3', 53): {
            # Deliberately empty response: no answers, no delegation.
            ('example.com', A): {
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress('example.com')
    return self.assertFailure(d, ResolverError)

def test_delegationLookupError(self):
    """
    If there is an error resolving the nameserver in a delegation response,
    the L{Deferred} returned by L{Resolver.lookupAddress} fires with that
    error.
    """
    servers = {
        ('1.1.2.3', 53): {
            ('example.com', A): {
                'authority': [('example.com', Record_NS('ns1.example.com'))],
            },
            # Looking up the delegated nameserver itself fails with NXDOMAIN.
            ('ns1.example.com', A): {
                'rCode': ENAME,
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress('example.com')
    return self.assertFailure(d, DNSNameError)

def test_delegationLookupEmpty(self):
    """
    If there are no records in the response to a lookup of a delegation
    nameserver, the L{Deferred} returned by L{Resolver.lookupAddress} fires
    with L{ResolverError}.
    """
    servers = {
        ('1.1.2.3', 53): {
            ('example.com', A): {
                'authority': [('example.com', Record_NS('ns1.example.com'))],
            },
            # Empty response for the delegated nameserver lookup.
            ('ns1.example.com', A): {
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress('example.com')
    return self.assertFailure(d, ResolverError)
def test_lookupNameservers(self):
    """
    L{Resolver.lookupNameservers} is like L{Resolver.lookupAddress}, except
    it queries for I{NS} records instead of I{A} records.
    """
    servers = {
        ('1.1.2.3', 53): {
            # The A lookup would fail -- only the NS lookup succeeds,
            # proving the correct record type is requested.
            (b'example.com', A): {
                'rCode': ENAME,
            },
            (b'example.com', NS): {
                'answers': [(b'example.com', Record_NS(b'ns1.example.com'))],
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupNameservers(b'example.com')

    def getOneName(results):
        # Extract the nameserver name from the first answer record.
        ans, auth, add = results
        return ans[0].payload.name
    d.addCallback(getOneName)
    d.addCallback(self.assertEqual, Name(b'ns1.example.com'))
    return d

def test_returnCanonicalName(self):
    """
    If a I{CNAME} record is encountered as the answer to a query for
    another record type, that record is returned as the answer.
    """
    servers = {
        ('1.1.2.3', 53): {
            (b'example.com', A): {
                # CNAME plus the A record for its target in one response.
                'answers': [(b'example.com', Record_CNAME(b'example.net')),
                            (b'example.net', Record_A('10.0.0.7'))],
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress(b'example.com')
    d.addCallback(lambda results: results[0])  # Get the answer section
    d.addCallback(
        self.assertEqual,
        [RRHeader(b'example.com', CNAME, payload=Record_CNAME(b'example.net')),
         RRHeader(b'example.net', A, payload=Record_A('10.0.0.7'))])
    return d

def test_followCanonicalName(self):
    """
    If no record of the requested type is included in a response, but a
    I{CNAME} record for the query name is included, queries are made to
    resolve the value of the I{CNAME}.
    """
    servers = {
        ('1.1.2.3', 53): {
            ('example.com', A): {
                # Only the CNAME; the A record must be fetched separately.
                'answers': [('example.com', Record_CNAME('example.net'))],
            },
            ('example.net', A): {
                'answers': [('example.net', Record_A('10.0.0.5'))],
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress('example.com')
    d.addCallback(lambda results: results[0])  # Get the answer section
    d.addCallback(
        self.assertEqual,
        [RRHeader('example.com', CNAME, payload=Record_CNAME('example.net')),
         RRHeader('example.net', A, payload=Record_A('10.0.0.5'))])
    return d
def test_detectCanonicalNameLoop(self):
    """
    If there is a cycle between I{CNAME} records in a response, this is
    detected and the L{Deferred} returned by the lookup method fails
    with L{ResolverError}.
    """
    servers = {
        ('1.1.2.3', 53): {
            ('example.com', A): {
                # example.com -> example.net -> example.com: a CNAME cycle.
                'answers': [('example.com', Record_CNAME('example.net')),
                            ('example.net', Record_CNAME('example.com'))],
            },
        },
    }
    resolver = self._getResolver(servers)
    d = resolver.lookupAddress('example.com')
    return self.assertFailure(d, ResolverError)

def test_boundedQueries(self):
    """
    L{Resolver.lookupAddress} won't issue more queries following
    delegations than the limit passed to its initializer.
    """
    servers = {
        ('1.1.2.3', 53): {
            # First query - force it to start over with a name lookup of
            # ns1.example.com
            ('example.com', A): {
                'authority': [('example.com', Record_NS('ns1.example.com'))],
            },
            # Second query - let it resume the original lookup with the
            # address of the nameserver handling the delegation.
            ('ns1.example.com', A): {
                'answers': [('ns1.example.com', Record_A('10.0.0.2'))],
            },
        },
        ('10.0.0.2', 53): {
            # Third query - let it jump straight to asking the
            # delegation server by including its address here (different
            # case from the first query).
            ('example.com', A): {
                'authority': [('example.com', Record_NS('ns2.example.com'))],
                'additional': [('ns2.example.com', Record_A('10.0.0.3'))],
            },
        },
        ('10.0.0.3', 53): {
            # Fourth query - give it the answer, we're done.
            ('example.com', A): {
                'answers': [('example.com', Record_A('10.0.0.4'))],
            },
        },
    }
    # Make two resolvers.  One which is allowed to make 3 queries
    # maximum, and so will fail, and on which may make 4, and so should
    # succeed.
    failer = self._getResolver(servers, 3)
    failD = self.assertFailure(
        failer.lookupAddress('example.com'), ResolverError)

    succeeder = self._getResolver(servers, 4)
    succeedD = succeeder.lookupAddress('example.com')
    succeedD.addCallback(getOnePayload)
    succeedD.addCallback(self.assertEqual, Record_A('10.0.0.4'))

    return gatherResults([failD, succeedD])
class ResolverFactoryArguments(Exception):
    """
    Exception used by L{raisingResolverFactory} to capture and expose the
    positional and keyword arguments the factory was invoked with.
    """
    def __init__(self, args, kwargs):
        """
        Record the call's arguments for later inspection by a test.

        @param args: Positional arguments.
        @param kwargs: Keyword arguments.
        """
        self.args, self.kwargs = args, kwargs
def raisingResolverFactory(*args, **kwargs):
    """
    A stand-in resolver factory which never constructs anything: it raises
    L{ResolverFactoryArguments} carrying exactly the positional and keyword
    arguments it received, so tests can inspect how it was called.

    @param args: All positional arguments supplied by the caller.
    @param kwargs: All keyword arguments supplied by the caller.
    """
    captured = ResolverFactoryArguments(args, kwargs)
    raise captured
class RootResolverResolverFactoryTests(TestCase):
    """
    Tests for L{root.Resolver._resolverFactory}.
    """
    def test_resolverFactoryArgumentPresent(self):
        """
        L{root.Resolver.__init__} accepts a C{resolverFactory}
        argument and assigns it to C{self._resolverFactory}.
        """
        r = Resolver(hints=[None], resolverFactory=raisingResolverFactory)
        self.assertIdentical(r._resolverFactory, raisingResolverFactory)

    def test_resolverFactoryArgumentAbsent(self):
        """
        L{root.Resolver.__init__} sets L{client.Resolver} as the
        C{_resolverFactory} if a C{resolverFactory} argument is not
        supplied.
        """
        r = Resolver(hints=[None])
        self.assertIdentical(r._resolverFactory, client.Resolver)

    def test_resolverFactoryOnlyExpectedArguments(self):
        """
        L{root.Resolver._resolverFactory} is supplied with C{reactor} and
        C{servers} keyword arguments.
        """
        dummyReactor = object()
        r = Resolver(hints=['192.0.2.101'],
                     resolverFactory=raisingResolverFactory,
                     reactor=dummyReactor)
        # raisingResolverFactory raises with the exact call arguments, so
        # the assertion below inspects precisely what was passed through.
        e = self.assertRaises(ResolverFactoryArguments,
                              r.lookupAddress, 'example.com')
        self.assertEqual(
            ((), {'reactor': dummyReactor, 'servers': [('192.0.2.101', 53)]}),
            (e.args, e.kwargs)
        )
# Hostnames of the thirteen DNS root name servers; root.bootstrap resolves
# each of these to seed a root Resolver with hint addresses.
ROOT_SERVERS = [
    'a.root-servers.net',
    'b.root-servers.net',
    'c.root-servers.net',
    'd.root-servers.net',
    'e.root-servers.net',
    'f.root-servers.net',
    'g.root-servers.net',
    'h.root-servers.net',
    'i.root-servers.net',
    'j.root-servers.net',
    'k.root-servers.net',
    'l.root-servers.net',
    'm.root-servers.net']
@implementer(IResolverSimple)
class StubResolver(object):
    """
    An L{IResolverSimple} implementer which traces all getHostByName
    calls and their deferred results. The deferred results can be
    accessed and fired synchronously.
    """
    def __init__(self):
        """
        @type calls: L{list} of L{tuple} containing C{args} and
            C{kwargs} supplied to C{getHostByName} calls.
        @type pendingResults: L{list} of L{Deferred} returned by
            C{getHostByName}.
        """
        self.calls = []
        self.pendingResults = []

    def getHostByName(self, *args, **kwargs):
        """
        A fake implementation of L{IResolverSimple.getHostByName}

        @param args: A L{list} of all the positional arguments supplied by
           the caller.

        @param kwargs: A L{list} of all the keyword arguments supplied by
           the caller.

        @return: A L{Deferred} which may be fired later from the test
            fixture.
        """
        # Record the call, and hand back an unfired Deferred the test can
        # resolve (or fail) at a moment of its choosing.
        self.calls.append((args, kwargs))
        d = Deferred()
        self.pendingResults.append(d)
        return d

# Fail loudly at import time if StubResolver drifts from the interface.
verifyClass(IResolverSimple, StubResolver)
class BootstrapTests(SynchronousTestCase):
    """
    Tests for L{root.bootstrap}
    """
    def test_returnsDeferredResolver(self):
        """
        L{root.bootstrap} returns an object which is initially a
        L{root.DeferredResolver}.
        """
        deferredResolver = root.bootstrap(StubResolver())
        self.assertIsInstance(deferredResolver, root.DeferredResolver)

    def test_resolves13RootServers(self):
        """
        The L{IResolverSimple} supplied to L{root.bootstrap} is used to lookup
        the IP addresses of the 13 root name servers.
        """
        stubResolver = StubResolver()
        root.bootstrap(stubResolver)
        self.assertEqual(
            stubResolver.calls,
            [((s,), {}) for s in ROOT_SERVERS])

    def test_becomesResolver(self):
        """
        The L{root.DeferredResolver} initially returned by L{root.bootstrap}
        becomes a L{root.Resolver} when the supplied resolver has successfully
        looked up all root hints.
        """
        stubResolver = StubResolver()
        deferredResolver = root.bootstrap(stubResolver)
        # Fire every pending lookup; the object is expected to mutate its
        # own class once all hints have resolved.
        for d in stubResolver.pendingResults:
            d.callback('192.0.2.101')
        self.assertIsInstance(deferredResolver, Resolver)

    def test_resolverReceivesRootHints(self):
        """
        The L{root.Resolver} which eventually replaces L{root.DeferredResolver}
        is supplied with the IP addresses of the 13 root servers.
        """
        stubResolver = StubResolver()
        deferredResolver = root.bootstrap(stubResolver)
        for d in stubResolver.pendingResults:
            d.callback('192.0.2.101')
        self.assertEqual(deferredResolver.hints, ['192.0.2.101'] * 13)

    def test_continuesWhenSomeRootHintsFail(self):
        """
        The L{root.Resolver} is eventually created, even if some of the root
        hint lookups fail. Only the working root hint IP addresses are supplied
        to the L{root.Resolver}.
        """
        stubResolver = StubResolver()
        deferredResolver = root.bootstrap(stubResolver)
        results = iter(stubResolver.pendingResults)
        d1 = next(results)
        for d in results:
            d.callback('192.0.2.101')
        d1.errback(TimeoutError())

        def checkHints(res):
            self.assertEqual(deferredResolver.hints, ['192.0.2.101'] * 12)
        # NOTE(review): nothing asserts that checkHints actually ran; with
        # SynchronousTestCase and an already-failed Deferred it fires
        # immediately -- presumably intentional, but worth confirming.
        d1.addBoth(checkHints)

    def test_continuesWhenAllRootHintsFail(self):
        """
        The L{root.Resolver} is eventually created, even if all of the root hint
        lookups fail. Pending and new lookups will then fail with
        AttributeError.
        """
        stubResolver = StubResolver()
        deferredResolver = root.bootstrap(stubResolver)
        results = iter(stubResolver.pendingResults)
        d1 = next(results)
        for d in results:
            d.errback(TimeoutError())
        d1.errback(TimeoutError())

        def checkHints(res):
            self.assertEqual(deferredResolver.hints, [])
        d1.addBoth(checkHints)

        # The TimeoutErrors above are logged; flush them so trial does not
        # mark the test as failed.
        self.addCleanup(self.flushLoggedErrors, TimeoutError)

    def test_passesResolverFactory(self):
        """
        L{root.bootstrap} accepts a C{resolverFactory} argument which is passed
        as an argument to L{root.Resolver} when it has successfully looked up
        root hints.
        """
        stubResolver = StubResolver()
        deferredResolver = root.bootstrap(
            stubResolver, resolverFactory=raisingResolverFactory)
        for d in stubResolver.pendingResults:
            d.callback('192.0.2.101')
        self.assertIdentical(
            deferredResolver._resolverFactory, raisingResolverFactory)
class StubDNSDatagramProtocol:
    """
    A do-nothing stand-in for L{DNSDatagramProtocol} which can be used to avoid
    network traffic in tests where that kind of thing doesn't matter.
    """
    def query(self, *a, **kw):
        # Return an unfired Deferred: the "query" never completes.
        return Deferred()

# Suppression rule for tests exercising the deprecated retry API.
_retrySuppression = util.suppress(
    category=DeprecationWarning,
    message=(
        'twisted.names.root.retry is deprecated since Twisted 10.0.  Use a '
        'Resolver object for retry logic.'))
| bsd-3-clause |
tboyce021/home-assistant | homeassistant/util/json.py | 10 | 4510 | """JSON utility functions."""
from collections import deque
import json
import logging
import os
import tempfile
from typing import Any, Callable, Dict, List, Optional, Type, Union
from homeassistant.core import Event, State
from homeassistant.exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
class SerializationError(HomeAssistantError):
    """Error serializing the data to JSON (raised by save_json)."""


class WriteError(HomeAssistantError):
    """Error writing the serialized data to disk (raised by save_json)."""
def load_json(
    filename: str, default: Union[List, Dict, None] = None
) -> Union[List, Dict]:
    """Load JSON data from a file and return as dict or list.

    Defaults to returning empty dict if file is not found.
    """
    try:
        with open(filename, encoding="utf-8") as fdesc:
            contents = fdesc.read()
    except FileNotFoundError:
        # A missing file is expected and non-fatal; fall through to default.
        _LOGGER.debug("JSON file not found: %s", filename)
    except OSError as error:
        _LOGGER.exception("JSON file reading failed: %s", filename)
        raise HomeAssistantError(error) from error
    else:
        try:
            return json.loads(contents)  # type: ignore
        except ValueError as error:
            _LOGGER.exception("Could not parse JSON content: %s", filename)
            raise HomeAssistantError(error) from error
    return {} if default is None else default
def save_json(
    filename: str,
    data: Union[List, Dict],
    private: bool = False,
    *,
    encoder: Optional[Type[json.JSONEncoder]] = None,
) -> None:
    """Save JSON data to a file atomically.

    The payload is serialized first, written to a temporary file in the
    target directory, then moved into place so readers never observe a
    partially written file.

    Raises SerializationError if the data cannot be serialized and
    WriteError if the file cannot be written.
    """
    try:
        json_data = json.dumps(data, indent=4, cls=encoder)
    except TypeError as error:
        # BUG FIX: the message previously interpolated the literal
        # "(unknown)" instead of the target filename.
        bad_paths = format_unserializable_data(find_paths_unserializable_data(data))
        msg = f"Failed to serialize to JSON: {filename}. Bad data at {bad_paths}"
        _LOGGER.error(msg)
        raise SerializationError(msg) from error

    tmp_filename = ""
    tmp_path = os.path.split(filename)[0]
    try:
        # Modern versions of Python tempfile create this file with mode 0o600
        with tempfile.NamedTemporaryFile(
            mode="w", encoding="utf-8", dir=tmp_path, delete=False
        ) as fdesc:
            fdesc.write(json_data)
            tmp_filename = fdesc.name
        if not private:
            os.chmod(tmp_filename, 0o644)
        # Atomic rename into place.
        os.replace(tmp_filename, filename)
    except OSError as error:
        _LOGGER.exception("Saving JSON file failed: %s", filename)
        raise WriteError(error) from error
    finally:
        if os.path.exists(tmp_filename):
            try:
                os.remove(tmp_filename)
            except OSError as err:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error("JSON replacement cleanup failed: %s", err)
def format_unserializable_data(data: Dict[str, Any]) -> str:
    """Format output of find_paths in a friendly way.

    Format is comma separated: <path>=<value>(<type>)
    """
    # BUG FIX: the f-string was missing the closing parenthesis after the
    # type, producing "<path>=<value>(<type>" instead of the documented form.
    return ", ".join(f"{path}={value}({type(value)})" for path, value in data.items())
def find_paths_unserializable_data(
    bad_data: Any, *, dump: Callable[[Any], str] = json.dumps
) -> Dict[str, Any]:
    """Find the paths to unserializable data.

    Walks *bad_data* breadth-first and returns a mapping from a
    JSONPath-like string (rooted at "$") to each object *dump* cannot
    serialize.

    This method is slow! Only use for error handling.
    """
    to_process = deque([(bad_data, "$")])
    invalid = {}

    while to_process:
        obj, obj_path = to_process.popleft()

        # Anything that serializes cleanly needs no further inspection.
        try:
            dump(obj)
            continue
        except (ValueError, TypeError):
            pass

        # We convert states and events to dict so we can find bad data inside it
        if isinstance(obj, State):
            obj_path += f"(state: {obj.entity_id})"
            obj = obj.as_dict()
        elif isinstance(obj, Event):
            obj_path += f"(event: {obj.event_type})"
            obj = obj.as_dict()

        if isinstance(obj, dict):
            for key, value in obj.items():
                try:
                    # Is key valid?
                    dump({key: None})
                except TypeError:
                    invalid[f"{obj_path}<key: {key}>"] = key
                else:
                    # Process value
                    to_process.append((value, f"{obj_path}.{key}"))
        elif isinstance(obj, list):
            for idx, value in enumerate(obj):
                to_process.append((value, f"{obj_path}[{idx}]"))
        else:
            # A leaf that json cannot serialize: record it at its path.
            invalid[obj_path] = obj

    return invalid
| apache-2.0 |
rmacduff/rt-notify | rt-notify.py | 1 | 2106 | #!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
import glib
import pynotify
import sys
import os
import subprocess
import re
import time
# RT CLI query: unowned tickets that are new or open, excluding the spam
# and mail-handling queues.
QUERY = "\"Owner=\'Nobody\' AND ( Status=\'new\' OR Status=\'open\' ) AND ( Queue!=\'spambin\' AND Queue!=\'maildrop\' AND Queue!=\'learnspam\' )\""
TIME = 30 # polling interval, in seconds
KEEP_STATE = True # notify only once per ticket id
RT_CLI = "/usr/bin/rt-3.6" # path to the RT command line client

_rt_cmd = [RT_CLI + " ls " + QUERY]
# Notification icon, expected to live next to this script.
_rt_img = "file://" + os.path.abspath(os.path.dirname(sys.argv[0])) + "/rt_img.png"

seen_queue = [] # ticket ids already shown (bounded to 50 entries below)
out_queue = [] # lines waiting to appear in the next notification
def checkRT():
    """Poll RT for matching tickets and pop a desktop notification.

    Runs the rt CLI query, collects ticket lines (only unseen ones when
    KEEP_STATE is set) and shows them via pynotify.  Always returns True
    so the glib timer keeps firing.
    """
    global seen_queue, out_queue
    output = subprocess.Popen(_rt_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
    # Do nothing if it returns "No matching results."
    if not re.match("^No matching results.", output):
        for line in output.split('\n'):
            # BUG FIX: blank lines were only 'pass'-ed, so execution fell
            # through and appended an empty ticket id (''); skip them.
            if len(line.strip()) == 0:
                continue
            ticket_id = line.split(':')[0]
            if KEEP_STATE:
                if ticket_id not in seen_queue:
                    seen_queue.append(ticket_id)
                    out_queue.append(line)
                # Keep the seen queue from growing too large (improve this)
                if len(seen_queue) > 50:
                    seen_queue.pop(0)
            else:
                out_queue.append(line)
    # Display notice if there's something to show
    if len(out_queue) != 0:
        n = pynotify.Notification("RT Notice", '\n'.join(out_queue), _rt_img)
        n.set_urgency(pynotify.URGENCY_LOW)
        if not n.show():
            print("Failed to send notification")
            sys.exit(1)
        out_queue = []
    # So glib to keep calling me
    return True
if __name__ == '__main__':
    # Initialise the notification library before any Notification is made.
    if not pynotify.init("Urgency"):
        sys.exit(1)
    # Setup a loop and a callback function; checkRT runs every TIME seconds.
    main = glib.MainLoop()
    glib.timeout_add_seconds(TIME, checkRT)
    main.run()
sivas2811/mocha_739 | hotdot_env/lib/python2.7/site-packages/pip/vendor/distlib/markers.py | 80 | 6258 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Parser for the environment markers micro-language defined in PEP 345."""
import ast
import os
import sys
import platform
from .compat import python_implementation, string_types
from .util import in_venv
__all__ = ['interpret']
class Evaluator(object):
    """
    A limited evaluator for Python expressions.

    Only the AST node types with a matching ``do_<type>`` method below are
    accepted; anything else raises SyntaxError, keeping marker evaluation
    safe against arbitrary code.
    """

    # Comparison operators keyed by the lowercased AST operator class name
    # ('Eq' -> 'eq', 'NotIn' -> 'notin', ...).
    operators = {
        'eq': lambda x, y: x == y,
        'gt': lambda x, y: x > y,
        'gte': lambda x, y: x >= y,
        'in': lambda x, y: x in y,
        'lt': lambda x, y: x < y,
        'lte': lambda x, y: x <= y,
        'not': lambda x: not x,
        'noteq': lambda x, y: x != y,
        'notin': lambda x, y: x not in y,
    }

    # The only names/dotted names a marker may reference, resolved once at
    # import time from the running interpreter's environment.
    allowed_values = {
        'sys.platform': sys.platform,
        'python_version': '%s.%s' % sys.version_info[:2],
        # parsing sys.platform is not reliable, but there is no other
        # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
        'python_full_version': sys.version.split(' ', 1)[0],
        'os.name': os.name,
        'platform.in_venv': str(in_venv()),
        'platform.version': platform.version(),
        'platform.machine': platform.machine(),
        'platform.python_implementation': platform.python_implementation(),
    }

    def __init__(self, context=None):
        """
        Initialise an instance.

        :param context: If specified, names are looked up in this mapping.
        """
        self.context = context or {}
        self.source = None

    def get_fragment(self, offset):
        """
        Get the part of the source which is causing a problem.
        """
        fragment_len = 10
        s = '%r' % (self.source[offset:offset + fragment_len])
        if offset + fragment_len < len(self.source):
            s += '...'
        return s

    def get_handler(self, node_type):
        """
        Get a handler for the specified AST node type.

        Returns the bound ``do_<node_type>`` method, or None when the node
        type is unsupported.
        """
        return getattr(self, 'do_%s' % node_type, None)

    def evaluate(self, node, filename=None):
        """
        Evaluate a source string or node, using ``filename`` when
        displaying errors.
        """
        if isinstance(node, string_types):
            # First call: parse the source, then dispatch on the AST root.
            self.source = node
            kwargs = {'mode': 'eval'}
            if filename:
                kwargs['filename'] = filename
            try:
                node = ast.parse(node, **kwargs)
            except SyntaxError as e:
                s = self.get_fragment(e.offset)
                raise SyntaxError('syntax error %s' % s)
        node_type = node.__class__.__name__.lower()
        handler = self.get_handler(node_type)
        if handler is None:
            if self.source is None:
                s = '(source not available)'
            else:
                s = self.get_fragment(node.col_offset)
            raise SyntaxError("don't know how to evaluate %r %s" % (
                node_type, s))
        return handler(node)

    def get_attr_key(self, node):
        # Build the dotted lookup key, e.g. 'sys.platform'.
        assert isinstance(node, ast.Attribute), 'attribute node expected'
        return '%s.%s' % (node.value.id, node.attr)

    def do_attribute(self, node):
        # Only single-level dotted names rooted at a plain Name are valid,
        # and the key must appear in the context or the allow-list.
        valid = True
        if not isinstance(node.value, ast.Name):
            valid = False
        else:
            key = self.get_attr_key(node)
            valid = key in self.context or key in self.allowed_values
        if not valid:
            raise SyntaxError('invalid expression: %s' % key)
        if key in self.context:
            result = self.context[key]
        else:
            result = self.allowed_values[key]
        return result

    def do_boolop(self, node):
        # Short-circuiting 'and'/'or' over the operand list.
        result = self.evaluate(node.values[0])
        is_or = node.op.__class__ is ast.Or
        is_and = node.op.__class__ is ast.And
        assert is_or or is_and
        if (is_and and result) or (is_or and not result):
            for n in node.values[1:]:
                result = self.evaluate(n)
                if (is_or and result) or (is_and and not result):
                    break
        return result

    def do_compare(self, node):
        def sanity_check(lhsnode, rhsnode):
            # Reject degenerate comparisons: two literals, or the same
            # environment key compared against itself.
            valid = True
            if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
                valid = False
            elif (isinstance(lhsnode, ast.Attribute)
                  and isinstance(rhsnode, ast.Attribute)):
                klhs = self.get_attr_key(lhsnode)
                krhs = self.get_attr_key(rhsnode)
                valid = klhs != krhs
            if not valid:
                s = self.get_fragment(node.col_offset)
                raise SyntaxError('Invalid comparison: %s' % s)

        # Chained comparisons (a < b < c) evaluate pairwise, left to right,
        # stopping at the first False like Python itself does.
        lhsnode = node.left
        lhs = self.evaluate(lhsnode)
        result = True
        for op, rhsnode in zip(node.ops, node.comparators):
            sanity_check(lhsnode, rhsnode)
            op = op.__class__.__name__.lower()
            if op not in self.operators:
                raise SyntaxError('unsupported operation: %r' % op)
            rhs = self.evaluate(rhsnode)
            result = self.operators[op](lhs, rhs)
            if not result:
                break
            lhs = rhs
            lhsnode = rhsnode
        return result

    def do_expression(self, node):
        return self.evaluate(node.body)

    def do_name(self, node):
        # Bare names resolve from the context first, then the allow-list.
        valid = False
        if node.id in self.context:
            valid = True
            result = self.context[node.id]
        elif node.id in self.allowed_values:
            valid = True
            result = self.allowed_values[node.id]
        if not valid:
            raise SyntaxError('invalid expression: %s' % node.id)
        return result

    def do_str(self, node):
        return node.s
def interpret(marker, execution_context=None):
    """
    Interpret a marker and return a result depending on environment.

    :param marker: The marker to interpret.
    :type marker: str
    :param execution_context: The context used for name lookup.
    :type execution_context: mapping
    """
    evaluator = Evaluator(execution_context)
    return evaluator.evaluate(marker.strip())
| unlicense |
lanurmi/cppcheck | addons/namingng.py | 2 | 11612 | #!/usr/bin/env python3
#
# cppcheck addon for naming conventions
# An enhanced version. Configuration is taken from a json file
# It supports to check for type-based prefixes in function or variable names.
#
# Example usage (variable name must start with lowercase, function name must start with uppercase):
# $ cppcheck --dump path-to-src/
# $ python namingng.py test.c.dump
#
# JSON format:
#
# {
# "RE_VARNAME": "[a-z]*[a-zA-Z0-9_]*\\Z",
# "RE_PRIVATE_MEMBER_VARIABLE": null,
# "RE_FUNCTIONNAME": "[a-z0-9A-Z]*\\Z",
# "var_prefixes": {"uint32_t": "ui32"},
# "function_prefixes": {"uint16_t": "ui16",
# "uint32_t": "ui32"}
# }
#
# RE_VARNAME, RE_PRIVATE_MEMBER_VARIABLE and RE_FUNCTIONNAME are regular expressions to cover the basic names
# In var_prefixes and function_prefixes there are the variable-type/prefix pairs
import cppcheckdata
import sys
import re
import argparse
import json
# Auxiliary class
class DataStruct:
    """Lightweight token stand-in carrying a file name, a line number and
    the text being checked, mirroring the attributes evalExpr reads."""
    def __init__(self, file, linenr, string):
        self.file, self.linenr, self.str = file, linenr, string
def reportError(filename, linenr, severity, msg):
    """Write a cppcheck-style diagnostic to stderr and return it.

    The returned string is also collected by the callers into their
    'errors' lists (and compared against the --verify targets below).
    """
    # BUG FIX: the template hard-coded "(unknown)" where the file name
    # belongs -- the 'filename' argument was passed to format() but never
    # interpolated, so every message lost its location.
    message = "[{filename}:{linenr}] ( {severity} ) naming.py: {msg}\n".format(
        filename=filename,
        linenr=linenr,
        severity=severity,
        msg=msg
    )
    sys.stderr.write(message)
    return message
def loadConfig(configfile):
    """Read the JSON naming-convention configuration from *configfile*."""
    with open(configfile) as fh:
        return json.load(fh)
def checkTrueRegex(data, expr, msg, errors):
    """Record *msg* as a style error when *expr* matches data.str."""
    if re.match(expr, data.str):
        errors.append(reportError(data.file, data.linenr, 'style', msg))
def checkFalseRegex(data, expr, msg, errors):
    """Record *msg* as a style error when *expr* does NOT match data.str."""
    if re.match(expr, data.str) is None:
        errors.append(reportError(data.file, data.linenr, 'style', msg))
def evalExpr(conf, exp, mockToken, msgType, errors):
    """Apply one configured naming rule *exp* to *mockToken*.

    *conf* is either the rule container itself (a dict mapping each regex
    to a [match_is_error, explanation] pair) or a plain list of regexes.
    Violations are reported via checkTrueRegex/checkFalseRegex and appended
    to *errors*.
    """
    if isinstance(conf, dict):
        if conf[exp][0]:
            # A regex match constitutes the violation.
            msg = msgType + ' ' + mockToken.str + ' violates naming convention : ' + conf[exp][1]
            checkTrueRegex(mockToken, exp, msg, errors)
        elif not conf[exp][0]:
            # BUG FIX: this branch used bitwise complement ('~conf[exp][0]'),
            # which is truthy for both True and False (~False == -1) and
            # raises TypeError on None; logical negation was intended.
            msg = msgType + ' ' + mockToken.str + ' violates naming convention : ' + conf[exp][1]
            checkFalseRegex(mockToken, exp, msg, errors)
        else:
            msg = msgType + ' ' + mockToken.str + ' violates naming convention : ' + conf[exp][0]
            checkFalseRegex(mockToken, exp, msg, errors)
    else:
        # Plain list of regexes: failing to match is the violation.
        msg = msgType + ' ' + mockToken.str + ' violates naming convention'
        checkFalseRegex(mockToken, exp, msg, errors)
def process(dumpfiles, configfile, debugprint=False):
    """Check every cppcheck dump file against the configured naming rules.

    Returns the list of formatted error messages produced by reportError.
    """
    errors = []

    conf = loadConfig(configfile)

    for afile in dumpfiles:
        # Only cppcheck --dump output is accepted.
        if not afile[-5:] == '.dump':
            continue
        print('Checking ' + afile + '...')
        data = cppcheckdata.CppcheckData(afile)

        # Check File naming
        if "RE_FILE" in conf and conf["RE_FILE"]:
            # The checked string is the basename without the '.dump' suffix.
            mockToken = DataStruct(afile[:-5], "0", afile[afile.rfind('/') + 1:-5])
            msgType = 'File name'
            for exp in conf["RE_FILE"]:
                evalExpr(conf["RE_FILE"], exp, mockToken, msgType, errors)

        # Check Namespace naming
        if "RE_NAMESPACE" in conf and conf["RE_NAMESPACE"]:
            for tk in data.rawTokens:
                if tk.str == 'namespace':
                    # The token after 'namespace' is the namespace name.
                    mockToken = DataStruct(tk.next.file, tk.next.linenr, tk.next.str)
                    msgType = 'Namespace'
                    for exp in conf["RE_NAMESPACE"]:
                        evalExpr(conf["RE_NAMESPACE"], exp, mockToken, msgType, errors)

        for cfg in data.configurations:
            print('Checking %s, config %s...' % (afile, cfg.name))
            if "RE_VARNAME" in conf and conf["RE_VARNAME"]:
                for var in cfg.variables:
                    # Local/argument variables only; member and global
                    # variables are handled by the dedicated rules below.
                    if var.nameToken and var.access != 'Global' and var.access != 'Public' and var.access != 'Private':
                        prev = var.nameToken.previous
                        varType = prev.str
                        # Walk back over pointer stars to recover the full
                        # declared type, e.g. 'uint32_t *'.
                        while "*" in varType and len(varType.replace("*", "")) == 0:
                            prev = prev.previous
                            varType = prev.str + varType
                        if debugprint:
                            print("Variable Name: " + str(var.nameToken.str))
                            print("original Type Name: " + str(var.nameToken.valueType.originalTypeName))
                            print("Type Name: " + var.nameToken.valueType.type)
                            print("Sign: " + str(var.nameToken.valueType.sign))
                            print("variable type: " + varType)
                            print("\n")
                            print("\t-- {} {}".format(varType, str(var.nameToken.str)))

                        if conf["skip_one_char_variables"] and len(var.nameToken.str) == 1:
                            continue
                        # Type-based prefix check (e.g. uint32_t -> 'ui32').
                        if varType in conf["var_prefixes"]:
                            if not var.nameToken.str.startswith(conf["var_prefixes"][varType]):
                                errors.append(reportError(
                                    var.typeStartToken.file,
                                    var.typeStartToken.linenr,
                                    'style',
                                    'Variable ' +
                                    var.nameToken.str +
                                    ' violates naming convention'))

                        mockToken = DataStruct(var.typeStartToken.file, var.typeStartToken.linenr, var.nameToken.str)
                        msgType = 'Variable'
                        for exp in conf["RE_VARNAME"]:
                            evalExpr(conf["RE_VARNAME"], exp, mockToken, msgType, errors)

            # Check Private Variable naming
            if "RE_PRIVATE_MEMBER_VARIABLE" in conf and conf["RE_PRIVATE_MEMBER_VARIABLE"]:
                # TODO: Not converted yet
                for var in cfg.variables:
                    if (var.access is None) or var.access != 'Private':
                        continue
                    mockToken = DataStruct(var.typeStartToken.file, var.typeStartToken.linenr, var.nameToken.str)
                    msgType = 'Private member variable'
                    for exp in conf["RE_PRIVATE_MEMBER_VARIABLE"]:
                        evalExpr(conf["RE_PRIVATE_MEMBER_VARIABLE"], exp, mockToken, msgType, errors)

            # Check Public Member Variable naming
            if "RE_PUBLIC_MEMBER_VARIABLE" in conf and conf["RE_PUBLIC_MEMBER_VARIABLE"]:
                for var in cfg.variables:
                    if (var.access is None) or var.access != 'Public':
                        continue
                    mockToken = DataStruct(var.typeStartToken.file, var.typeStartToken.linenr, var.nameToken.str)
                    msgType = 'Public member variable'
                    for exp in conf["RE_PUBLIC_MEMBER_VARIABLE"]:
                        evalExpr(conf["RE_PUBLIC_MEMBER_VARIABLE"], exp, mockToken, msgType, errors)

            # Check Global Variable naming
            if "RE_GLOBAL_VARNAME" in conf and conf["RE_GLOBAL_VARNAME"]:
                for var in cfg.variables:
                    if (var.access is None) or var.access != 'Global':
                        continue
                    mockToken = DataStruct(var.typeStartToken.file, var.typeStartToken.linenr, var.nameToken.str)
                    # NOTE(review): message label says 'Public member
                    # variable' for global variables -- looks like a
                    # copy/paste slip; confirm before changing output.
                    msgType = 'Public member variable'
                    for exp in conf["RE_GLOBAL_VARNAME"]:
                        evalExpr(conf["RE_GLOBAL_VARNAME"], exp, mockToken, msgType, errors)

            # Check Functions naming
            if "RE_FUNCTIONNAME" in conf and conf["RE_FUNCTIONNAME"]:
                for token in cfg.tokenlist:
                    if token.function:
                        if token.function.type == 'Constructor' or token.function.type == 'Destructor':
                            continue
                        retval = token.previous.str
                        prev = token.previous
                        # Walk back over pointer stars to recover the full
                        # return type.
                        while "*" in retval and len(retval.replace("*", "")) == 0:
                            prev = prev.previous
                            retval = prev.str + retval
                        if debugprint:
                            print("\t:: {} {}".format(retval, token.function.name))

                        # Return-type-based prefix check.
                        if retval and retval in conf["function_prefixes"]:
                            if not token.function.name.startswith(conf["function_prefixes"][retval]):
                                errors.append(reportError(
                                    token.file, token.linenr, 'style', 'Function ' + token.function.name + ' violates naming convention'))
                        mockToken = DataStruct(token.file, token.linenr, token.function.name)
                        msgType = 'Function'
                        for exp in conf["RE_FUNCTIONNAME"]:
                            evalExpr(conf["RE_FUNCTIONNAME"], exp, mockToken, msgType, errors)

            # Check Class naming
            if "RE_CLASS_NAME" in conf and conf["RE_CLASS_NAME"]:
                for fnc in cfg.functions:
                    # Check if it is Constructor/Destructor
                    if fnc.type == 'Constructor' or fnc.type == 'Destructor':
                        mockToken = DataStruct(fnc.tokenDef.file, fnc.tokenDef.linenr, fnc.name)
                        msgType = 'Class ' + fnc.type
                        for exp in conf["RE_CLASS_NAME"]:
                            evalExpr(conf["RE_CLASS_NAME"], exp, mockToken, msgType, errors)
    return errors
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Naming verification')
    parser.add_argument('dumpfiles', type=str, nargs='+',
                        help='A set of dumpfiles to process')
    parser.add_argument("--debugprint", action="store_true", default=False,
                        help="Add debug prints")
    parser.add_argument("--configfile", type=str, default="naming.json",
                        help="Naming check config file")
    parser.add_argument("--verify", action="store_true", default=False,
                        help="verify this script. Must be executed in test folder !")

    args = parser.parse_args()
    errors = process(args.dumpfiles, args.configfile, args.debugprint)
    if args.verify:
        # Self-test mode: compare produced messages against known-good
        # output from running on namingng_test.c in the test folder.
        print(errors)
        if len(errors) < 6:
            print("Not enough errors found")
            sys.exit(1)
        target = [
            '[namingng_test.c:8] ( style ) naming.py: Variable badui32 violates naming convention\n',
            '[namingng_test.c:11] ( style ) naming.py: Variable a violates naming convention\n',
            '[namingng_test.c:29] ( style ) naming.py: Variable badui32 violates naming convention\n',
            '[namingng_test.c:20] ( style ) naming.py: Function ui16bad_underscore violates naming convention\n',
            '[namingng_test.c:25] ( style ) naming.py: Function u32Bad violates naming convention\n',
            '[namingng_test.c:37] ( style ) naming.py: Function Badui16 violates naming convention\n']
        diff = set(errors) - set(target)
        if len(diff):
            print("Not the right errors found {}".format(str(diff)))
            sys.exit(1)
        print("Verification done\n")
        sys.exit(0)
    # Normal mode: non-zero exit when any violation was reported.
    if len(errors):
        print('Found errors: {}'.format(len(errors)))
        sys.exit(1)
    sys.exit(0)
| gpl-3.0 |
JioCloud/contrail-generateDS | tests/extensions1_sub.py | 5 | 7724 | #!/usr/bin/env python
#
# Generated by generateDS.py.
#
import sys
import extensions2_sup as supermod
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document, defaulting to lxml's ET-compatible parser.

    When lxml was the library imported and the caller did not supply a
    parser, use ETCompatXMLParser so that, e.g., comments are ignored.
    """
    using_lxml = XMLParser_import_library == XMLParser_import_lxml
    if using_lxml and 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Data representation classes
#
class SpecialDateSub(supermod.SpecialDate):
    """Generated subclass of supermod.SpecialDate; add custom behaviour here."""
    def __init__(self, SpecialProperty=None, valueOf_=None):
        super(SpecialDateSub, self).__init__(SpecialProperty, valueOf_, )
# Hook the subclass into supermod (its factory presumably checks .subclass).
supermod.SpecialDate.subclass = SpecialDateSub
# end class SpecialDateSub
class ExtremeDateSub(supermod.ExtremeDate):
    """Generated subclass of supermod.ExtremeDate; add custom behaviour here."""
    def __init__(self, ExtremeProperty=None, valueOf_=None):
        super(ExtremeDateSub, self).__init__(ExtremeProperty, valueOf_, )
supermod.ExtremeDate.subclass = ExtremeDateSub
# end class ExtremeDateSub
class singleExtremeDateSub(supermod.singleExtremeDate):
    """Generated subclass of supermod.singleExtremeDate; add custom behaviour here."""
    def __init__(self, ExtremeProperty=None, valueOf_=None):
        super(singleExtremeDateSub, self).__init__(ExtremeProperty, valueOf_, )
supermod.singleExtremeDate.subclass = singleExtremeDateSub
# end class singleExtremeDateSub
class containerTypeSub(supermod.containerType):
    """Generated subclass of supermod.containerType; add custom behaviour here."""
    def __init__(self, simplefactoid=None, mixedfactoid=None):
        super(containerTypeSub, self).__init__(simplefactoid, mixedfactoid, )
supermod.containerType.subclass = containerTypeSub
# end class containerTypeSub
class simpleFactoidTypeSub(supermod.simpleFactoidType):
    """Generated subclass of supermod.simpleFactoidType; add custom behaviour here."""
    def __init__(self, relation=None):
        super(simpleFactoidTypeSub, self).__init__(relation, )
supermod.simpleFactoidType.subclass = simpleFactoidTypeSub
# end class simpleFactoidTypeSub
class mixedFactoidTypeSub(supermod.mixedFactoidType):
    """Generated subclass of supermod.mixedFactoidType; add custom behaviour here."""
    def __init__(self, relation=None, valueOf_=None, mixedclass_=None, content_=None):
        super(mixedFactoidTypeSub, self).__init__(relation, valueOf_, mixedclass_, content_, )
supermod.mixedFactoidType.subclass = mixedFactoidTypeSub
# end class mixedFactoidTypeSub
class BaseTypeSub(supermod.BaseType):
    """Generated subclass of supermod.BaseType; add custom behaviour here."""
    def __init__(self, BaseProperty1=None, BaseProperty2=None, valueOf_=None, extensiontype_=None):
        super(BaseTypeSub, self).__init__(BaseProperty1, BaseProperty2, valueOf_, extensiontype_, )
supermod.BaseType.subclass = BaseTypeSub
# end class BaseTypeSub
class DerivedTypeSub(supermod.DerivedType):
    """Generated subclass of supermod.DerivedType; add custom behaviour here."""
    def __init__(self, BaseProperty1=None, BaseProperty2=None, DerivedProperty1=None, DerivedProperty2=None, valueOf_=None):
        super(DerivedTypeSub, self).__init__(BaseProperty1, BaseProperty2, DerivedProperty1, DerivedProperty2, valueOf_, )
supermod.DerivedType.subclass = DerivedTypeSub
# end class DerivedTypeSub
class MyIntegerSub(supermod.MyInteger):
    """Generated subclass of supermod.MyInteger; add custom behaviour here."""
    def __init__(self, MyAttr=None, valueOf_=None):
        super(MyIntegerSub, self).__init__(MyAttr, valueOf_, )
supermod.MyInteger.subclass = MyIntegerSub
# end class MyIntegerSub
class MyBooleanSub(supermod.MyBoolean):
    """Generated subclass of supermod.MyBoolean; add custom behaviour here."""
    def __init__(self, MyAttr=None, valueOf_=None):
        super(MyBooleanSub, self).__init__(MyAttr, valueOf_, )
supermod.MyBoolean.subclass = MyBooleanSub
# end class MyBooleanSub
class MyFloatSub(supermod.MyFloat):
    """Generated subclass of supermod.MyFloat; add custom behaviour here."""
    def __init__(self, MyAttr=None, valueOf_=None):
        super(MyFloatSub, self).__init__(MyAttr, valueOf_, )
supermod.MyFloat.subclass = MyFloatSub
# end class MyFloatSub
class MyDoubleSub(supermod.MyDouble):
    """Generated subclass of supermod.MyDouble; add custom behaviour here."""
    def __init__(self, MyAttr=None, valueOf_=None):
        super(MyDoubleSub, self).__init__(MyAttr, valueOf_, )
supermod.MyDouble.subclass = MyDoubleSub
# end class MyDoubleSub
def get_root_tag(node):
    """Return (tag, class) for *node*; class is None when supermod has no match."""
    tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
    root_class = getattr(supermod, tag, None)
    return tag, root_class
def parse(inFilename):
    """Parse the XML file *inFilename* and return the built root object.

    Unknown root elements fall back to the generic container type.
    """
    doc = parsexml_(inFilename)
    root_node = doc.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'container'
        root_class = supermod.containerType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM reference so Python can reclaim its memory.
    doc = None
    return root_obj
def parseString(inString):
    """Parse XML from the string *inString* and return the built root object."""
    from StringIO import StringIO
    doc = parsexml_(StringIO(inString))
    root_node = doc.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'container'
        root_class = supermod.containerType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM reference so Python can reclaim its memory.
    doc = None
    return root_obj
def parseLiteral(inFilename):
    """Parse the XML file *inFilename* and return the built root object.

    Historically this variant also emitted a Python-literal rendering of
    the document; building the object tree is all that remains.
    """
    doc = parsexml_(inFilename)
    root_node = doc.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'container'
        root_class = supermod.containerType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM reference so Python can reclaim its memory.
    doc = None
    return root_obj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
    """Print usage text and exit with status 1 (Python 2 print statement)."""
    print USAGE_TEXT
    sys.exit(1)
def main():
    """Parse the single file named on the command line (smoke-test entry point)."""
    args = sys.argv[1:]
    if len(args) != 1:
        usage()
    infilename = args[0]
    root = parse(infilename)
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
| mit |
mgedmin/ansible | lib/ansible/plugins/callback/junit.py | 31 | 8269 | # (c) 2016 Matt Clay <matt@mystile.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
from ansible.module_utils._text import to_bytes
from ansible.plugins.callback import CallbackBase
try:
from junit_xml import TestSuite, TestCase
HAS_JUNIT_XML = True
except ImportError:
HAS_JUNIT_XML = False
try:
from collections import OrderedDict
HAS_ORDERED_DICT = True
except ImportError:
try:
from ordereddict import OrderedDict
HAS_ORDERED_DICT = True
except ImportError:
HAS_ORDERED_DICT = False
class CallbackModule(CallbackBase):
    """
    This callback writes playbook output to a JUnit formatted XML file.
    Tasks show up in the report as follows:
        'ok': pass
        'failed' with 'EXPECTED FAILURE' in the task name: pass
        'failed' due to an exception: error
        'failed' for other reasons: failure
        'skipped': skipped
    This plugin makes use of the following environment variables:
        JUNIT_OUTPUT_DIR (optional): Directory to write XML files to.
                                     Default: ~/.ansible.log
    Requires:
        junit_xml
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'junit'
    CALLBACK_NEEDS_WHITELIST = True
    def __init__(self):
        super(CallbackModule, self).__init__()
        self._output_dir = os.getenv('JUNIT_OUTPUT_DIR', os.path.expanduser('~/.ansible.log'))
        self._playbook_path = None
        self._playbook_name = None
        self._play_name = None
        self._task_data = None
        self.disabled = False
        # Disable the plugin (rather than crash the run) when optional
        # dependencies are missing.
        if not HAS_JUNIT_XML:
            self.disabled = True
            self._display.warning('The `junit_xml` python module is not installed. '
                                  'Disabling the `junit` callback plugin.')
        if HAS_ORDERED_DICT:
            # Keyed by task UUID; ordered so the report follows execution order.
            self._task_data = OrderedDict()
        else:
            self.disabled = True
            self._display.warning('The `ordereddict` python module is not installed. '
                                  'Disabling the `junit` callback plugin.')
        if not os.path.exists(self._output_dir):
            os.mkdir(self._output_dir)
    def _start_task(self, task):
        """ record the start of a task for one or more hosts """
        uuid = task._uuid
        # The same task may trigger several start callbacks; keep the first.
        if uuid in self._task_data:
            return
        play = self._play_name
        name = task.get_name().strip()
        path = task.get_path()
        # Append the task arguments to the name unless no_log is set.
        if not task.no_log:
            args = ', '.join(('%s=%s' % a for a in task.args.items()))
            if args:
                name += ' ' + args
        self._task_data[uuid] = TaskData(uuid, name, path, play)
    def _finish_task(self, status, result):
        """ record the results of a task for a single host """
        task_uuid = result._task._uuid
        # Include callbacks pass an included_file, which has no _host.
        if hasattr(result, '_host'):
            host_uuid = result._host._uuid
            host_name = result._host.name
        else:
            host_uuid = 'include'
            host_name = 'include'
        task_data = self._task_data[task_uuid]
        # Tasks marked as expected failures count as passes when they fail.
        if status == 'failed' and 'EXPECTED FAILURE' in task_data.name:
            status = 'ok'
        task_data.add_host(HostData(host_uuid, host_name, status, result))
    def _build_test_case(self, task_data, host_data):
        """ build a TestCase from the given TaskData and HostData """
        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
        duration = host_data.finish - task_data.start
        if host_data.status == 'included':
            # Include results carry their output directly as a string.
            return TestCase(name, task_data.path, duration, host_data.result)
        res = host_data.result._result
        rc = res.get('rc', 0)
        dump = self._dump_results(res, indent=0)
        if host_data.status == 'ok':
            return TestCase(name, task_data.path, duration, dump)
        test_case = TestCase(name, task_data.path, duration)
        if host_data.status == 'failed':
            # Exceptions become JUnit "errors"; other failures "failures".
            if 'exception' in res:
                message = res['exception'].strip().split('\n')[-1]
                output = res['exception']
                test_case.add_error_info(message, output)
            elif 'msg' in res:
                message = res['msg']
                test_case.add_failure_info(message, dump)
            else:
                test_case.add_failure_info('rc=%s' % rc, dump)
        elif host_data.status == 'skipped':
            if 'skip_reason' in res:
                message = res['skip_reason']
            else:
                message = 'skipped'
            test_case.add_skipped_info(message)
        return test_case
    def _generate_report(self):
        """ generate a TestSuite report from the collected TaskData and HostData """
        test_cases = []
        for task_uuid, task_data in self._task_data.items():
            for host_uuid, host_data in task_data.host_data.items():
                test_cases.append(self._build_test_case(task_data, host_data))
        test_suite = TestSuite(self._playbook_name, test_cases)
        report = TestSuite.to_xml_string([test_suite])
        # Timestamp in the filename keeps reports from successive runs apart.
        output_file = os.path.join(self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))
        with open(output_file, 'wb') as xml:
            xml.write(to_bytes(report, errors='surrogate_or_strict'))
    def v2_playbook_on_start(self, playbook):
        self._playbook_path = playbook._file_name
        self._playbook_name = os.path.splitext(os.path.basename(self._playbook_path))[0]
    def v2_playbook_on_play_start(self, play):
        self._play_name = play.get_name()
    def v2_runner_on_no_hosts(self, task):
        self._start_task(task)
    def v2_playbook_on_task_start(self, task, is_conditional):
        self._start_task(task)
    def v2_playbook_on_cleanup_task_start(self, task):
        self._start_task(task)
    def v2_playbook_on_handler_task_start(self, task):
        self._start_task(task)
    def v2_runner_on_failed(self, result, ignore_errors=False):
        # Failures explicitly ignored by the playbook are reported as passes.
        if ignore_errors:
            self._finish_task('ok', result)
        else:
            self._finish_task('failed', result)
    def v2_runner_on_ok(self, result):
        self._finish_task('ok', result)
    def v2_runner_on_skipped(self, result):
        self._finish_task('skipped', result)
    def v2_playbook_on_include(self, included_file):
        self._finish_task('included', included_file)
    def v2_playbook_on_stats(self, stats):
        # Stats callback fires at the end of the run: write the report.
        self._generate_report()
class TaskData:
    """
    Data about an individual task.

    Collects one HostData per host that ran the task; ``start`` is the
    wall-clock time the task began and is used to compute durations.
    """
    def __init__(self, uuid, name, path, play):
        self.uuid = uuid
        self.name = name
        self.path = path
        self.play = play
        # host uuid -> HostData, in callback (execution) order.
        self.host_data = OrderedDict()
        # Fixed: the original assigned ``self.start = None`` and then
        # immediately overwrote it with time.time() (dead assignment).
        self.start = time.time()
    def add_host(self, host):
        """Record *host*'s result; repeated 'included' callbacks are merged."""
        if host.uuid in self.host_data:
            if host.status == 'included':
                # concatenate task include output from multiple items
                host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
            else:
                raise Exception('%s: %s: %s: duplicate host callback: %s' % (self.path, self.play, self.name, host.name))
        self.host_data[host.uuid] = host
class HostData:
    """
    Data about an individual host.
    """
    def __init__(self, uuid, name, status, result):
        # Timestamp of when this result arrived; TaskData.start is
        # subtracted from it to obtain the test-case duration.
        self.finish = time.time()
        self.uuid = uuid
        self.name = name
        self.status = status
        self.result = result
| gpl-3.0 |
chafique-delli/OpenUpgrade | addons/portal_project_issue/tests/__init__.py | 167 | 1124 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
# Test suites exported by this module; presumably consumed by the OpenERP
# test runner — verify against the framework before relying on the name.
checks = [
    test_access_rights,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
csantosb/password-store | contrib/importers/keepassx2pass.py | 30 | 2707 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Juhamatti Niemelä <iiska@iki.fi>. All Rights Reserved.
# This file is licensed under the GPLv2+. Please see COPYING for more information.
import sys
import re
from subprocess import Popen, PIPE
from xml.etree import ElementTree
def space_to_camelcase(value):
    """Convert a space-separated string to camelCase.

    Empty segments (from consecutive spaces) become underscores; the
    first real word is lowercased and later words are capitalized.
    """
    pieces = []
    seen_word = False
    for word in value.split(" "):
        if not word:
            pieces.append("_")
        elif seen_word:
            pieces.append(word.capitalize())
        else:
            pieces.append(word.lower())
            seen_word = True
    return "".join(pieces)
def cleanTitle(title):
    """Make the title friendlier for use on the command line / as a path."""
    # Replace separator-like characters with dashes, drop one trailing
    # dash, then spell out "@" and remove apostrophes.
    for ch in ("\\", "|", "(", ")", "/"):
        title = title.replace(ch, "-")
    if title.endswith("-"):
        title = title[:-1]
    return title.replace("@", "At").replace("'", "")
def path_for(element, path=''):
    """Build the pass(1) entry path from *element*'s title and the current path."""
    raw_title = element.find('title').text or ''
    return '/'.join([path, cleanTitle(space_to_camelcase(raw_title))])
def password_data(element):
    """ Return password data and additional info if available from
    password entry element. """
    # First line of the multiline pass(1) entry is the password itself.
    passwd = element.find('password').text
    ret = passwd + "\n" if passwd else "\n"
    for field in ['username', 'url', 'comment']:
        fel = element.find(field)
        # Flatten child elements into text; Python 2 ``unicode`` builtin.
        children = [unicode(e.text or '') + unicode(e.tail or '') for e in list(fel)]
        if len(children) > 0:
            children.insert(0, '')
        text = (fel.text or '') + "\n".join(children)
        # Only emit "field: value" lines for fields that have content.
        if len(text) > 0:
            ret = "%s%s: %s\n" % (ret, fel.tag, text)
    return ret
def import_entry(element, path=''):
    """ Import new password entry to password-store using pass insert
    command """
    # Python 2 print statement; this script predates Python 3 support.
    print "Importing " + path_for(element, path)
    # Feed the multiline entry to pass over stdin, overwriting any
    # existing entry (--force).
    proc = Popen(['pass', 'insert', '--multiline', '--force',
                  path_for(element, path)],
                 stdin=PIPE, stdout=PIPE)
    proc.communicate(password_data(element).encode('utf8'))
    proc.wait()
def import_group(element, path=''):
    """ Import all entries and sub-groups from given group """
    group_path = path_for(element, path)
    # Recurse into nested groups first, then import this group's entries.
    for child_group in element.findall('group'):
        import_group(child_group, group_path)
    for child_entry in element.findall('entry'):
        import_entry(child_entry, group_path)
def main(xml_file):
    """ Parse given KeepassX XML file and import password groups from it """
    for group in ElementTree.parse(xml_file).findall('group'):
        import_group(group)
# Entry point: expects the KeepassX XML export path as argv[1].
if __name__ == '__main__':
    main(sys.argv[1])
| gpl-2.0 |
mkrautz/gyp-libmumble | test/ninja/normalize-paths-win/gyptest-normalize-paths.py | 180 | 1272 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure paths are normalized with VS macros properly expanded on Windows.
"""
import TestGyp
import sys
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['ninja'])
  test.run_gyp('normalize-paths.gyp')
  # We can't use existence tests because any case will pass, so we check the
  # contents of ninja files directly since that's what we're most concerned
  # with anyway.
  subninja = open(test.built_file_path('obj/some_target.ninja')).read()
  # VS macros must be expanded, and absolute output paths relativized.
  if '$!product_dir' in subninja:
    test.fail_test()
  if 'out\\Default' in subninja:
    test.fail_test()
  second = open(test.built_file_path('obj/second.ninja')).read()
  # Target names must survive while their paths are normalized.
  if ('..\\..\\things\\AnotherName.exe' in second or
      'AnotherName.exe' not in second):
    test.fail_test()
  action = open(test.built_file_path('obj/action.ninja')).read()
  if '..\\..\\out\\Default' in action:
    test.fail_test()
  if '..\\..\\SomethingElse' in action or 'SomethingElse' not in action:
    test.fail_test()
  if '..\\..\\SomeOtherInput' in action or 'SomeOtherInput' not in action:
    test.fail_test()
  test.pass_test()
| bsd-3-clause |
EdgarRMmex/QPSTools | qdf.py | 1 | 5654 | """
"""
import codecs
import re
import pandas as pd
import numpy as np
class Entry:
    """A single question definition parsed from a QDF file.

    Attributes are filled in incrementally by QuestionDefinitions.parse_qdf.
    """
    def __init__(self):
        # Fixed: this was previously named ``__init`` (missing trailing
        # underscores), so it never ran and instances started without any
        # of these attributes.
        self.name = None          # entry identifier, e.g. "Q1"
        self.text = None          # question text
        self.pandas_type = None   # pandas dtype name, looked up in types_dict
        self.qps_type = None      # QPS type code ("C", "S", "M", "I", ...)
        self.start = None         # 1-based start column in the data file
        self.size = None          # field width in characters
        self.max_sel = None       # max selections for multi-response, or None
        self.responses = None     # list of response labels, when present
        self.instructions = None  # extra instruction text, when present
    def __str__(self):
        # str(...) keeps this safe (no TypeError) while name is still None.
        return str(self.name)
    def __repr__(self):
        return str(self.name)
class Response:
    """A single response option; ``text`` is assigned during parsing."""
    def __init__(self):
        # Label text of the response (set later by the QDF parser).
        self.text = None
# Map QPS type codes to pandas dtype names.  Codes missing here (e.g. "T")
# leave Entry.pandas_type as None, and such entries are skipped by parse_qdf.
types_dict = {
    "C": "object",
    "S": "int",
    "M": "int",
    "I": "int"
}
class QuestionDefinitions:
    """Parser for QPS question-definition (QDF) files.

    parse_qdf() fills ``entries`` (entry names in file order) and
    ``entries_datafile`` (name -> Entry with column layout); read_data()
    then loads the matching fixed-width data file into a DataFrame.
    """
    def __init__(self):
        self.datafile = None
        self.quotafile = None
        self.qps_version = None
        self.entries = []
        self.entries_datafile = {}
    @staticmethod
    def _to_int(group):
        """Convert a regex capture to int, mapping None/empty to None.

        Replaces the original ``eval(str(...))`` calls, which evaluated
        text taken straight from the input file (unsafe and slower).
        """
        return int(group) if group else None
    def parse_qdf(self, path):
        """Parse the QDF file at *path*, populating entries/entries_datafile."""
        with codecs.open(path, "r", "latin1") as file:
            qdf_contents = file.read()
        qdf_lines = qdf_contents.split("\r\n")
        inside_entry = False
        inside_response = False
        # Implicit serial-number entry; its layout comes from the "C S" line.
        serial_entry = Entry()
        serial_entry.pandas_type = "int"
        serial_entry.qps_type = "I"
        self.entries.append("SERIAL")
        self.entries_datafile["SERIAL"] = serial_entry
        for line in qdf_lines:
            if line.startswith("C S "):
                m = re.match(r"^C S (\d+)L(\d+)", line)
                self.entries_datafile["SERIAL"].start = self._to_int(m.group(1))
                self.entries_datafile["SERIAL"].size = self._to_int(m.group(2))
            elif line == "Q B":
                # Begin a new question entry.
                inside_entry = True
                entry = Entry()
                continue
            elif line == "Q E":
                # End of entry: keep it only when it maps to a pandas dtype
                # (type "T" entries have no dtype and are dropped).
                inside_entry = False
                inside_response = False
                if entry.pandas_type:
                    self.entries_datafile[entry.name] = entry
                    self.entries.append(entry.name)
            elif line.startswith("X I ") and inside_entry:
                m = re.match(r"^X I [0-9]{1,7} (.*)$", line)
                if m:
                    entry.instructions = m.group(1)
                else:
                    entry.instructions = None
            elif line.startswith("Q ") and inside_entry:
                m = re.match(r"^Q \[(.*?)\] (.*)$", line)
                entry.name = m.group(1)
                entry.text = m.group(2)
            elif line.startswith("T ") and inside_entry and not inside_response:
                m = re.match(r"^T (.*?)$", line)
                entry.qps_type = m.group(1)
                try:
                    entry.pandas_type = types_dict[m.group(1)]
                except KeyError:
                    entry.pandas_type = None
                if m.group(1) == "T":
                    entry.start = None
                    entry.size = None
                    entry.max_sel = None
            elif line.startswith("L ") and inside_entry:
                # Layout: start column, field length, optional max selections.
                m = re.match(r"^L (\d+)L(\d+)R*(\d+)*$", line)
                entry.start = self._to_int(m.group(1))
                entry.size = self._to_int(m.group(2))
                entry.max_sel = self._to_int(m.group(3))
                inside_response = True
                entry.responses = []
            elif line.startswith("R ") and inside_response:
                m = re.match(r"^R (.*?)$", line)
                entry.responses.append(m.group(1))
    def read_data(self, datafile):
        """Read *datafile* (fixed-width) using the parsed column layout.

        Multi-response ("M") entries are expanded into one column per
        selection (or per response when max_sel is absent).  Returns a
        pandas DataFrame.
        """
        if len(self.entries) == 0:
            raise ValueError("There are no entries in the Question definitions")
        entry_names = []
        entry_specs = []
        # NOTE(review): entry_types is collected but never passed to
        # pd.read_fwf; presumably it was meant as a dtype map — confirm
        # before wiring it up.
        entry_types = {}
        for entry in self.entries:
            entry_object = self.entries_datafile[entry]
            if entry_object.qps_type == "M":
                if entry_object.max_sel:
                    # One column per allowed selection, each ``size`` wide.
                    max_digits = np.floor(np.log10(entry_object.max_sel)) + 1
                    fmt = "%s${0:0>%dd}" % (entry, max_digits)
                    spec1, spec2 = entry_object.start, entry_object.start + entry_object.size
                    for i in range(entry_object.max_sel):
                        column_name = fmt.format(i + 1)
                        entry_names.append(column_name)
                        entry_specs.append((spec1 - 1, spec2 - 1))
                        entry_types[column_name] = entry_object.pandas_type
                        spec1, spec2 = spec1 + entry_object.size, spec2 + entry_object.size
                else:
                    # One single-character column per response option.
                    max_digits = np.floor(np.log10(len(entry_object.responses))) + 1
                    fmt = "%s${0:0>%dd}" % (entry, max_digits)
                    spec1, spec2 = entry_object.start, entry_object.start + 1
                    for i, response in enumerate(entry_object.responses):
                        column_name = fmt.format(i + 1)
                        entry_names.append(column_name)
                        entry_specs.append((spec1 - 1, spec2 - 1))
                        entry_types[column_name] = entry_object.pandas_type
                        spec1, spec2 = spec1 + 1, spec2 + 1
            else:
                # Single-column entry: one fixed-width field.
                spec1, spec2 = entry_object.start, entry_object.start + entry_object.size
                entry_names.append(entry)
                entry_specs.append((spec1 - 1, spec2 - 1))
                entry_types[entry] = entry_object.pandas_type
        return pd.read_fwf(datafile, colspecs=entry_specs, names=entry_names)
def main():
    # Placeholder entry point; this module is primarily used as a library.
    pass
if __name__ == "__main__":
    main()
| gpl-3.0 |
lfz/Guided-Denoise | Attackset/Iter8_v3_resv2_inresv2_random/nets/resnet_v1_test.py | 33 | 18981 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import resnet_utils
from nets import resnet_v1
slim = tf.contrib.slim
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.
  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.
  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    # Deterministic input: pixel (h, w) has value h + w, tiled across the
    # batch and channel dimensions.
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels]))
class ResnetUtilsTest(tf.test.TestCase):
  """Unit tests for resnet_utils helpers: subsample, conv2d_same and
  dense block stacking (atrous convolution)."""
  def testSubsampleThreeByThree(self):
    x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
    x = resnet_utils.subsample(x, 2)
    expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
    with self.test_session():
      self.assertAllClose(x.eval(), expected.eval())
  def testSubsampleFourByFour(self):
    x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
    x = resnet_utils.subsample(x, 2)
    expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
    with self.test_session():
      self.assertAllClose(x.eval(), expected.eval())
  def testConv2DSameEven(self):
    n, n2 = 4, 2
    # Input image.
    x = create_test_input(1, n, n, 1)
    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = tf.reshape(w, [3, 3, 1, 1])
    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()
    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 26],
                               [28, 48, 66, 37],
                               [43, 66, 84, 46],
                               [26, 37, 46, 22]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
    # conv2d_same with stride 2 must match stride-1 conv + subsampling.
    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43],
                               [43, 84]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected
    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = tf.to_float([[48, 37],
                               [37, 22]])
    y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
  def testConv2DSameOdd(self):
    n, n2 = 5, 3
    # Input image.
    x = create_test_input(1, n, n, 1)
    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = tf.reshape(w, [3, 3, 1, 1])
    tf.get_variable('Conv/weights', initializer=w)
    tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
    tf.get_variable_scope().reuse_variables()
    y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = tf.to_float([[14, 28, 43, 58, 34],
                               [28, 48, 66, 84, 46],
                               [43, 66, 84, 102, 55],
                               [58, 84, 102, 120, 64],
                               [34, 46, 55, 64, 30]])
    y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = tf.to_float([[14, 43, 34],
                               [43, 84, 55],
                               [34, 55, 30]])
    y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected
    y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = y2_expected
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())
  def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
    """A plain ResNet without extra layers before or after the ResNet blocks."""
    with tf.variable_scope(scope, values=[inputs]):
      with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
        net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
        end_points = slim.utils.convert_collection_to_dict('end_points')
    return net, end_points
  def testEndPointsV1(self):
    """Test the end points of a tiny v1 bottleneck network."""
    blocks = [
        resnet_v1.resnet_v1_block(
            'block1', base_depth=1, num_units=2, stride=2),
        resnet_v1.resnet_v1_block(
            'block2', base_depth=2, num_units=2, stride=1),
    ]
    inputs = create_test_input(2, 32, 16, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
    expected = [
        'tiny/block1/unit_1/bottleneck_v1/shortcut',
        'tiny/block1/unit_1/bottleneck_v1/conv1',
        'tiny/block1/unit_1/bottleneck_v1/conv2',
        'tiny/block1/unit_1/bottleneck_v1/conv3',
        'tiny/block1/unit_2/bottleneck_v1/conv1',
        'tiny/block1/unit_2/bottleneck_v1/conv2',
        'tiny/block1/unit_2/bottleneck_v1/conv3',
        'tiny/block2/unit_1/bottleneck_v1/shortcut',
        'tiny/block2/unit_1/bottleneck_v1/conv1',
        'tiny/block2/unit_1/bottleneck_v1/conv2',
        'tiny/block2/unit_1/bottleneck_v1/conv3',
        'tiny/block2/unit_2/bottleneck_v1/conv1',
        'tiny/block2/unit_2/bottleneck_v1/conv2',
        'tiny/block2/unit_2/bottleneck_v1/conv3']
    self.assertItemsEqual(expected, end_points)
  def _stack_blocks_nondense(self, net, blocks):
    """A simplified ResNet Block stacker without output stride control."""
    for block in blocks:
      with tf.variable_scope(block.scope, 'block', [net]):
        for i, unit in enumerate(block.args):
          with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
            net = block.unit_fn(net, rate=1, **unit)
    return net
  def testAtrousValuesBottleneck(self):
    """Verify the values of dense feature extraction by atrous convolution.
    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.
    """
    block = resnet_v1.resnet_v1_block
    blocks = [
        block('block1', base_depth=1, num_units=2, stride=2),
        block('block2', base_depth=2, num_units=2, stride=2),
        block('block3', base_depth=4, num_units=2, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    nominal_stride = 8
    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      with slim.arg_scope([slim.batch_norm], is_training=False):
        # Check every supported output stride, plus the no-control case.
        for output_stride in [1, 2, 4, 8, None]:
          with tf.Graph().as_default():
            with self.test_session() as sess:
              tf.set_random_seed(0)
              inputs = create_test_input(1, height, width, 3)
              # Dense feature extraction followed by subsampling.
              output = resnet_utils.stack_blocks_dense(inputs,
                                                       blocks,
                                                       output_stride)
              if output_stride is None:
                factor = 1
              else:
                factor = nominal_stride // output_stride
              output = resnet_utils.subsample(output, factor)
              # Make the two networks use the same weights.
              tf.get_variable_scope().reuse_variables()
              # Feature extraction at the nominal network rate.
              expected = self._stack_blocks_nondense(inputs, blocks)
              sess.run(tf.global_variables_initializer())
              output, expected = sess.run([output, expected])
              self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(tf.test.TestCase):
"""Tests with complete small ResNet v1 networks."""
  def _resnet_small(self,
                    inputs,
                    num_classes=None,
                    is_training=True,
                    global_pool=True,
                    output_stride=None,
                    include_root_block=True,
                    spatial_squeeze=True,
                    reuse=None,
                    scope='resnet_v1_small'):
    """A shallow and thin ResNet v1 for faster tests."""
    block = resnet_v1.resnet_v1_block
    # Same four-block layout as the real resnet_v1 nets, with tiny depths.
    blocks = [
        block('block1', base_depth=1, num_units=3, stride=2),
        block('block2', base_depth=2, num_units=3, stride=2),
        block('block3', base_depth=4, num_units=3, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    return resnet_v1.resnet_v1(inputs, blocks, num_classes,
                               is_training=is_training,
                               global_pool=global_pool,
                               output_stride=output_stride,
                               include_root_block=include_root_block,
                               spatial_squeeze=spatial_squeeze,
                               reuse=reuse,
                               scope=scope)
  def testClassificationEndPoints(self):
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      logits, end_points = self._resnet_small(inputs, num_classes,
                                              global_pool=global_pool,
                                              spatial_squeeze=False,
                                              scope='resnet')
    # Without spatial squeeze, logits keep their 1x1 spatial dimensions.
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
    self.assertTrue('predictions' in end_points)
    self.assertListEqual(end_points['predictions'].get_shape().as_list(),
                         [2, 1, 1, num_classes])
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
include_root_block=False,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
spatial_squeeze=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
spatial_squeeze=False,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
yestech/gae-django-template | django/contrib/auth/admin.py | 153 | 6848 | from django.db import transaction
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, AdminPasswordChangeForm
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
csrf_protect_m = method_decorator(csrf_protect)
class GroupAdmin(admin.ModelAdmin):
    """Admin options for django.contrib.auth Group objects."""
    search_fields = ('name',)
    ordering = ('name',)
    # Two-pane widget for assigning permissions.
    filter_horizontal = ('permissions',)
class UserAdmin(admin.ModelAdmin):
    """Admin options for django.contrib.auth User objects.

    Uses a dedicated creation form (username + two password fields), a
    change form that shows the hashed password, and a separate
    password-change view wired in via get_urls().
    """
    add_form_template = 'admin/auth/user/add_form.html'
    change_user_password_template = None
    fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
        (_('Groups'), {'fields': ('groups',)}),
    )
    # Minimal fieldsets shown on the add page only (see get_fieldsets).
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'password1', 'password2')}
        ),
    )
    form = UserChangeForm
    add_form = UserCreationForm
    change_password_form = AdminPasswordChangeForm
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
    list_filter = ('is_staff', 'is_superuser', 'is_active')
    search_fields = ('username', 'first_name', 'last_name', 'email')
    ordering = ('username',)
    filter_horizontal = ('user_permissions',)

    def __call__(self, request, url):
        # this should not be here, but must be due to the way __call__ routes
        # in ModelAdmin.
        if url is None:
            return self.changelist_view(request)
        if url.endswith('password'):
            return self.user_change_password(request, url.split('/')[0])
        return super(UserAdmin, self).__call__(request, url)

    def get_fieldsets(self, request, obj=None):
        """Use the reduced add_fieldsets when creating a new user."""
        if not obj:
            return self.add_fieldsets
        return super(UserAdmin, self).get_fieldsets(request, obj)

    def get_form(self, request, obj=None, **kwargs):
        """
        Use special form during user creation
        """
        defaults = {}
        if obj is None:
            defaults.update({
                'form': self.add_form,
                'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
            })
        defaults.update(kwargs)
        return super(UserAdmin, self).get_form(request, obj, **defaults)

    def get_urls(self):
        # Extra URL for the password-change view, placed before the default
        # ModelAdmin URLs so it takes precedence.
        from django.conf.urls.defaults import patterns
        return patterns('',
            (r'^(\d+)/password/$', self.admin_site.admin_view(self.user_change_password))
        ) + super(UserAdmin, self).get_urls()

    @csrf_protect_m
    @transaction.commit_on_success
    def add_view(self, request, form_url='', extra_context=None):
        # It's an error for a user to have add permission but NOT change
        # permission for users. If we allowed such users to add users, they
        # could create superusers, which would mean they would essentially have
        # the permission to change users. To avoid the problem entirely, we
        # disallow users from adding users if they don't have change
        # permission.
        if not self.has_change_permission(request):
            if self.has_add_permission(request) and settings.DEBUG:
                # Raise Http404 in debug mode so that the user gets a helpful
                # error message.
                raise Http404('Your user does not have the "Change user" permission. In order to add users, Django requires that your user account have both the "Add user" and "Change user" permissions set.')
            raise PermissionDenied
        if extra_context is None:
            extra_context = {}
        defaults = {
            'auto_populated_fields': (),
            'username_help_text': self.model._meta.get_field('username').help_text,
        }
        extra_context.update(defaults)
        return super(UserAdmin, self).add_view(request, form_url, extra_context)

    def user_change_password(self, request, id):
        """Render and process the admin password-change form for one user."""
        if not self.has_change_permission(request):
            raise PermissionDenied
        user = get_object_or_404(self.model, pk=id)
        if request.method == 'POST':
            form = self.change_password_form(user, request.POST)
            if form.is_valid():
                new_user = form.save()
                msg = ugettext('Password changed successfully.')
                messages.success(request, msg)
                return HttpResponseRedirect('..')
        else:
            form = self.change_password_form(user)
        fieldsets = [(None, {'fields': form.base_fields.keys()})]
        adminForm = admin.helpers.AdminForm(form, fieldsets, {})
        return render_to_response(self.change_user_password_template or 'admin/auth/user/change_password.html', {
            'title': _('Change password: %s') % escape(user.username),
            'adminForm': adminForm,
            'form': form,
            'is_popup': '_popup' in request.REQUEST,
            'add': True,
            'change': False,
            'has_delete_permission': False,
            'has_change_permission': True,
            'has_absolute_url': False,
            'opts': self.model._meta,
            'original': user,
            'save_as': False,
            'show_save': True,
            'root_path': self.admin_site.root_path,
        }, context_instance=RequestContext(request))

    def response_add(self, request, obj, post_url_continue='../%s/'):
        """
        Determines the HttpResponse for the add_view stage. It mostly defers to
        its superclass implementation but is customized because the User model
        has a slightly different workflow.
        """
        # We should allow further modification of the user just added i.e. the
        # 'Save' button should behave like the 'Save and continue editing'
        # button except in two scenarios:
        # * The user has pressed the 'Save and add another' button
        # * We are adding a user in a popup
        if '_addanother' not in request.POST and '_popup' not in request.POST:
            request.POST['_continue'] = 1
        return super(UserAdmin, self).response_add(request, obj, post_url_continue)
# Register the auth models with the default admin site.
admin.site.register(Group, GroupAdmin)
admin.site.register(User, UserAdmin)
| bsd-3-clause |
BeATz-UnKNoWN/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_renames.py | 203 | 2221 | """Fix incompatible renames
Fixes:
* sys.maxint -> sys.maxsize
"""
# Author: Christian Heimes
# based on Collin Winter's fix_import
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
# Maps module name -> {old attribute name: new attribute name}.
MAPPING = {"sys": {"maxint" : "maxsize"},
          }
# Filled in by build_pattern(): (module, old_attribute) -> new_attribute.
LOOKUP = {}
def alternates(members):
    """Return a lib2to3 alternation pattern such as "('a'|'b')"."""
    quoted = [repr(member) for member in members]
    return "({0})".format("|".join(quoted))
def build_pattern():
    """Yield lib2to3 match patterns for every rename in MAPPING.

    Side effect: populates LOOKUP with (module, old_attr) -> new_attr so
    FixRenames.transform can resolve the replacement name later. For each
    renamed attribute two patterns are produced: one for
    "from module import old_attr [as alias]" and one for the dotted
    "module.old_attr" form.
    """
    for module, renames in MAPPING.items():
        for old_attr, new_attr in renames.items():
            LOOKUP[(module, old_attr)] = new_attr
            yield """
                  import_from< 'from' module_name=%r 'import'
                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
                  """ % (module, old_attr, old_attr)
            yield """
                  power< module_name=%r trailer< '.' attr_name=%r > any* >
                  """ % (module, old_attr)
class FixRenames(fixer_base.BaseFix):
    """Fixer for attribute renames such as sys.maxint -> sys.maxsize."""
    BM_compatible = True
    PATTERN = "|".join(build_pattern())

    order = "pre" # Pre-order tree traversal

    # Don't match the node if it's within another match
    def match(self, node):
        # Reject a node when any ancestor also matches, so a nested
        # occurrence is only rewritten once at the outermost matching node.
        match = super(FixRenames, self).match
        results = match(node)
        if results:
            if any(match(obj) for obj in attr_chain(node, "parent")):
                return False
            return results
        return False

    #def start_tree(self, tree, filename):
    #    super(FixRenames, self).start_tree(tree, filename)
    #    self.replace = {}

    def transform(self, node, results):
        """Replace the matched old attribute name with its renamed form."""
        mod_name = results.get("module_name")
        attr_name = results.get("attr_name")
        #bare_name = results.get("bare_name")
        #import_mod = results.get("module")
        if mod_name and attr_name:
            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
            # Keep the original prefix (leading whitespace/comments).
            attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
| apache-2.0 |
fjruizruano/ngs-protocols | sat_cutter.py | 2 | 1544 | #!/usr/bin/python
from Bio import AlignIO
import sys
print "Usage: sat_cutter.py AlignFileFasta"
try:
file = sys.argv[1]
except:
raw_input = ("Introduce Alignment in FASTA format: ")
#open output file
out = open(file+".cut.fas","w")
#load alignment
align = AlignIO.read(open(file),"fasta")
#get reference sequence
ref = str(align[0].seq)
len_ref = len(ref.replace("-",""))
#get middle point
i = -1
for col in range(0,len(ref)):
if ref[col] != "-":
i += 1
if i == len_ref/2:
point = col
#split alignment in two
left = align[0:,:point]
right = align[0:,point:]
#get reference sequence in both alignment
ref_left = str(left[0].seq)
ref_right = str(right[0].seq)
#for the remaining sequences...
for n in range(1,len(left)):
#load sequence form left alignment
sequen = str(left[n].seq)
#get number or hyphens in 5-prime end
hyphens = 0
for nuc in sequen:
if nuc == "-":
hyphens += 1
else:
break
#get number of nucleotides in left reference
ref_left_nuc = len(ref_left[:hyphens].replace("-",""))
#get cut point for right alignment
ref_right_nuc = 0
cut = 0
for nuc in range(0,len(ref_right)):
if ref_right[nuc] != "-":
ref_right_nuc += 1
if ref_right_nuc == ref_left_nuc:
cut = nuc
#write processed sequence
final_seq = str(right[n][:cut+1].seq)+str(sequen[hyphens:])
out.write(">%s\n%s\n" % (str(left[n].id),final_seq))
out.close()
print "WE ARE DONE!"
| gpl-3.0 |
littlstar/chromium.src | build/android/gyp/package_resources.py | 6 | 4802 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=C0301
"""Package resources into an apk.
See https://android.googlesource.com/platform/tools/base/+/master/legacy/ant-tasks/src/main/java/com/android/ant/AaptExecTask.java
and
https://android.googlesource.com/platform/sdk/+/master/files/ant/build.xml
"""
# pylint: enable=C0301
import optparse
import os
import shutil
from util import build_utils
def ParseArgs():
  """Parses command line options.

  Returns:
    An options object as from optparse.OptionsParser.parse_args()
  """
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)

  # Registration order determines --help output order.
  parser.add_option('--android-sdk', help='path to the Android SDK folder')
  parser.add_option('--android-sdk-tools',
                    help='path to the Android SDK build tools folder')
  parser.add_option('--configuration-name',
                    help='Gyp\'s configuration name (Debug or Release).')
  parser.add_option('--android-manifest', help='AndroidManifest.xml path')
  parser.add_option('--version-code', help='Version code for apk.')
  parser.add_option('--version-name', help='Version name for apk.')
  parser.add_option('--resource-zips',
                    help='zip files containing resources to be packaged')
  parser.add_option('--asset-dir',
                    help='directories containing assets to be packaged')
  parser.add_option('--no-compress', help='disables compression for the '
                    'given comma separated list of extensions')
  parser.add_option('--apk-path',
                    help='Path to output (partial) apk.')

  options, positional_args = parser.parse_args()
  if positional_args:
    parser.error('No positional arguments should be given.')

  # Check that required options have been provided.
  required_options = ('android_sdk', 'android_sdk_tools', 'configuration_name',
                      'android_manifest', 'version_code', 'version_name',
                      'resource_zips', 'asset_dir', 'apk_path')
  build_utils.CheckOptions(options, parser, required=required_options)

  return options
def MoveImagesToNonMdpiFolders(res_root):
  """Move images from drawable-*-mdpi-* folders to drawable-* folders.

  Why? http://crbug.com/289843
  """
  for dir_name in os.listdir(res_root):
    components = dir_name.split('-')
    # Only drawable directories carrying an 'mdpi' qualifier are affected.
    if components[0] != 'drawable' or 'mdpi' not in components:
      continue
    src_dir = os.path.join(res_root, dir_name)
    if not os.path.isdir(src_dir):
      continue
    # Strip every 'mdpi' qualifier to obtain the destination folder name.
    dst_components = [c for c in components if c != 'mdpi']
    assert dst_components != components
    dst_dir = os.path.join(res_root, '-'.join(dst_components))
    build_utils.MakeDirectory(dst_dir)
    for file_name in os.listdir(src_dir):
      if not file_name.endswith('.png'):
        continue
      src_file = os.path.join(src_dir, file_name)
      dst_file = os.path.join(dst_dir, file_name)
      assert not os.path.lexists(dst_file)
      shutil.move(src_file, dst_file)
def main():
  """Packages resources and assets into a (partial) apk via aapt."""
  options = ParseArgs()
  android_jar = os.path.join(options.android_sdk, 'android.jar')
  aapt = os.path.join(options.android_sdk_tools, 'aapt')

  with build_utils.TempDir() as temp_dir:
    # Base aapt invocation; resource directories are appended below.
    cmd = [aapt,
           'package',
           '--version-code', options.version_code,
           '--version-name', options.version_name,
           '-M', options.android_manifest,
           '--no-crunch',
           '-f',
           '--auto-add-overlay',
           '-I', android_jar,
           '-F', options.apk_path,
           ]

    if options.no_compress:
      for ext in options.no_compress.split(','):
        cmd += ['-0', ext]
    if os.path.exists(options.asset_dir):
      cmd += ['-A', options.asset_dir]

    # Unpack each dependency resource zip into its own temp subdirectory
    # and hand that directory to aapt with -S.
    for z in build_utils.ParseGypList(options.resource_zips):
      subdir = os.path.join(temp_dir, os.path.basename(z))
      if os.path.exists(subdir):
        raise Exception('Resource zip name conflict: ' + os.path.basename(z))
      build_utils.ExtractAll(z, path=subdir)
      MoveImagesToNonMdpiFolders(subdir)
      cmd += ['-S', subdir]

    if 'Debug' in options.configuration_name:
      cmd += ['--debug-mode']

    build_utils.CheckOutput(cmd, print_stdout=False, print_stderr=False)

  if options.depfile:
    build_utils.WriteDepfile(
        options.depfile,
        build_utils.GetPythonDependencies())
# Script entry point.
if __name__ == '__main__':
  main()
| bsd-3-clause |
dgillis/scrapy | scrapy/spiders/__init__.py | 134 | 3606 | """
Base class for Scrapy spiders
See documentation in docs/topics/spiders.rst
"""
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import url_is_from_spider
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
class Spider(object_ref):
    """Base class for scrapy spiders. All spiders must inherit from this
    class.
    """

    name = None
    # Per-spider settings overrides applied by update_settings().
    custom_settings = None

    def __init__(self, name=None, **kwargs):
        # A name is mandatory: either passed in or set on the subclass.
        if name is not None:
            self.name = name
        elif not getattr(self, 'name', None):
            raise ValueError("%s must have a name" % type(self).__name__)
        # Arbitrary keyword arguments become instance attributes.
        self.__dict__.update(kwargs)
        if not hasattr(self, 'start_urls'):
            self.start_urls = []

    @property
    def logger(self):
        # Logger named after the spider; the adapter injects the spider
        # instance into every log record.
        logger = logging.getLogger(self.name)
        return logging.LoggerAdapter(logger, {'spider': self})

    def log(self, message, level=logging.DEBUG, **kw):
        """Log the given message at the given log level

        This helper wraps a log call to the logger within the spider, but you
        can use it directly (e.g. Spider.logger.info('msg')) or use any other
        Python logger too.
        """
        self.logger.log(level, message, **kw)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Instantiate the spider and bind it to the given crawler."""
        spider = cls(*args, **kwargs)
        spider._set_crawler(crawler)
        return spider

    def set_crawler(self, crawler):
        # Deprecated public entry point; kept for backwards compatibility.
        warnings.warn("set_crawler is deprecated, instantiate and bound the "
                      "spider to this crawler with from_crawler method "
                      "instead.",
                      category=ScrapyDeprecationWarning, stacklevel=2)
        assert not hasattr(self, 'crawler'), "Spider already bounded to a " \
                                             "crawler"
        self._set_crawler(crawler)

    def _set_crawler(self, crawler):
        # Bind crawler/settings and get notified when the spider closes.
        self.crawler = crawler
        self.settings = crawler.settings
        crawler.signals.connect(self.close, signals.spider_closed)

    def start_requests(self):
        """Yield the initial Requests, one per URL in start_urls."""
        for url in self.start_urls:
            yield self.make_requests_from_url(url)

    def make_requests_from_url(self, url):
        return Request(url, dont_filter=True)

    def parse(self, response):
        # Default callback; subclasses must override it.
        raise NotImplementedError

    @classmethod
    def update_settings(cls, settings):
        """Apply this spider's custom_settings at 'spider' priority."""
        settings.setdict(cls.custom_settings or {}, priority='spider')

    @classmethod
    def handles_request(cls, request):
        return url_is_from_spider(request.url, cls)

    @staticmethod
    def close(spider, reason):
        # Invoke an optional 'closed' callback defined on the spider.
        closed = getattr(spider, 'closed', None)
        if callable(closed):
            return closed(reason)

    def __str__(self):
        return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self))

    __repr__ = __str__
BaseSpider = create_deprecated_class('BaseSpider', Spider)
class ObsoleteClass(object):
    """Stand-in for a removed module attribute.

    Any attribute access raises AttributeError carrying the explanatory
    message supplied at construction time.
    """

    def __init__(self, message):
        self.message = message

    def __getattr__(self, name):
        # Only invoked when normal lookup fails, so reading self.message
        # (set in __init__) does not recurse into this method.
        raise AttributeError(self.message)
# Accessing any attribute of the removed 'spiders' manager raises with a
# migration hint.
spiders = ObsoleteClass(
    '"from scrapy.spider import spiders" no longer works - use '
    '"from scrapy.spiderloader import SpiderLoader" and instantiate '
    'it with your project settings"'
)
# Top-level imports
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider
from scrapy.spiders.sitemap import SitemapSpider
| bsd-3-clause |
gbaier/despeckCL | examples/polsar_test.py | 1 | 3037 | """ example the downloads some fully polarimetric data from ESA's
PolSAR test data set and filters it with NL-SAR """
import os
import urllib.request
import zipfile
import gdal
import matplotlib.pyplot as plt
import numpy as np
# Add build directory to the python search paths for finding the module
# without installing it
import sys
sys.path.insert(0, '../build/swig/python')
import despeckcl
###############################
# #
# Get some test data from ESA #
# #
###############################
# Download location of ESA's AIRSAR PolSAR test data set.
URL = 'https://earth.esa.int/documents/653194/658149/'
FILENAME = 'AIRSAR_Flevoland'
DATANAME = 'FLEVOL.STK'
# extracts data to use for training
TRAIN_SUB = np.s_[:, :, 200:230, 200:230]
# extracts data to be filtered and plotted
AREA_SUB = np.s_[:, :, :400, :600]
def stk_reader(stk_filename):
    """Read an AIRSAR .STK file into a 3x3 covariance matrix stack.

    See http://gdal.org/frmt_airsar.html for a description of the band
    layout: the six stored bands are the upper-triangular entries
    (0,0), (0,1), (0,2), (1,1), (1,2), (2,2); the lower triangle is
    filled with their complex conjugates (Hermitian matrix).
    """
    bands = gdal.Open(stk_filename).ReadAsArray()
    mat = np.empty((3, 3, *bands.shape[1:]), dtype=np.complex64)
    upper_triangle = [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]
    for (row, col), band in zip(upper_triangle, bands):
        mat[row, col] = band
        if row != col:
            mat[col, row] = band.conj()
    return mat
# Load the covariance data, downloading and unpacking it on first run.
try:
    COVMAT = stk_reader(DATANAME)
except FileNotFoundError:
    urllib.request.urlretrieve(URL + FILENAME, FILENAME + '.zip')
    with zipfile.ZipFile(FILENAME + '.zip') as zf:
        zf.extract(DATANAME)
    COVMAT = stk_reader(DATANAME)

#############
#           #
# Filtering #
#           #
#############

# NL-SAR filter parameters passed straight to despeckcl.nlsar().
PARAMS = {
    'search_window_size': 21,
    'patch_sizes': [3, 5, 7],
    'scale_sizes': [1, 3],
    'h': 3.0,
    'c': 49,
    'enabled_log_levels': ['warning', 'fatal', 'error'], #, 'debug', 'info']
}

# store and load NL-SAR statistics
STATS_FILENAME = 'polsar_stats.txt'

print('getting similarity statistics')
if os.path.isfile(STATS_FILENAME):
    print('found saved statistics... restoring')
    NLSAR_STATS = despeckcl.load_nlsar_stats_collection(STATS_FILENAME)
else:
    # Train on the small homogeneous TRAIN_SUB patch, then cache to disk.
    print('computing statistics')
    NLSAR_STATS = despeckcl.nlsar_train(
        COVMAT[TRAIN_SUB], PARAMS['patch_sizes'], PARAMS['scale_sizes'])
    print('storing statistics')
    despeckcl.store_nlsar_stats_collection(NLSAR_STATS, STATS_FILENAME)

print('filtering')
COVMAT_FILT = despeckcl.nlsar(
    COVMAT[AREA_SUB], nlsar_stats=NLSAR_STATS, **PARAMS)

############
#          #
# Plotting #
#          #
############

# Show input and filtered images side by side as false-colour composites
# built from the covariance matrix diagonal.
fig = plt.figure()
ax = None
for nr, (data, title) in enumerate(
        zip([COVMAT[AREA_SUB], COVMAT_FILT], ['input', 'filtered']), 1):
    # extract diagonal elements (small offset avoids log10(0))
    diag = np.abs(np.diagonal(data)) + 0.000001
    # conversion to dB and normalization
    rgb_comp = 10 * np.log10(diag)
    rgb_comp_norm = rgb_comp - rgb_comp.min()
    rgb_comp_norm /= rgb_comp_norm.max()
    # shared axes keep zoom/pan in sync between the two panels
    ax = fig.add_subplot(1, 2, nr, sharex=ax, sharey=ax)
    ax.imshow(rgb_comp_norm)
    ax.set_title(title)
plt.show()
| gpl-3.0 |
ros-infrastructure/ros_buildfarm | scripts/doc/create_rosdoc2_task_generator.py | 1 | 2457 | #!/usr/bin/env python3
# Copyright 2015-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from ros_buildfarm.argument import \
add_argument_distribution_repository_key_files
from ros_buildfarm.argument import add_argument_distribution_repository_urls
from ros_buildfarm.argument import add_argument_dockerfile_dir
from ros_buildfarm.common import get_distribution_repository_keys
from ros_buildfarm.common import get_user_id
from ros_buildfarm.templates import create_dockerfile
def main(argv=sys.argv[1:]):
    """Generate the Dockerfile used by the rosdoc2 doc job."""
    parser = argparse.ArgumentParser(
        description="Generate a 'Dockerfile' for the doc job")
    # Registration order matters for --help output.
    simple_required_args = (
        ('--workspace-root', 'The root path of the workspace'),
        ('--os-name', "The OS name (e.g. 'ubuntu')"),
        ('--os-code-name', "The OS code name (e.g. 'xenial')"),
        ('--arch', "The architecture (e.g. 'amd64')"),
    )
    for flag, help_text in simple_required_args:
        parser.add_argument(flag, required=True, help=help_text)
    add_argument_distribution_repository_urls(parser)
    add_argument_distribution_repository_key_files(parser)
    add_argument_dockerfile_dir(parser)
    args = parser.parse_args(argv)

    print('Running generation of documentation')

    # Template data consumed by the empy Dockerfile template.
    data = {
        'os_name': args.os_name,
        'os_code_name': args.os_code_name,
        'arch': args.arch,
        'distribution_repository_urls': args.distribution_repository_urls,
        'distribution_repository_keys': get_distribution_repository_keys(
            args.distribution_repository_urls,
            args.distribution_repository_key_files),
        'uid': get_user_id(),
    }
    create_dockerfile('doc/rosdoc2_task.Dockerfile.em', data, args.dockerfile_dir)
# Script entry point; propagate main()'s return value as the exit code.
if __name__ == '__main__':
    sys.exit(main())
| apache-2.0 |
weimingtom/python-for-android | python3-alpha/extra_modules/gdata/contacts/__init__.py | 119 | 28208 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to ElementWrapper objects used with Google Contacts."""
__author__ = 'dbrattli (Dag Brattli)'
import atom
import gdata
## Constants from http://code.google.com/apis/gdata/elements.html ##
# Generic relationship values used by several elements (email, phone, ...).
REL_HOME = 'http://schemas.google.com/g/2005#home'
REL_WORK = 'http://schemas.google.com/g/2005#work'
REL_OTHER = 'http://schemas.google.com/g/2005#other'

# Instant-messaging protocol identifiers.
# AOL Instant Messenger protocol
IM_AIM = 'http://schemas.google.com/g/2005#AIM'
IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol
IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol
IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol
IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol
# Google Talk protocol
IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol
IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol
IM_NETMEETING = 'http://schemas.google.com/g/2005#netmeeting' # NetMeeting

# Link relations for fetching and editing a contact's photo.
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'

# Different phone types, for more info see:
# http://code.google.com/apis/gdata/docs/2.0/elements.html#gdPhoneNumber
PHONE_CAR = 'http://schemas.google.com/g/2005#car'
PHONE_FAX = 'http://schemas.google.com/g/2005#fax'
PHONE_GENERAL = 'http://schemas.google.com/g/2005#general'
PHONE_HOME = REL_HOME
PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax'
PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension'
PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile'
PHONE_OTHER = REL_OTHER
PHONE_PAGER = 'http://schemas.google.com/g/2005#pager'
PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite'
PHONE_VOIP = 'http://schemas.google.com/g/2005#voip'
PHONE_WORK = REL_WORK
PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax'
PHONE_WORK_MOBILE = 'http://schemas.google.com/g/2005#work_mobile'
PHONE_WORK_PAGER = 'http://schemas.google.com/g/2005#work_pager'
PHONE_MAIN = 'http://schemas.google.com/g/2005#main'
PHONE_ASSISTANT = 'http://schemas.google.com/g/2005#assistant'
PHONE_CALLBACK = 'http://schemas.google.com/g/2005#callback'
PHONE_COMPANY_MAIN = 'http://schemas.google.com/g/2005#company_main'
PHONE_ISDN = 'http://schemas.google.com/g/2005#isdn'
PHONE_OTHER_FAX = 'http://schemas.google.com/g/2005#other_fax'
PHONE_RADIO = 'http://schemas.google.com/g/2005#radio'
PHONE_TELEX = 'http://schemas.google.com/g/2005#telex'
PHONE_TTY_TDD = 'http://schemas.google.com/g/2005#tty_tdd'

# Miscellaneous element values and the Contacts XML namespace.
EXTERNAL_ID_ORGANIZATION = 'organization'

RELATION_MANAGER = 'manager'

CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
class GDataBase(atom.AtomBase):
  """Shared base for Contacts elements living in the gd XML namespace."""

  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, text=None, extension_elements=None,
               extension_attributes=None):
    self.text = text
    # Substitute fresh empty containers for missing extension data.
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
class ContactsBase(GDataBase):
  """The Google Contacts intermediate class for Contacts namespace."""
  # Same behavior as GDataBase, but elements live in the Contacts namespace.
  _namespace = CONTACTS_NAMESPACE
class OrgName(GDataBase):
  """The Google Contacts OrgName element (gd:orgName, company name)."""
  _tag = 'orgName'
class OrgTitle(GDataBase):
  """The Google Contacts OrgTitle element (gd:orgTitle, job title)."""
  _tag = 'orgTitle'
class OrgDepartment(GDataBase):
  """The Google Contacts OrgDepartment element (gd:orgDepartment)."""
  _tag = 'orgDepartment'
class OrgJobDescription(GDataBase):
  """The Google Contacts OrgJobDescription element (gd:orgJobDescription)."""
  _tag = 'orgJobDescription'
class Where(GDataBase):
  """The Google Contacts Where element (gd:where, a place reference)."""
  _tag = 'where'
  _children = GDataBase._children.copy()
  _attributes = GDataBase._attributes.copy()
  # XML attribute name -> Python attribute name mappings.
  _attributes['rel'] = 'rel'
  _attributes['label'] = 'label'
  _attributes['valueString'] = 'value_string'

  def __init__(self, value_string=None, rel=None, label=None,
               text=None, extension_elements=None, extension_attributes=None):
    GDataBase.__init__(self, text=text, extension_elements=extension_elements,
                       extension_attributes=extension_attributes)
    self.rel = rel
    self.label = label
    # Human-readable description of the place.
    self.value_string = value_string
class When(GDataBase):
    """The <gd:when> element: a time interval (startTime/endTime) with a label."""
    _tag = 'when'
    _children = GDataBase._children.copy()
    _attributes = GDataBase._attributes.copy()
    _attributes['startTime'] = 'start_time'
    _attributes['endTime'] = 'end_time'
    _attributes['label'] = 'label'

    def __init__(self, start_time=None, end_time=None, label=None,
                 text=None, extension_elements=None, extension_attributes=None):
        GDataBase.__init__(self, text=text, extension_elements=extension_elements,
                           extension_attributes=extension_attributes)
        self.start_time = start_time
        self.end_time = end_time
        self.label = label
class Organization(GDataBase):
    """The <gd:organization> element: a contact's company/title information."""
    _tag = 'organization'
    _children = GDataBase._children.copy()
    _attributes = GDataBase._attributes.copy()
    _attributes['label'] = 'label'
    _attributes['rel'] = 'rel'
    _attributes['primary'] = 'primary'
    _children['{%s}orgName' % GDataBase._namespace] = (
        'org_name', OrgName)
    _children['{%s}orgTitle' % GDataBase._namespace] = (
        'org_title', OrgTitle)
    _children['{%s}orgDepartment' % GDataBase._namespace] = (
        'org_department', OrgDepartment)
    _children['{%s}orgJobDescription' % GDataBase._namespace] = (
        'org_job_description', OrgJobDescription)
    # NOTE(review): the <gd:where> child registration below is commented out,
    # so the `where` value stored in __init__ is never (de)serialized —
    # confirm this is intended.
    #_children['{%s}where' % GDataBase._namespace] = ('where', Where)

    def __init__(self, label=None, rel=None, primary='false', org_name=None,
                 org_title=None, org_department=None, org_job_description=None,
                 where=None, text=None,
                 extension_elements=None, extension_attributes=None):
        GDataBase.__init__(self, text=text, extension_elements=extension_elements,
                           extension_attributes=extension_attributes)
        self.label = label
        # REL_OTHER is a module-level constant defined earlier in this file.
        self.rel = rel or REL_OTHER
        self.primary = primary
        self.org_name = org_name
        self.org_title = org_title
        self.org_department = org_department
        self.org_job_description = org_job_description
        self.where = where
class PostalAddress(GDataBase):
    """The <gd:postalAddress> element: a free-form postal address (text)."""
    _tag = 'postalAddress'
    _children = GDataBase._children.copy()
    _attributes = GDataBase._attributes.copy()
    _attributes['rel'] = 'rel'
    _attributes['primary'] = 'primary'

    def __init__(self, primary=None, rel=None, text=None,
                 extension_elements=None, extension_attributes=None):
        GDataBase.__init__(self, text=text, extension_elements=extension_elements,
                           extension_attributes=extension_attributes)
        self.rel = rel or REL_OTHER
        self.primary = primary
class FormattedAddress(GDataBase):
    """The <gd:formattedAddress> element: full address text, ready to display."""
    _tag = 'formattedAddress'
class StructuredPostalAddress(GDataBase):
    """The <gd:structuredPostalAddress> element.

    Only the <gd:formattedAddress> child is mapped here; other structured
    sub-elements defined by the schema are kept in extension_elements.
    """
    _tag = 'structuredPostalAddress'
    _children = GDataBase._children.copy()
    _attributes = GDataBase._attributes.copy()
    _attributes['rel'] = 'rel'
    _attributes['primary'] = 'primary'
    _children['{%s}formattedAddress' % GDataBase._namespace] = (
        'formatted_address', FormattedAddress)

    def __init__(self, rel=None, primary=None,
                 formatted_address=None, text=None,
                 extension_elements=None, extension_attributes=None):
        GDataBase.__init__(self, text=text, extension_elements=extension_elements,
                           extension_attributes=extension_attributes)
        self.rel = rel or REL_OTHER
        self.primary = primary
        self.formatted_address = formatted_address
class IM(GDataBase):
    """The <gd:im> element: an instant-messaging address (address + protocol)."""
    _tag = 'im'
    _children = GDataBase._children.copy()
    _attributes = GDataBase._attributes.copy()
    _attributes['address'] = 'address'
    _attributes['primary'] = 'primary'
    _attributes['protocol'] = 'protocol'
    _attributes['label'] = 'label'
    _attributes['rel'] = 'rel'

    def __init__(self, primary='false', rel=None, address=None, protocol=None,
                 label=None, text=None,
                 extension_elements=None, extension_attributes=None):
        GDataBase.__init__(self, text=text, extension_elements=extension_elements,
                           extension_attributes=extension_attributes)
        self.protocol = protocol
        self.address = address
        self.primary = primary
        self.rel = rel or REL_OTHER
        self.label = label
class Email(GDataBase):
    """The <gd:email> element: an e-mail address with rel/label/primary flags."""
    _tag = 'email'
    _children = GDataBase._children.copy()
    _attributes = GDataBase._attributes.copy()
    _attributes['address'] = 'address'
    _attributes['primary'] = 'primary'
    _attributes['rel'] = 'rel'
    _attributes['label'] = 'label'

    def __init__(self, label=None, rel=None, address=None, primary='false',
                 text=None, extension_elements=None, extension_attributes=None):
        GDataBase.__init__(self, text=text, extension_elements=extension_elements,
                           extension_attributes=extension_attributes)
        self.label = label
        self.rel = rel or REL_OTHER
        self.address = address
        self.primary = primary
class PhoneNumber(GDataBase):
    """The <gd:phoneNumber> element: number text plus rel/label/uri/primary."""
    _tag = 'phoneNumber'
    _children = GDataBase._children.copy()
    _attributes = GDataBase._attributes.copy()
    _attributes['label'] = 'label'
    _attributes['rel'] = 'rel'
    _attributes['uri'] = 'uri'
    _attributes['primary'] = 'primary'

    def __init__(self, label=None, rel=None, uri=None, primary='false',
                 text=None, extension_elements=None, extension_attributes=None):
        GDataBase.__init__(self, text=text, extension_elements=extension_elements,
                           extension_attributes=extension_attributes)
        self.label = label
        self.rel = rel or REL_OTHER
        self.uri = uri
        self.primary = primary
class Nickname(ContactsBase):
    """The <gContact:nickname> element: the contact's nickname (text)."""
    _tag = 'nickname'
class Occupation(ContactsBase):
    """The <gContact:occupation> element: the contact's occupation (text)."""
    _tag = 'occupation'
class Gender(ContactsBase):
    """The <gContact:gender> element; the gender is held in the value attribute."""
    _tag = 'gender'
    _children = ContactsBase._children.copy()
    _attributes = ContactsBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, value=None,
                 text=None, extension_elements=None, extension_attributes=None):
        ContactsBase.__init__(self, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
        self.value = value
class Birthday(ContactsBase):
    """The <gContact:birthday> element; the date is held in the when attribute."""
    _tag = 'birthday'
    _children = ContactsBase._children.copy()
    _attributes = ContactsBase._attributes.copy()
    _attributes['when'] = 'when'

    def __init__(self, when=None,
                 text=None, extension_elements=None, extension_attributes=None):
        ContactsBase.__init__(self, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
        self.when = when
class Relation(ContactsBase):
    """The <gContact:relation> element: a related person, typed via rel/label."""
    _tag = 'relation'
    _children = ContactsBase._children.copy()
    _attributes = ContactsBase._attributes.copy()
    _attributes['label'] = 'label'
    _attributes['rel'] = 'rel'

    def __init__(self, label=None, rel=None,
                 text=None, extension_elements=None, extension_attributes=None):
        ContactsBase.__init__(self, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
        self.label = label
        self.rel = rel
def RelationFromString(xml_string):
    """Parse a <gContact:relation> XML string into a Relation instance."""
    return atom.CreateClassFromXMLString(Relation, xml_string)
class UserDefinedField(ContactsBase):
    """The <gContact:userDefinedField> element: an arbitrary key/value pair."""
    _tag = 'userDefinedField'
    _children = ContactsBase._children.copy()
    _attributes = ContactsBase._attributes.copy()
    _attributes['key'] = 'key'
    _attributes['value'] = 'value'

    def __init__(self, key=None, value=None,
                 text=None, extension_elements=None, extension_attributes=None):
        ContactsBase.__init__(self, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
        self.key = key
        self.value = value
def UserDefinedFieldFromString(xml_string):
    """Parse a <gContact:userDefinedField> XML string into a UserDefinedField."""
    return atom.CreateClassFromXMLString(UserDefinedField, xml_string)
class Website(ContactsBase):
    """The <gContact:website> element: a URL (href) with rel/label/primary."""
    _tag = 'website'
    _children = ContactsBase._children.copy()
    _attributes = ContactsBase._attributes.copy()
    _attributes['href'] = 'href'
    _attributes['label'] = 'label'
    _attributes['primary'] = 'primary'
    _attributes['rel'] = 'rel'

    def __init__(self, href=None, label=None, primary='false', rel=None,
                 text=None, extension_elements=None, extension_attributes=None):
        ContactsBase.__init__(self, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
        self.href = href
        self.label = label
        self.primary = primary
        self.rel = rel
def WebsiteFromString(xml_string):
    """Parse a <gContact:website> XML string into a Website instance."""
    return atom.CreateClassFromXMLString(Website, xml_string)
class ExternalId(ContactsBase):
    """The <gContact:externalId> element: an id assigned by an external system."""
    _tag = 'externalId'
    _children = ContactsBase._children.copy()
    _attributes = ContactsBase._attributes.copy()
    _attributes['label'] = 'label'
    _attributes['rel'] = 'rel'
    _attributes['value'] = 'value'

    def __init__(self, label=None, rel=None, value=None,
                 text=None, extension_elements=None, extension_attributes=None):
        ContactsBase.__init__(self, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
        self.label = label
        self.rel = rel
        self.value = value
def ExternalIdFromString(xml_string):
    """Parse a <gContact:externalId> XML string into an ExternalId instance."""
    return atom.CreateClassFromXMLString(ExternalId, xml_string)
class Event(ContactsBase):
    """The <gContact:event> element: a dated event, with its <gd:when> child."""
    _tag = 'event'
    _children = ContactsBase._children.copy()
    _attributes = ContactsBase._attributes.copy()
    _attributes['label'] = 'label'
    _attributes['rel'] = 'rel'
    _children['{%s}when' % ContactsBase._namespace] = ('when', When)

    def __init__(self, label=None, rel=None, when=None,
                 text=None, extension_elements=None, extension_attributes=None):
        ContactsBase.__init__(self, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
        self.label = label
        self.rel = rel
        self.when = when
def EventFromString(xml_string):
    """Parse a <gContact:event> XML string into an Event instance."""
    return atom.CreateClassFromXMLString(Event, xml_string)
class Deleted(GDataBase):
    """The <gd:deleted> element: marks an entry as deleted (presence-only)."""
    _tag = 'deleted'
class GroupMembershipInfo(ContactsBase):
    """The <gContact:groupMembershipInfo> element.

    href points at the group entry; deleted flags a removed membership.
    """
    _tag = 'groupMembershipInfo'
    _children = ContactsBase._children.copy()
    _attributes = ContactsBase._attributes.copy()
    _attributes['deleted'] = 'deleted'
    _attributes['href'] = 'href'

    def __init__(self, deleted=None, href=None, text=None,
                 extension_elements=None, extension_attributes=None):
        ContactsBase.__init__(self, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
        self.deleted = deleted
        self.href = href
class PersonEntry(gdata.BatchEntry):
    """Base class for ContactEntry and ProfileEntry.

    Registers all person-level extension elements (organizations, phone
    numbers, e-mail addresses, etc.).  List-valued children are declared
    with a one-element list as the second tuple member.
    """
    _children = gdata.BatchEntry._children.copy()
    _children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
        'organization', [Organization])
    _children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = (
        'phone_number', [PhoneNumber])
    _children['{%s}nickname' % CONTACTS_NAMESPACE] = ('nickname', Nickname)
    _children['{%s}occupation' % CONTACTS_NAMESPACE] = ('occupation', Occupation)
    _children['{%s}gender' % CONTACTS_NAMESPACE] = ('gender', Gender)
    _children['{%s}birthday' % CONTACTS_NAMESPACE] = ('birthday', Birthday)
    _children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address',
                                                              [PostalAddress])
    _children['{%s}structuredPostalAddress' % gdata.GDATA_NAMESPACE] = (
        'structured_postal_address', [StructuredPostalAddress])
    _children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email])
    _children['{%s}im' % gdata.GDATA_NAMESPACE] = ('im', [IM])
    _children['{%s}relation' % CONTACTS_NAMESPACE] = ('relation', [Relation])
    _children['{%s}userDefinedField' % CONTACTS_NAMESPACE] = (
        'user_defined_field', [UserDefinedField])
    _children['{%s}website' % CONTACTS_NAMESPACE] = ('website', [Website])
    _children['{%s}externalId' % CONTACTS_NAMESPACE] = (
        'external_id', [ExternalId])
    _children['{%s}event' % CONTACTS_NAMESPACE] = ('event', [Event])
    # The following line should be removed once the Python support
    # for GData 2.0 is mature.
    _attributes = gdata.BatchEntry._attributes.copy()
    _attributes['{%s}etag' % gdata.GDATA_NAMESPACE] = 'etag'

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None, organization=None, phone_number=None,
                 nickname=None, occupation=None, gender=None, birthday=None,
                 postal_address=None, structured_postal_address=None, email=None,
                 im=None, relation=None, user_defined_field=None, website=None,
                 external_id=None, event=None, batch_operation=None,
                 batch_id=None, batch_status=None, text=None,
                 extension_elements=None, extension_attributes=None, etag=None):
        gdata.BatchEntry.__init__(self, author=author, category=category,
                                  content=content, atom_id=atom_id, link=link,
                                  published=published,
                                  batch_operation=batch_operation,
                                  batch_id=batch_id, batch_status=batch_status,
                                  title=title, updated=updated)
        # List-valued fields default to a fresh list per instance.
        self.organization = organization or []
        self.phone_number = phone_number or []
        self.nickname = nickname
        self.occupation = occupation
        self.gender = gender
        self.birthday = birthday
        self.postal_address = postal_address or []
        self.structured_postal_address = structured_postal_address or []
        self.email = email or []
        self.im = im or []
        self.relation = relation or []
        self.user_defined_field = user_defined_field or []
        self.website = website or []
        self.external_id = external_id or []
        self.event = event or []
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
        # The following line should be removed once the Python support
        # for GData 2.0 is mature.
        self.etag = etag
class ContactEntry(PersonEntry):
    """A Google Contact flavor of an Atom Entry."""
    _children = PersonEntry._children.copy()
    _children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted)
    _children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = (
        'group_membership_info', [GroupMembershipInfo])
    _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
        'extended_property', [gdata.ExtendedProperty])
    # Overwrite the organization rule in PersonEntry so that a ContactEntry
    # may only contain one <gd:organization> element.
    _children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
        'organization', Organization)

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None, organization=None, phone_number=None,
                 nickname=None, occupation=None, gender=None, birthday=None,
                 postal_address=None, structured_postal_address=None, email=None,
                 im=None, relation=None, user_defined_field=None, website=None,
                 external_id=None, event=None, batch_operation=None,
                 batch_id=None, batch_status=None, text=None,
                 extension_elements=None, extension_attributes=None, etag=None,
                 deleted=None, extended_property=None,
                 group_membership_info=None):
        PersonEntry.__init__(self, author=author, category=category,
                             content=content, atom_id=atom_id, link=link,
                             published=published, title=title, updated=updated,
                             organization=organization, phone_number=phone_number,
                             nickname=nickname, occupation=occupation,
                             gender=gender, birthday=birthday,
                             postal_address=postal_address,
                             structured_postal_address=structured_postal_address,
                             email=email, im=im, relation=relation,
                             user_defined_field=user_defined_field,
                             website=website, external_id=external_id, event=event,
                             batch_operation=batch_operation, batch_id=batch_id,
                             batch_status=batch_status, text=text,
                             extension_elements=extension_elements,
                             extension_attributes=extension_attributes, etag=etag)
        self.deleted = deleted
        self.extended_property = extended_property or []
        self.group_membership_info = group_membership_info or []

    def GetPhotoLink(self):
        """Return the atom link for the contact's photo, or None.

        PHOTO_LINK_REL is a module-level constant defined earlier in this file.
        """
        for a_link in self.link:
            if a_link.rel == PHOTO_LINK_REL:
                return a_link
        return None

    def GetPhotoEditLink(self):
        """Return the atom link used to update the contact's photo, or None."""
        for a_link in self.link:
            if a_link.rel == PHOTO_EDIT_LINK_REL:
                return a_link
        return None
def ContactEntryFromString(xml_string):
    """Parse an Atom entry XML string into a ContactEntry instance."""
    return atom.CreateClassFromXMLString(ContactEntry, xml_string)
class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder):
    """A Google Contacts feed flavor of an Atom Feed (entries are ContactEntry)."""
    _children = gdata.BatchFeed._children.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ContactEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        gdata.BatchFeed.__init__(self, author=author, category=category,
                                 contributor=contributor, generator=generator,
                                 icon=icon, atom_id=atom_id, link=link,
                                 logo=logo, rights=rights, subtitle=subtitle,
                                 title=title, updated=updated, entry=entry,
                                 total_results=total_results,
                                 start_index=start_index,
                                 items_per_page=items_per_page,
                                 extension_elements=extension_elements,
                                 extension_attributes=extension_attributes,
                                 text=text)
def ContactsFeedFromString(xml_string):
    """Parse an Atom feed XML string into a ContactsFeed instance."""
    return atom.CreateClassFromXMLString(ContactsFeed, xml_string)
class GroupEntry(gdata.BatchEntry):
    """Represents a contact group.

    Accepts the usual Atom entry fields plus a list of
    <gd:extendedProperty> children in extended_property.
    """
    _children = gdata.BatchEntry._children.copy()
    _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
        'extended_property', [gdata.ExtendedProperty])

    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None,
                 rights=None, source=None, summary=None, control=None,
                 title=None, updated=None,
                 extended_property=None, batch_operation=None, batch_id=None,
                 batch_status=None,
                 extension_elements=None, extension_attributes=None, text=None):
        gdata.BatchEntry.__init__(self, author=author, category=category,
                                  content=content,
                                  atom_id=atom_id, link=link, published=published,
                                  batch_operation=batch_operation,
                                  batch_id=batch_id, batch_status=batch_status,
                                  title=title, updated=updated)
        self.extended_property = extended_property or []
        # Fix: previously the text/extension arguments were accepted but
        # silently dropped (neither forwarded to the base call nor stored).
        # Store them exactly as PersonEntry does, so callers' values survive.
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def GroupEntryFromString(xml_string):
    """Parse an Atom entry XML string into a GroupEntry instance."""
    return atom.CreateClassFromXMLString(GroupEntry, xml_string)
class GroupsFeed(gdata.BatchFeed):
    """A Google contact groups feed flavor of an Atom Feed (entries are GroupEntry)."""
    _children = gdata.BatchFeed._children.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry])
def GroupsFeedFromString(xml_string):
    """Parse an Atom feed XML string into a GroupsFeed instance."""
    return atom.CreateClassFromXMLString(GroupsFeed, xml_string)
class ProfileEntry(PersonEntry):
    """A Google Profiles flavor of an Atom Entry (inherits all person fields)."""
def ProfileEntryFromString(xml_string):
    """Converts an XML string into a ProfileEntry object.

    Args:
      xml_string: string The XML describing a Profile entry.

    Returns:
      A ProfileEntry object corresponding to the given XML.
    """
    return atom.CreateClassFromXMLString(ProfileEntry, xml_string)
class ProfilesFeed(gdata.BatchFeed, gdata.LinkFinder):
    """A Google Profiles feed flavor of an Atom Feed (entries are ProfileEntry)."""
    _children = gdata.BatchFeed._children.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry])

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None, logo=None,
                 rights=None, subtitle=None, title=None, updated=None,
                 entry=None, total_results=None, start_index=None,
                 items_per_page=None, extension_elements=None,
                 extension_attributes=None, text=None):
        gdata.BatchFeed.__init__(self, author=author, category=category,
                                 contributor=contributor, generator=generator,
                                 icon=icon, atom_id=atom_id, link=link,
                                 logo=logo, rights=rights, subtitle=subtitle,
                                 title=title, updated=updated, entry=entry,
                                 total_results=total_results,
                                 start_index=start_index,
                                 items_per_page=items_per_page,
                                 extension_elements=extension_elements,
                                 extension_attributes=extension_attributes,
                                 text=text)
def ProfilesFeedFromString(xml_string):
    """Converts an XML string into a ProfilesFeed object.

    Args:
      xml_string: string The XML describing a Profiles feed.

    Returns:
      A ProfilesFeed object corresponding to the given XML.
    """
    return atom.CreateClassFromXMLString(ProfilesFeed, xml_string)
| apache-2.0 |
nickmoline/feedsanitizer | django/contrib/localflavor/mx/mx_states.py | 350 | 1251 | # -*- coding: utf-8 -*-
"""
A list of Mexican states for use as `choices` in a formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
from django.utils.translation import ugettext_lazy as _
# (code, localized name) pairs for every Mexican state plus the Distrito
# Federal, ordered by code.  Names are lazily translated.
STATE_CHOICES = (
    ('AGU', _(u'Aguascalientes')),
    ('BCN', _(u'Baja California')),
    ('BCS', _(u'Baja California Sur')),
    ('CAM', _(u'Campeche')),
    ('CHH', _(u'Chihuahua')),
    ('CHP', _(u'Chiapas')),
    ('COA', _(u'Coahuila')),
    ('COL', _(u'Colima')),
    ('DIF', _(u'Distrito Federal')),
    ('DUR', _(u'Durango')),
    ('GRO', _(u'Guerrero')),
    ('GUA', _(u'Guanajuato')),
    ('HID', _(u'Hidalgo')),
    ('JAL', _(u'Jalisco')),
    ('MEX', _(u'Estado de México')),
    ('MIC', _(u'Michoacán')),
    ('MOR', _(u'Morelos')),
    ('NAY', _(u'Nayarit')),
    ('NLE', _(u'Nuevo León')),
    ('OAX', _(u'Oaxaca')),
    ('PUE', _(u'Puebla')),
    ('QUE', _(u'Querétaro')),
    ('ROO', _(u'Quintana Roo')),
    ('SIN', _(u'Sinaloa')),
    ('SLP', _(u'San Luis Potosí')),
    ('SON', _(u'Sonora')),
    ('TAB', _(u'Tabasco')),
    ('TAM', _(u'Tamaulipas')),
    ('TLA', _(u'Tlaxcala')),
    ('VER', _(u'Veracruz')),
    ('YUC', _(u'Yucatán')),
    ('ZAC', _(u'Zacatecas')),
)
| mit |
bluemask2001/namebench | nb_third_party/dns/opcode.py | 248 | 2615 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Opcodes."""
import dns.exception
# Standard DNS opcode values (header OPCODE field).
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5

# Mapping from textual mnemonic to opcode value.
_by_text = {
    'QUERY' : QUERY,
    'IQUERY' : IQUERY,
    'STATUS' : STATUS,
    'NOTIFY' : NOTIFY,
    'UPDATE' : UPDATE
}

# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be true inverse.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
class UnknownOpcode(dns.exception.DNSException):
    """Raised if an opcode is unknown.

    Raised by from_text() when the given mnemonic or numeric string does
    not name a valid opcode.
    """
    pass
def from_text(text):
    """Convert text into an opcode.

    @param text: the textual opcode (mnemonic such as 'QUERY', or a
        decimal string in the range 0..15)
    @type text: string
    @raises UnknownOpcode: the opcode is unknown
    @rtype: int
    """
    # Numeric form: accept any value that fits the 4-bit opcode field.
    if text.isdigit():
        value = int(text)
        if 0 <= value <= 15:
            return value
    # Mnemonic form, case-insensitive.
    try:
        return _by_text[text.upper()]
    except KeyError:
        raise UnknownOpcode
def from_flags(flags):
    """Extract an opcode from DNS message flags.

    The opcode occupies bits 11..14 of the 16-bit flags word.

    @param flags: int
    @rtype: int
    """
    return (flags >> 11) & 0xf
def to_flags(value):
    """Convert an opcode to a value suitable for ORing into DNS message
    flags.

    Places the low 4 bits of *value* into bits 11..14 of the flags word.

    @rtype: int
    """
    return (value & 0xf) << 11
def to_text(value):
    """Convert an opcode to text.

    Unknown opcodes are rendered as their decimal value.

    @param value: the opcode
    @type value: int
    @rtype: string
    """
    return _by_value.get(value, str(value))
def is_update(flags):
    """True if the opcode in flags is UPDATE.

    @param flags: DNS flags
    @type flags: int
    @rtype: bool
    """
    # Return the comparison directly instead of the verbose
    # `if ...: return True / return False` pattern.
    return from_flags(flags) == UPDATE
| apache-2.0 |
DarkEnergyScienceCollaboration/ReprocessingTaskForce | config/w_2017_31/processCcdConfig_u.py | 28 | 3387 | # Apply the brighter fatter correction
config.isr.doBrighterFatter=False
config.charImage.repair.cosmicray.nCrPixelMax=1000000
# Useul to get to avoid deblending of satellite tracks
config.calibrate.deblend.maxFootprintSize=2000 # 2200
# Use psfex instead of pca
import lsst.meas.extensions.psfex.psfexPsfDeterminer
config.charImage.measurePsf.psfDeterminer.name='psfex'
# The following should be included for u filter in order to lower the source detection threshold
config.charImage.detection.includeThresholdMultiplier=1.0
# Run CModel
import lsst.meas.modelfit
config.charImage.measurement.plugins.names |= ["modelfit_DoubleShapeletPsfApprox", "modelfit_CModel"]
# Run astrometry using the new htm reference catalog format
# The following retargets are necessary until the new scheme becomes standard
from lsst.meas.algorithms import LoadIndexedReferenceObjectsTask
config.calibrate.astromRefObjLoader.retarget(LoadIndexedReferenceObjectsTask)
config.calibrate.photoRefObjLoader.retarget(LoadIndexedReferenceObjectsTask)
# Use new astrometry fitter
from lsst.meas.astrom import FitSipDistortionTask
config.calibrate.astrometry.wcsFitter.retarget(FitSipDistortionTask)
config.calibrate.astrometry.wcsFitter.order = 3
config.calibrate.astrometry.matcher.maxMatchDistArcSec=5
# Select external catalogs for Astrometry and Photometry
config.calibrate.photoRefObjLoader.ref_dataset_name='sdss'
#config.calibrate.astromRefObjLoader.ref_dataset_name='gaia'
config.calibrate.astromRefObjLoader.ref_dataset_name='pan-starrs'
#config.calibrate.astromRefObjLoader.ref_dataset_name='sdss'
# Astrometry with panstarrs
config.calibrate.astromRefObjLoader.filterMap = {
'u':'g',
'g':'g',
'r':'r',
'i':'i',
'i2':'i',
'z':'z',
'y':'y',
}
# Astrometry with gaia
#config.calibrate.astromRefObjLoader.filterMap = {
# 'u':'phot_g_mean_mag',
# 'g':'phot_g_mean_mag',
# 'r':'phot_g_mean_mag',
# 'i':'phot_g_mean_mag',
# 'z':'phot_g_mean_mag',
# 'y':'phot_g_mean_mag',
#}
# Photometry with sdss
config.calibrate.photoRefObjLoader.filterMap = {
'u': 'U',
'g': 'G',
'r': 'R',
'i': 'I',
'i2': 'I',
'z': 'Z',
'y': 'Z',
}
#Astrometry with sdss
#config.calibrate.astromRefObjLoader.filterMap = {
# 'u': 'U',
# 'g': 'G',
# 'r': 'R',
# 'i': 'I',
# 'z': 'Z',
# 'y': 'Z',
#}
import lsst.pipe.tasks.colorterms
config.calibrate.photoCal.colorterms.data['e2v'].data['i2']=lsst.pipe.tasks.colorterms.Colorterm()
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].c2=0.0
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].c1=0.003
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].c0=0.0
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].primary='i'
config.calibrate.photoCal.colorterms.data['e2v'].data['i2'].secondary='r'
# use Chebyshev background estimation
config.charImage.background.useApprox=True
config.charImage.detection.background.binSize=128
config.charImage.detection.background.useApprox=True
config.charImage.background.binSize = 128
config.charImage.background.undersampleStyle = 'REDUCE_INTERP_ORDER'
config.charImage.detection.background.binSize = 128
config.charImage.detection.background.undersampleStyle='REDUCE_INTERP_ORDER'
config.charImage.detection.background.binSize = 128
config.charImage.detection.background.undersampleStyle = 'REDUCE_INTERP_ORDER'
| gpl-2.0 |
40223105/w17test | static/Brython3.1.1-20150328-091302/Lib/formatter.py | 751 | 14930 | """Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
AS_IS = None  # Sentinel meaning "keep the current value" in push_font tuples.
class NullFormatter:
    """A formatter which does nothing.

    If the writer parameter is omitted, a NullWriter instance is created.
    No methods of the writer are called by NullFormatter instances.

    Implementations should inherit from this class if implementing a writer
    interface but don't need to inherit any implementation.
    """

    def __init__(self, writer=None):
        # NullWriter is defined later in this module (not visible in this chunk).
        if writer is None:
            writer = NullWriter()
        self.writer = writer

    # Every formatter operation is accepted and silently ignored.
    def end_paragraph(self, blankline): pass
    def add_line_break(self): pass
    def add_hor_rule(self, *args, **kw): pass
    def add_label_data(self, format, counter, blankline=None): pass
    def add_flowing_data(self, data): pass
    def add_literal_data(self, data): pass
    def flush_softspace(self): pass
    def push_alignment(self, align): pass
    def pop_alignment(self): pass
    def push_font(self, x): pass
    def pop_font(self): pass
    def push_margin(self, margin): pass
    def pop_margin(self): pass
    def set_spacing(self, spacing): pass
    def push_style(self, *styles): pass
    def pop_style(self, n=1): pass
    def assert_line_data(self, flag=1): pass
class AbstractFormatter:
"""The standard formatter.
This implementation has demonstrated wide applicability to many writers,
and may be used directly in most circumstances. It has been used to
implement a full-featured World Wide Web browser.
"""
# Space handling policy: blank spaces at the boundary between elements
# are handled by the outermost context. "Literal" data is not checked
# to determine context, so spaces in literal data are handled directly
# in all circumstances.
    def __init__(self, writer):
        """Bind the formatter to *writer* and reset all formatting state."""
        self.writer = writer            # Output device
        self.align = None               # Current alignment
        self.align_stack = []           # Alignment stack
        self.font_stack = []            # Font state
        self.margin_stack = []          # Margin state
        self.spacing = None             # Vertical spacing state
        self.style_stack = []           # Other state, e.g. color
        self.nospace = 1                # Should leading space be suppressed
        self.softspace = 0              # Should a space be inserted
        self.para_end = 1               # Just ended a paragraph
        self.parskip = 0                # Skipped space between paragraphs?
        self.hard_break = 1             # Have a hard break
        self.have_label = 0
    def end_paragraph(self, blankline):
        """Close the current paragraph with up to *blankline* blank lines."""
        if not self.hard_break:
            self.writer.send_line_break()
            self.have_label = 0
        if self.parskip < blankline and not self.have_label:
            # Only emit the blank lines not already produced by a prior call.
            self.writer.send_paragraph(blankline - self.parskip)
            self.parskip = blankline
            self.have_label = 0
        self.hard_break = self.nospace = self.para_end = 1
        self.softspace = 0
    def add_line_break(self):
        """Emit a line break unless one is already pending."""
        if not (self.hard_break or self.para_end):
            self.writer.send_line_break()
            self.have_label = self.parskip = 0
        self.hard_break = self.nospace = 1
        self.softspace = 0
    def add_hor_rule(self, *args, **kw):
        """Emit a horizontal rule, forcing a line break first if needed.

        Extra arguments are passed through to the writer's send_hor_rule.
        """
        if not self.hard_break:
            self.writer.send_line_break()
        self.writer.send_hor_rule(*args, **kw)
        self.hard_break = self.nospace = 1
        self.have_label = self.para_end = self.softspace = self.parskip = 0
    def add_label_data(self, format, counter, blankline = None):
        """Emit a list-item label; *format* may be a format string or raw data."""
        if self.have_label or not self.hard_break:
            self.writer.send_line_break()
        if not self.para_end:
            self.writer.send_paragraph((blankline and 1) or 0)
        if isinstance(format, str):
            # Format strings use '1', 'a'/'A', 'i'/'I' as counter styles.
            self.writer.send_label_data(self.format_counter(format, counter))
        else:
            self.writer.send_label_data(format)
        self.nospace = self.have_label = self.hard_break = self.para_end = 1
        self.softspace = self.parskip = 0
    def format_counter(self, format, counter):
        """Render *counter* per *format*: '1' decimal, 'a'/'A' letters,
        'i'/'I' roman numerals; any other character is copied literally."""
        label = ''
        for c in format:
            if c == '1':
                label = label + ('%d' % counter)
            elif c in 'aA':
                if counter > 0:
                    label = label + self.format_letter(c, counter)
            elif c in 'iI':
                if counter > 0:
                    label = label + self.format_roman(c, counter)
            else:
                label = label + c
        return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
# This makes a strong assumption that lowercase letters
# and uppercase letters form two contiguous blocks, with
# letters in order!
s = chr(ord(case) + x)
label = s + label
return label
    def format_roman(self, case, counter):
        """Render the 1-based *counter* as a roman numeral; 'I' upper-cases it."""
        ones = ['i', 'x', 'c', 'm']
        fives = ['v', 'l', 'd']
        label, index = '', 0
        # This will die of IndexError when counter is too big
        while counter > 0:
            counter, x = divmod(counter, 10)
            if x == 9:
                # 9 at this decade: ones digit + next decade's ones (e.g. ix).
                label = ones[index] + ones[index+1] + label
            elif x == 4:
                # 4 at this decade: ones digit + fives digit (e.g. iv).
                label = ones[index] + fives[index] + label
            else:
                if x >= 5:
                    s = fives[index]
                    x = x-5
                else:
                    s = ''
                s = s + ones[index]*x
                label = s + label
            index = index + 1
        if case == 'I':
            return label.upper()
        return label
    def add_flowing_data(self, data):
        """Add word-wrappable text, collapsing internal whitespace runs.

        Leading/trailing whitespace is folded into the soft-space state so
        that at most one space is emitted between adjacent chunks.
        """
        if not data: return
        prespace = data[:1].isspace()
        postspace = data[-1:].isspace()
        # Collapse all internal whitespace runs to single spaces.
        data = " ".join(data.split())
        if self.nospace and not data:
            return
        elif prespace or self.softspace:
            if not data:
                # Whitespace-only chunk: just remember that a space is owed.
                if not self.nospace:
                    self.softspace = 1
                    self.parskip = 0
                return
            if not self.nospace:
                data = ' ' + data
        self.hard_break = self.nospace = self.para_end = \
            self.parskip = self.have_label = 0
        self.softspace = postspace
        self.writer.send_flowing_data(data)
def add_literal_data(self, data):
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
def flush_softspace(self):
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
def push_alignment(self, align):
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
def pop_alignment(self):
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
    def push_font(self, font):
        """Push a (size, italic, bold, teletype) font state.

        Components equal to AS_IS (the module-level "leave unchanged"
        sentinel, defined earlier in the file) inherit their value from
        the current top of the font stack.
        """
        size, i, b, tt = font
        if self.softspace:
            # Flush the pending soft space before the font change takes effect.
            self.hard_break = self.para_end = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
        if self.font_stack:
            csize, ci, cb, ctt = self.font_stack[-1]
            if size is AS_IS: size = csize
            if i is AS_IS: i = ci
            if b is AS_IS: b = cb
            if tt is AS_IS: tt = ctt
            font = (size, i, b, tt)
        self.font_stack.append(font)
        self.writer.new_font(font)
def pop_font(self):
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
def push_margin(self, margin):
self.margin_stack.append(margin)
fstack = [m for m in self.margin_stack if m]
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
def pop_margin(self):
if self.margin_stack:
del self.margin_stack[-1]
fstack = [m for m in self.margin_stack if m]
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
def set_spacing(self, spacing):
self.spacing = spacing
self.writer.new_spacing(spacing)
def push_style(self, *styles):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
def pop_style(self, n=1):
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
def assert_line_data(self, flag=1):
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
class NullWriter:
    """Writer that implements the full writer protocol as no-ops.

    Useful in testing and as the base class for writers that do not need
    to inherit any concrete behaviour.
    """
    def __init__(self):
        pass
    def flush(self):
        pass
    def new_alignment(self, align):
        pass
    def new_font(self, font):
        pass
    def new_margin(self, margin, level):
        pass
    def new_spacing(self, spacing):
        pass
    def new_styles(self, styles):
        pass
    def send_paragraph(self, blankline):
        pass
    def send_line_break(self):
        pass
    def send_hor_rule(self, *args, **kw):
        pass
    def send_label_data(self, data):
        pass
    def send_flowing_data(self, data):
        pass
    def send_literal_data(self, data):
        pass
class AbstractWriter(NullWriter):
    """A writer useful for debugging formatters, but not much else.

    Every method announces itself by printing its name and arguments on
    standard output.
    """
    def _trace(self, message):
        # Single funnel for all debug announcements.
        print(message)
    def new_alignment(self, align):
        self._trace("new_alignment(%r)" % (align,))
    def new_font(self, font):
        self._trace("new_font(%r)" % (font,))
    def new_margin(self, margin, level):
        self._trace("new_margin(%r, %d)" % (margin, level))
    def new_spacing(self, spacing):
        self._trace("new_spacing(%r)" % (spacing,))
    def new_styles(self, styles):
        self._trace("new_styles(%r)" % (styles,))
    def send_paragraph(self, blankline):
        self._trace("send_paragraph(%r)" % (blankline,))
    def send_line_break(self):
        self._trace("send_line_break()")
    def send_hor_rule(self, *args, **kw):
        self._trace("send_hor_rule()")
    def send_label_data(self, data):
        self._trace("send_label_data(%r)" % (data,))
    def send_flowing_data(self, data):
        self._trace("send_flowing_data(%r)" % (data,))
    def send_literal_data(self, data):
        self._trace("send_literal_data(%r)" % (data,))
class DumbWriter(NullWriter):
    """Simple writer class which writes output on the file object passed in
    as the file parameter or, if file is omitted, on standard output. The
    output is simply word-wrapped to the number of columns specified by
    the maxcol parameter. This class is suitable for reflowing a sequence
    of paragraphs.
    """
    def __init__(self, file=None, maxcol=72):
        self.file = file or sys.stdout
        self.maxcol = maxcol
        NullWriter.__init__(self)
        self.reset()
    def reset(self):
        # col: current output column; atbreak: a space/wrap decision is
        # pending before the next flowing word.
        self.col = 0
        self.atbreak = 0
    def send_paragraph(self, blankline):
        self.file.write('\n'*blankline)
        self.col = 0
        self.atbreak = 0
    def send_line_break(self):
        self.file.write('\n')
        self.col = 0
        self.atbreak = 0
    def send_hor_rule(self, *args, **kw):
        # The rule always occupies a line of its own, maxcol dashes wide.
        self.file.write('\n')
        self.file.write('-'*self.maxcol)
        self.file.write('\n')
        self.col = 0
        self.atbreak = 0
    def send_literal_data(self, data):
        self.file.write(data)
        i = data.rfind('\n')
        if i >= 0:
            self.col = 0
            data = data[i+1:]
        # Track the column as if tabs were expanded, for wrap accounting.
        data = data.expandtabs()
        self.col = self.col + len(data)
        self.atbreak = 0
    def send_flowing_data(self, data):
        """Greedy word wrap: a newline replaces the inter-word space
        whenever the next word would run past maxcol."""
        if not data: return
        atbreak = self.atbreak or data[0].isspace()
        col = self.col
        maxcol = self.maxcol
        # Hoist the attribute lookup out of the loop.
        write = self.file.write
        for word in data.split():
            if atbreak:
                if col + len(word) >= maxcol:
                    write('\n')
                    col = 0
                else:
                    write(' ')
                    col = col + 1
            write(word)
            col = col + len(word)
            atbreak = 1
        self.col = col
        self.atbreak = data[-1].isspace()
def test(file = None):
    """Reflow *file* (or sys.argv[1], or stdin) through a DumbWriter.

    Blank lines delimit paragraphs; every other line is added as flowing
    text. Files opened here are closed afterwards (the original leaked
    the file object).
    """
    w = DumbWriter()
    f = AbstractFormatter(w)
    if file is not None:
        fp = open(file)
        close_fp = True
    elif sys.argv[1:]:
        fp = open(sys.argv[1])
        close_fp = True
    else:
        fp = sys.stdin
        close_fp = False  # never close stdin; we did not open it
    try:
        for line in fp:
            if line == '\n':
                f.end_paragraph(1)
            else:
                f.add_flowing_data(line)
        f.end_paragraph(0)
    finally:
        if close_fp:
            fp.close()
# Allow running the module as a script: reflow the file named on the
# command line (or stdin) to stdout.
if __name__ == '__main__':
    test()
| gpl-3.0 |
closeio/flask-admin | flask_admin/tests/sqla/test_translation.py | 11 | 1185 | import json
from nose.tools import eq_, ok_, raises, assert_true
from speaklater import make_lazy_string
from . import setup
from .test_basic import CustomModelView, create_models
class Translator:
    """Callable stub translator: wraps input only while ``translate`` is set."""
    # Class-level default: translation starts out disabled.
    translate = False
    def __call__(self, string):
        if not self.translate:
            return string
        return 'Translated: "{0}"'.format(string)
def test_column_label_translation():
    """Regression test: a lazy (translated) column label must not break
    filter-group serialization nor index rendering with an active filter."""
    app, db, admin = setup()
    Model1, _ = create_models(db)
    translated = Translator()
    # Lazy string: evaluated through `translated` each time it is rendered.
    label = make_lazy_string(translated, 'Column1')
    view = CustomModelView(Model1, db.session,
                           column_list=['test1', 'test3'],
                           column_labels=dict(test1=label),
                           column_filters=('test1',))
    admin.add_view(view)
    translated.translate = True
    non_lazy_groups = view._get_filter_groups()
    json.dumps(non_lazy_groups)  # Filter dict is JSON serializable.
    ok_(translated('Column1') in non_lazy_groups)  # Label was translated.
    client = app.test_client()
    # Render index with active filter.
    rv = client.get('/admin/model1/?flt1_0=test')
    eq_(rv.status_code, 200)
| bsd-3-clause |
autyzm-pg/friendly-plans-link | python-version/client/get.py | 1 | 2291 | import socket
import sys
import re
import os
# Help text printed when the script is invoked with too few arguments.
USAGE = '''
Usage: python get.py [action] [files...]
Available actions:
pull - get file to local repository
install - get file to local repository and push onto device
'''
# Lines written to (and exec'd from) config.txt when it is missing or
# unreadable; each one is a "NAME = value" Python statement.
DEFAULTCONFIG = [
    "DEFAULT_HOST = 'localhost'",
    "DEFAULT_PORT = 5000",
    "BUFFERSIZE = 2**20*16"]
try:
with open("config.txt","r") as fuck:
for i in fuck:
if re.match("[A-Za-Z_]+ ?= ?[^\n]+",i):
exec(i)
except:
print "Populating config file with default repository..."
with open("config.txt","w") as fuck:
for i in DEFAULTCONFIG:
fuck.write(i+"\n")
exec(i)
try:
ADDRESS = (DEFAULT_HOST,DEFAULT_PORT)
except:
print "Config file malformed - delete it."
quit()
def get_file_list(filelist):
for file in filelist:
if file:
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(ADDRESS)
request = "GET /res/"+file+" HTTP/1.1\r\n\r\n"
print request[:-4]
s.send(request)
data = ''
r = 'filler'
while r!='':
r = s.recv(BUFFERSIZE)
data += r
if "200 OK" not in data:
print "Exception: Request on file '" + file + "' not accepted."
continue
data = data.split('\r\n\r\n')
data = data[1]
data = data.strip()
data = data.decode('base64')
if "/" in file:
path = file.split("/")
path = "/".join(path[:-1])
if not os.path.exists(path):
os.makedirs(path)
with open(file,"w") as fuck:
fuck.write(data)
data = data.split("\n")
if data[0][:14] == "AUTYZM-PACKAGE":
data = data[1:]
data = filter(lambda x: True if x!='' else False ,data)
if "/" in file:
path = file.split("/")
path = "/".join(path[:-1])
data = map(lambda x: path + "/" + x,data)
get_file_list(data)
if len(sys.argv) < 3:
print USAGE
if (sys.argv[1].lower() in ['install','pull']):
get_file_list(sys.argv[2:])
| gpl-3.0 |
agiliq/django | tests/backends/tests.py | 13 | 47443 | # -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import copy
import datetime
from decimal import Decimal
import re
import threading
import unittest
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (connection, connections, DEFAULT_DB_ALIAS,
DatabaseError, IntegrityError, reset_queries, transaction)
from django.db.backends import BaseDatabaseWrapper
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.utils import format_number, CursorWrapper
from django.db.models import Sum, Avg, Variance, StdDev
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (TestCase, TransactionTestCase, override_settings,
skipUnlessDBFeature, skipIfDBFeature)
from django.test.utils import str_prefix, IgnoreAllDeprecationWarningsMixin
from django.utils import six
from django.utils.six.moves import xrange
from . import models
# Checks the fallback behaviour of the connection handler configuration.
class DummyBackendTest(TestCase):
    def test_no_databases(self):
        """
        Test that empty DATABASES setting default to the dummy backend.
        """
        DATABASES = {}
        conns = ConnectionHandler(DATABASES)
        self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
                         'django.db.backends.dummy')
# Backend-specific checks that only run when the active backend is Oracle.
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
    def test_quote_name(self):
        # Check that '%' chars are escaped for query execution.
        name = '"SOME%NAME"'
        quoted_name = connection.ops.quote_name(name)
        self.assertEqual(quoted_name % (), name)
    def test_dbms_session(self):
        # If the backend is Oracle, test that we can call a standard
        # stored procedure through our cursor wrapper.
        from django.db.backends.oracle.base import convert_unicode
        with connection.cursor() as cursor:
            cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
                            [convert_unicode('_django_testing!')])
    def test_cursor_var(self):
        # If the backend is Oracle, test that we can pass cursor variables
        # as query parameters.
        from django.db.backends.oracle.base import Database
        with connection.cursor() as cursor:
            var = cursor.var(Database.STRING)
            cursor.execute("BEGIN %s := 'X'; END; ", [var])
            self.assertEqual(var.getvalue(), 'X')
    def test_long_string(self):
        # If the backend is Oracle, test that we can save a text longer
        # than 4000 chars and read it properly
        with connection.cursor() as cursor:
            cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
            long_str = ''.join(six.text_type(x) for x in xrange(4000))
            cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
            cursor.execute('SELECT text FROM ltext')
            row = cursor.fetchone()
            self.assertEqual(long_str, row[0].read())
            cursor.execute('DROP TABLE ltext')
    def test_client_encoding(self):
        # If the backend is Oracle, test that the client encoding is set
        # correctly. This was broken under Cygwin prior to r14781.
        connection.ensure_connection()
        self.assertEqual(connection.connection.encoding, "UTF-8")
        self.assertEqual(connection.connection.nencoding, "UTF-8")
    def test_order_of_nls_parameters(self):
        # an 'almost right' datetime should work with configured
        # NLS parameters as per #18465.
        with connection.cursor() as cursor:
            query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
            # Test that the query succeeds without errors - pre #18465 this
            # wasn't the case.
            cursor.execute(query)
            self.assertEqual(cursor.fetchone()[0], 1)
# Backend-specific checks that only run when the active backend is SQLite.
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
    # Show full diffs on assertion failures for the SQL comparisons below.
    longMessage = True
    def test_autoincrement(self):
        """
        Check that auto_increment fields are created with the AUTOINCREMENT
        keyword in order to be monotonically increasing. Refs #10164.
        """
        statements = connection.creation.sql_create_model(models.Square,
            style=no_style())
        match = re.search('"id" ([^,]+),', statements[0][0])
        self.assertIsNotNone(match)
        self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
            match.group(1), "Wrong SQL used to create an auto-increment "
            "column on SQLite")
    def test_aggregation(self):
        """
        #19360: Raise NotImplementedError when aggregating on date/time fields.
        """
        for aggregate in (Sum, Avg, Variance, StdDev):
            self.assertRaises(NotImplementedError,
                models.Item.objects.all().aggregate, aggregate('time'))
            self.assertRaises(NotImplementedError,
                models.Item.objects.all().aggregate, aggregate('date'))
            self.assertRaises(NotImplementedError,
                models.Item.objects.all().aggregate, aggregate('last_modified'))
# Backend-specific checks that only run when the backend is PostgreSQL.
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
    def assert_parses(self, version_string, version):
        # Helper: version_string must parse to the given numeric version.
        self.assertEqual(pg_version._parse_version(version_string), version)
    def test_parsing(self):
        """Test PostgreSQL version parsing from `SELECT version()` output"""
        self.assert_parses("PostgreSQL 9.3 beta4", 90300)
        self.assert_parses("PostgreSQL 9.3", 90300)
        self.assert_parses("EnterpriseDB 9.3", 90300)
        self.assert_parses("PostgreSQL 9.3.6", 90306)
        self.assert_parses("PostgreSQL 9.4beta1", 90400)
        self.assert_parses("PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 90301)
    def test_version_detection(self):
        """Test PostgreSQL version detection"""
        # Helper mocks
        class CursorMock(object):
            "Very simple mock of DB-API cursor"
            def execute(self, arg):
                pass
            def fetchone(self):
                return ["PostgreSQL 9.3"]
            def __enter__(self):
                return self
            def __exit__(self, type, value, traceback):
                pass
        class OlderConnectionMock(object):
            "Mock of psycopg2 (< 2.0.12) connection"
            def cursor(self):
                return CursorMock()
        # psycopg2 < 2.0.12 code path
        conn = OlderConnectionMock()
        self.assertEqual(pg_version.get_version(conn), 90300)
    def test_connect_and_rollback(self):
        """
        PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
        transaction is rolled back (#17062).
        """
        databases = copy.deepcopy(settings.DATABASES)
        new_connections = ConnectionHandler(databases)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        try:
            # Ensure the database default time zone is different than
            # the time zone in new_connection.settings_dict. We can
            # get the default time zone by reset & show.
            cursor = new_connection.cursor()
            cursor.execute("RESET TIMEZONE")
            cursor.execute("SHOW TIMEZONE")
            db_default_tz = cursor.fetchone()[0]
            new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
            new_connection.close()
            # Fetch a new connection with the new_tz as default
            # time zone, run a query and rollback.
            new_connection.settings_dict['TIME_ZONE'] = new_tz
            new_connection.set_autocommit(False)
            cursor = new_connection.cursor()
            new_connection.rollback()
            # Now let's see if the rollback rolled back the SET TIME ZONE.
            cursor.execute("SHOW TIMEZONE")
            tz = cursor.fetchone()[0]
            self.assertEqual(new_tz, tz)
        finally:
            new_connection.close()
    def test_connect_non_autocommit(self):
        """
        The connection wrapper shouldn't believe that autocommit is enabled
        after setting the time zone when AUTOCOMMIT is False (#21452).
        """
        databases = copy.deepcopy(settings.DATABASES)
        databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
        new_connections = ConnectionHandler(databases)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        try:
            # Open a database connection.
            new_connection.cursor()
            self.assertFalse(new_connection.get_autocommit())
        finally:
            new_connection.close()
    def _select(self, val):
        # Round-trip helper: send val through a parameterized SELECT.
        with connection.cursor() as cursor:
            cursor.execute("SELECT %s", (val,))
            return cursor.fetchone()[0]
    def test_select_ascii_array(self):
        a = ["awef"]
        b = self._select(a)
        self.assertEqual(a[0], b[0])
    def test_select_unicode_array(self):
        a = ["ᄲawef"]
        b = self._select(a)
        self.assertEqual(a[0], b[0])
    def test_lookup_cast(self):
        # Text lookups must cast the column to ::text for the operators.
        from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
        do = DatabaseOperations(connection=None)
        for lookup in ('iexact', 'contains', 'icontains', 'startswith',
                       'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
            self.assertIn('::text', do.lookup_cast(lookup))
# Regression tests for date functions clashing with column names (#12818).
class DateQuotingTest(TestCase):
    def test_django_date_trunc(self):
        """
        Test the custom ``django_date_trunc method``, in particular against
        fields which clash with strings passed to it (e.g. 'year') - see
        #12818__.
        __: http://code.djangoproject.com/ticket/12818
        """
        updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=updated)
        years = models.SchoolClass.objects.dates('last_updated', 'year')
        self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
    def test_django_date_extract(self):
        """
        Test the custom ``django_date_extract method``, in particular against fields
        which clash with strings passed to it (e.g. 'day') - see #12818__.
        __: http://code.djangoproject.com/ticket/12818
        """
        updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=updated)
        classes = models.SchoolClass.objects.filter(last_updated__day=20)
        self.assertEqual(len(classes), 1)
# DEBUG=True so that executed queries are recorded on the connection.
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
    def test_last_executed_query(self):
        """
        last_executed_query should not raise an exception even if no previous
        query has been run.
        """
        cursor = connection.cursor()
        try:
            connection.ops.last_executed_query(cursor, '', ())
        except Exception:
            self.fail("'last_executed_query' should not raise an exception.")
    def test_debug_sql(self):
        # The logged SQL should at least contain the verb and the table name.
        list(models.Reporter.objects.filter(first_name="test"))
        sql = connection.queries[-1]['sql'].lower()
        self.assertIn("select", sql)
        self.assertIn(models.Reporter._meta.db_table, sql)
    def test_query_encoding(self):
        """
        Test that last_executed_query() returns an Unicode string
        """
        data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
        sql, params = data.query.sql_with_params()
        cursor = data.query.get_compiler('default').execute_sql(CURSOR)
        last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
        self.assertIsInstance(last_sql, six.text_type)
    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This test is specific to SQLite.")
    def test_no_interpolation_on_sqlite(self):
        # Regression for #17158
        # This shouldn't raise an exception
        query = "SELECT strftime('%Y', 'now');"
        connection.cursor().execute(query)
        self.assertEqual(connection.queries[-1]['sql'],
            str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
# Checks that parameter-count mismatches are reported, not silently accepted.
class ParameterHandlingTest(TestCase):
    def test_bad_parameter_count(self):
        "An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
        cursor = connection.cursor()
        query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
            connection.introspection.table_name_converter('backends_square'),
            connection.ops.quote_name('root'),
            connection.ops.quote_name('square')
        ))
        self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
        self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following tests would be a good test to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
    """Long primary keys and model names can result in a sequence name
    that exceeds the database limits, which will result in truncation
    on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check it is. Refs #8901.
    """
    def test_sequence_name_length_limits_create(self):
        """Test creation of model with long name and long pk name doesn't error. Ref #8901"""
        models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
    def test_sequence_name_length_limits_m2m(self):
        """Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
        obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
        rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
        obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
    def test_sequence_name_length_limits_flush(self):
        """Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is expensive to the full test, so we dig into the
        # internals to generate the likely offending SQL and run it manually
        # Some convenience aliases
        VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
        VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
        tables = [
            VLM._meta.db_table,
            VLM_m2m._meta.db_table,
        ]
        sequences = [
            {
                'column': VLM._meta.pk.column,
                'table': VLM._meta.db_table
            },
        ]
        cursor = connection.cursor()
        for statement in connection.ops.sql_flush(no_style(), tables, sequences):
            cursor.execute(statement)
# Checks sequence resets against models with generic relations (#13941).
class SequenceResetTest(TestCase):
    def test_generic_relation(self):
        "Sequence names are correct when resetting generic relations (Ref #13941)"
        # Create an object with a manually specified PK
        models.Post.objects.create(id=10, name='1st post', text='hello world')
        # Reset the sequences for the database
        cursor = connection.cursor()
        commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
        for sql in commands:
            cursor.execute(sql)
        # If we create a new object now, it should have a PK greater
        # than the PK we specified manually.
        obj = models.Post.objects.create(name='New post', text='goodbye world')
        self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
    available_apps = []
    # Unfortunately with sqlite3 the in-memory test database cannot be closed,
    # and so it cannot be re-opened during testing.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_signal(self):
        # The connection_created signal must fire exactly when a new
        # connection is established, and stop firing once disconnected.
        data = {}
        def receiver(sender, connection, **kwargs):
            data["connection"] = connection
        connection_created.connect(receiver)
        connection.close()
        connection.cursor()
        self.assertIs(data["connection"].connection, connection.connection)
        connection_created.disconnect(receiver)
        data.clear()
        connection.cursor()
        self.assertEqual(data, {})
class EscapingChecks(TestCase):
    """
    All tests in this test case are also run with settings.DEBUG=True in
    EscapingChecksDebug test case, to also test CursorDebugWrapper.
    """
    # Evaluated at class-definition time against the default connection.
    bare_select_suffix = connection.features.bare_select_suffix
    def test_paramless_no_escaping(self):
        # A literal '%s' with no params must pass through unchanged.
        cursor = connection.cursor()
        cursor.execute("SELECT '%s'" + self.bare_select_suffix)
        self.assertEqual(cursor.fetchall()[0][0], '%s')
    def test_parameter_escaping(self):
        cursor = connection.cursor()
        cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
        self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This is an sqlite-specific issue")
    def test_sqlite_parameter_escaping(self):
        #13648: '%s' escaping support for sqlite3
        cursor = connection.cursor()
        cursor.execute("select strftime('%s', date('now'))")
        response = cursor.fetchall()[0][0]
        # response should be an non-zero integer
        self.assertTrue(int(response))
# Re-run all EscapingChecks tests through CursorDebugWrapper (DEBUG=True).
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
    pass
# General cursor/connection behaviour checks shared by all backends.
class BackendTestCase(TestCase):
    def create_squares_with_executemany(self, args):
        self.create_squares(args, 'format', True)
    def create_squares(self, args, paramstyle, multiple):
        # Insert rows into backends_square with the requested DB-API
        # paramstyle ('format' -> %s, 'pyformat' -> %(name)s) and either
        # execute() or executemany().
        cursor = connection.cursor()
        opts = models.Square._meta
        tbl = connection.introspection.table_name_converter(opts.db_table)
        f1 = connection.ops.quote_name(opts.get_field('root').column)
        f2 = connection.ops.quote_name(opts.get_field('square').column)
        if paramstyle == 'format':
            query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
        elif paramstyle == 'pyformat':
            query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
        else:
            raise ValueError("unsupported paramstyle in test")
        if multiple:
            cursor.executemany(query, args)
        else:
            cursor.execute(query, args)
    def test_cursor_executemany(self):
        #4896: Test cursor.executemany
        args = [(i, i ** 2) for i in range(-5, 6)]
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 11)
        for i in range(-5, 6):
            square = models.Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)
    def test_cursor_executemany_with_empty_params_list(self):
        #4765: executemany with params=[] does nothing
        args = []
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 0)
    def test_cursor_executemany_with_iterator(self):
        #10320: executemany accepts iterators
        args = iter((i, i ** 2) for i in range(-3, 2))
        self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 5)
        args = iter((i, i ** 2) for i in range(3, 7))
        with override_settings(DEBUG=True):
            # same test for DebugCursorWrapper
            self.create_squares_with_executemany(args)
        self.assertEqual(models.Square.objects.count(), 9)
    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_execute_with_pyformat(self):
        #10070: Support pyformat style passing of parameters
        args = {'root': 3, 'square': 9}
        self.create_squares(args, 'pyformat', multiple=False)
        self.assertEqual(models.Square.objects.count(), 1)
    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_executemany_with_pyformat(self):
        #10070: Support pyformat style passing of parameters
        args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
        self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(models.Square.objects.count(), 11)
        for i in range(-5, 6):
            square = models.Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)
    @skipUnlessDBFeature('supports_paramstyle_pyformat')
    def test_cursor_executemany_with_pyformat_iterator(self):
        args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
        self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(models.Square.objects.count(), 5)
        args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
        with override_settings(DEBUG=True):
            # same test for DebugCursorWrapper
            self.create_squares(args, 'pyformat', multiple=True)
        self.assertEqual(models.Square.objects.count(), 9)
    def test_unicode_fetches(self):
        #6254: fetchone, fetchmany, fetchall return strings as unicode objects
        qn = connection.ops.quote_name
        models.Person(first_name="John", last_name="Doe").save()
        models.Person(first_name="Jane", last_name="Doe").save()
        models.Person(first_name="Mary", last_name="Agnelline").save()
        models.Person(first_name="Peter", last_name="Parker").save()
        models.Person(first_name="Clark", last_name="Kent").save()
        opts2 = models.Person._meta
        f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
        query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
          % (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
             qn(f3.column)))
        cursor = connection.cursor()
        cursor.execute(query2)
        self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
        self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
        self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
    def test_unicode_password(self):
        # Connecting with a non-ASCII password must not blow up with an
        # encoding error (a DatabaseError for wrong credentials is fine).
        old_password = connection.settings_dict['PASSWORD']
        connection.settings_dict['PASSWORD'] = "françois"
        try:
            connection.cursor()
        except DatabaseError:
            # As password is probably wrong, a database exception is expected
            pass
        except Exception as e:
            self.fail("Unexpected error raised with unicode password: %s" % e)
        finally:
            connection.settings_dict['PASSWORD'] = old_password
    def test_database_operations_helper_class(self):
        # Ticket #13630
        self.assertTrue(hasattr(connection, 'ops'))
        self.assertTrue(hasattr(connection.ops, 'connection'))
        self.assertEqual(connection, connection.ops.connection)
    def test_database_operations_init(self):
        """
        Test that DatabaseOperations initialization doesn't query the database.
        See #17656.
        """
        with self.assertNumQueries(0):
            connection.ops.__class__(connection)
    def test_cached_db_features(self):
        self.assertIn(connection.features.supports_transactions, (True, False))
        self.assertIn(connection.features.supports_stddev, (True, False))
        self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
    def test_duplicate_table_error(self):
        """ Test that creating an existing table returns a DatabaseError """
        cursor = connection.cursor()
        query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
        with self.assertRaises(DatabaseError):
            cursor.execute(query)
    def test_cursor_contextmanager(self):
        """
        Test that cursors can be used as a context manager
        """
        with connection.cursor() as cursor:
            self.assertIsInstance(cursor, CursorWrapper)
        # Both InterfaceError and ProgrammingError seem to be used when
        # accessing closed cursor (psycopg2 has InterfaceError, rest seem
        # to use ProgrammingError).
        with self.assertRaises(connection.features.closed_cursor_error_class):
            # cursor should be closed, so no queries should be possible.
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
    @unittest.skipUnless(connection.vendor == 'postgresql',
                         "Psycopg2 specific cursor.closed attribute needed")
    def test_cursor_contextmanager_closing(self):
        # There isn't a generic way to test that cursors are closed, but
        # psycopg2 offers us a way to check that by closed attribute.
        # So, run only on psycopg2 for that reason.
        with connection.cursor() as cursor:
            self.assertIsInstance(cursor, CursorWrapper)
        self.assertTrue(cursor.closed)
    # Unfortunately with sqlite3 the in-memory test database cannot be closed.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_is_usable_after_database_disconnects(self):
        """
        Test that is_usable() doesn't crash when the database disconnects.
        Regression for #21553.
        """
        # Open a connection to the database.
        with connection.cursor():
            pass
        # Emulate a connection close by the database.
        connection._close()
        # Even then is_usable() should not raise an exception.
        try:
            self.assertFalse(connection.is_usable())
        finally:
            # Clean up the mess created by connection._close(). Since the
            # connection is already closed, this crashes on some backends.
            try:
                connection.close()
            except Exception:
                pass
    @override_settings(DEBUG=True)
    def test_queries(self):
        """
        Test the documented API of connection.queries.
        """
        reset_queries()
        with connection.cursor() as cursor:
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
        self.assertEqual(1, len(connection.queries))
        self.assertIsInstance(connection.queries, list)
        self.assertIsInstance(connection.queries[0], dict)
        six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
        reset_queries()
        self.assertEqual(0, len(connection.queries))
    # Unfortunately with sqlite3 the in-memory test database cannot be closed.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    @override_settings(DEBUG=True)
    def test_queries_limit(self):
        """
        Test that the backend doesn't store an unlimited number of queries.
        Regression for #12581.
        """
        old_queries_limit = BaseDatabaseWrapper.queries_limit
        BaseDatabaseWrapper.queries_limit = 3
        new_connections = ConnectionHandler(settings.DATABASES)
        new_connection = new_connections[DEFAULT_DB_ALIAS]
        # Initialize the connection and clear initialization statements.
        with new_connection.cursor():
            pass
        new_connection.queries_log.clear()
        try:
            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
            with warnings.catch_warnings(record=True) as w:
                self.assertEqual(2, len(new_connection.queries))
                self.assertEqual(0, len(w))
            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
            with warnings.catch_warnings(record=True) as w:
                self.assertEqual(3, len(new_connection.queries))
                self.assertEqual(1, len(w))
                self.assertEqual(str(w[0].message), "Limit for query logging "
                    "exceeded, only the last 3 queries will be returned.")
        finally:
            BaseDatabaseWrapper.queries_limit = old_queries_limit
            new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MyISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
#   on or not, something that would be controlled by runtime support and user
#   preference.
# Instead, when a constraint violation is detected, we verify that the raised
# exception is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
    """Verify FK constraint enforcement on backends that support it."""

    available_apps = ['backends']

    def setUp(self):
        # Create a Reporter.
        self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')

    def test_integrity_checks_on_creation(self):
        """
        Try to create a model instance that violates a FK constraint. If it
        fails it should fail with IntegrityError.
        """
        # reporter_id=30 points at a Reporter row that does not exist.
        a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
        a2 = models.Article(headline='This is another test', reporter=self.r,
                            pub_date=datetime.datetime(2012, 8, 3),
                            reporter_proxy_id=30)
        self.assertRaises(IntegrityError, a2.save)

    def test_integrity_checks_on_update(self):
        """
        Try to update a model instance introducing a FK constraint violation.
        If it fails it should fail with IntegrityError.
        """
        # Create an Article.
        models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB
        a1 = models.Article.objects.get(headline="Test article")
        a1.reporter_id = 30
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
        # Create another article
        r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
        models.Article.objects.create(headline='Another article',
                                      pub_date=datetime.datetime(1988, 5, 15),
                                      reporter=self.r, reporter_proxy=r_proxy)
        # Retrieve the second article from the DB
        a2 = models.Article.objects.get(headline='Another article')
        a2.reporter_proxy_id = 30
        self.assertRaises(IntegrityError, a2.save)

    def test_disable_constraint_checks_manually(self):
        """
        When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                # Explicit disable/enable pair around the offending write.
                connection.disable_constraint_checking()
                a.save()
                connection.enable_constraint_checking()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            # Roll back so the invalid row never persists past the test.
            transaction.set_rollback(True)

    def test_disable_constraint_checks_context_manager(self):
        """
        When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                with connection.constraint_checks_disabled():
                    a.save()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            transaction.set_rollback(True)

    def test_check_constraints(self):
        """
        Constraint checks should raise an IntegrityError when bad data is in the DB.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            with connection.constraint_checks_disabled():
                a.save()
                # check_constraints() must detect the orphaned FK just written.
                with self.assertRaises(IntegrityError):
                    connection.check_constraints()
            transaction.set_rollback(True)
class ThreadTests(TestCase):
    """Connection/thread interaction tests. Refs #17258."""

    def test_default_connection_thread_local(self):
        """
        Ensure that the default connection (i.e. django.db.connection) is
        different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        connection.cursor()
        connections_dict[id(connection)] = connection

        def runner():
            # Passing django.db.connection between threads doesn't work while
            # connections[DEFAULT_DB_ALIAS] does.
            from django.db import connections
            connection = connections[DEFAULT_DB_ALIAS]
            # Allow thread sharing so the connection can be closed by the
            # main thread.
            connection.allow_thread_sharing = True
            connection.cursor()
            connections_dict[id(connection)] = connection
        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # Check that each created connection got different inner connection.
        # Main thread + 2 worker threads = 3 distinct connections.
        self.assertEqual(
            len(set(conn.connection for conn in connections_dict.values())),
            3)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_connections_thread_local(self):
        """
        Ensure that the connections are different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        for conn in connections.all():
            connections_dict[id(conn)] = conn

        def runner():
            from django.db import connections
            for conn in connections.all():
                # Allow thread sharing so the connection can be closed by the
                # main thread.
                conn.allow_thread_sharing = True
                connections_dict[id(conn)] = conn
        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # NOTE(review): 6 assumes two configured aliases x three threads
        # (main + 2 workers) — confirm against this suite's test settings.
        self.assertEqual(len(connections_dict), 6)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_pass_connection_between_threads(self):
        """
        Ensure that a connection can be passed from one thread to the other.
        Refs #17258.
        """
        models.Person.objects.create(first_name="John", last_name="Doe")

        def do_thread():
            # Run one query on the main thread's connection from a worker
            # thread, collecting any exception raised.
            def runner(main_thread_connection):
                from django.db import connections
                connections['default'] = main_thread_connection
                try:
                    models.Person.objects.get(first_name="John", last_name="Doe")
                except Exception as e:
                    exceptions.append(e)
            t = threading.Thread(target=runner, args=[connections['default']])
            t.start()
            t.join()
        # Without touching allow_thread_sharing, which should be False by default.
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)
        # If explicitly setting allow_thread_sharing to False
        connections['default'].allow_thread_sharing = False
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)
        # If explicitly setting allow_thread_sharing to True
        connections['default'].allow_thread_sharing = True
        exceptions = []
        do_thread()
        # All good
        self.assertEqual(exceptions, [])

    def test_closing_non_shared_connections(self):
        """
        Ensure that a connection that is not explicitly shareable cannot be
        closed by another thread.
        Refs #17258.
        """
        # First, without explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()
        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # The exception was raised
        self.assertEqual(len(exceptions), 1)
        # Then, with explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            # Enable thread sharing
            connections['default'].allow_thread_sharing = True
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()
        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # No exception was raised
        self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
    """
    Zero as id for AutoField should raise exception in MySQL, because MySQL
    does not allow zero for autoincrement primary key.
    """
    @skipIfDBFeature('allows_auto_pk_0')
    def test_zero_as_autoval(self):
        # On backends that disallow pk=0, Django raises ValueError before
        # the row ever reaches the database.
        with self.assertRaises(ValueError):
            models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TransactionTestCase):
    """Tests for models whose FK fields skip database-level constraints."""

    available_apps = ['backends']

    def test_can_reference_existent(self):
        # A reference to an existing row works both on create and lookup.
        obj = models.Object.objects.create()
        ref = models.ObjectReference.objects.create(obj=obj)
        self.assertEqual(ref.obj, obj)
        ref = models.ObjectReference.objects.get(obj=obj)
        self.assertEqual(ref.obj, obj)

    def test_can_reference_non_existent(self):
        # With db_constraint=False a dangling FK id can be stored...
        self.assertFalse(models.Object.objects.filter(id=12345).exists())
        ref = models.ObjectReference.objects.create(obj_id=12345)
        ref_new = models.ObjectReference.objects.get(obj_id=12345)
        self.assertEqual(ref, ref_new)
        # ...but dereferencing it raises DoesNotExist.
        with self.assertRaises(models.Object.DoesNotExist):
            ref.obj

    def test_many_to_many(self):
        obj = models.Object.objects.create()
        obj.related_objects.create()
        self.assertEqual(models.Object.objects.count(), 2)
        self.assertEqual(obj.related_objects.count(), 1)
        # Insert a dangling row directly into the M2M through table; it must
        # be stored but excluded from the related manager's results.
        intermediary_model = models.Object._meta.get_field_by_name("related_objects")[0].rel.through
        intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
        self.assertEqual(obj.related_objects.count(), 1)
        self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(TestCase):

    def test_format_number(self):
        """
        Test the format_number converter utility
        """
        # Each row is (value, max_digits, decimal_places, expected string).
        cases = (
            ('0', 12, 3, '0.000'),
            ('0', 12, 8, '0.00000000'),
            ('1', 12, 9, '1.000000000'),
            ('0.00000000', 12, 8, '0.00000000'),
            ('0.000000004', 12, 8, '0.00000000'),
            ('0.000000008', 12, 8, '0.00000001'),
            ('0.000000000000000000999', 10, 8, '0.00000000'),
            ('0.1234567890', 12, 10, '0.1234567890'),
            ('0.1234567890', 12, 9, '0.123456789'),
            ('0.1234567890', 12, 8, '0.12345679'),
            ('0.1234567890', 12, 5, '0.12346'),
            ('0.1234567890', 12, 3, '0.123'),
            ('0.1234567890', 12, 1, '0.1'),
            ('0.1234567890', 12, 0, '0'),
        )
        for value, max_d, places, expected in cases:
            self.assertEqual(format_number(Decimal(value), max_d, places),
                             expected)
class DBTestSettingsRenamedTests(IgnoreAllDeprecationWarningsMixin, TestCase):
    """Checks for the TEST dict vs. legacy TEST_* database settings."""

    mismatch_msg = ("Connection 'test-deprecation' has mismatched TEST "
                    "and TEST_* database settings.")

    @classmethod
    def setUpClass(cls):
        # Silence "UserWarning: Overriding setting DATABASES can lead to
        # unexpected behavior."
        cls.warning_classes.append(UserWarning)

    def setUp(self):
        super(DBTestSettingsRenamedTests, self).setUp()
        self.handler = ConnectionHandler()
        self.db_settings = {'default': {}}

    def test_mismatched_database_test_settings_1(self):
        # if the TEST setting is used, all TEST_* keys must appear in it.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {},
                'TEST_NAME': 'foo',
            }
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_database_test_settings_2(self):
        # if the TEST setting is used, all TEST_* keys must match.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'NAME': 'foo'},
                'TEST_NAME': 'bar',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_database_test_settings_3(self):
        # Verifies the mapping of an aliased key.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'CREATE_DB': 'foo'},
                'TEST_CREATE': 'bar',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_database_test_settings_4(self):
        # Verifies the mapping of an aliased key when the aliased key is missing.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {},
                'TEST_CREATE': 'bar',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_settings_old_none(self):
        # A None old-style value does not match an empty-string new value.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'CREATE_DB': None},
                'TEST_CREATE': '',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_mismatched_settings_new_none(self):
        # A missing new-style key does not match an explicit None old value.
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {},
                'TEST_CREATE': None,
            },
        })
        with override_settings(DATABASES=self.db_settings):
            with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
                self.handler.prepare_test_settings('test-deprecation')

    def test_matched_test_settings(self):
        # should be able to define new settings and the old, if they match
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'NAME': 'foo'},
                'TEST_NAME': 'foo',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            self.handler.prepare_test_settings('test-deprecation')

    def test_new_settings_only(self):
        # should be able to define new settings without the old
        self.db_settings.update({
            'test-deprecation': {
                'TEST': {'NAME': 'foo'},
            },
        })
        with override_settings(DATABASES=self.db_settings):
            self.handler.prepare_test_settings('test-deprecation')

    def test_old_settings_only(self):
        # should be able to define old settings without the new
        self.db_settings.update({
            'test-deprecation': {
                'TEST_NAME': 'foo',
            },
        })
        with override_settings(DATABASES=self.db_settings):
            self.handler.prepare_test_settings('test-deprecation')

    def test_empty_settings(self):
        # No TEST settings at all is always valid.
        with override_settings(DATABASES=self.db_settings):
            self.handler.prepare_test_settings('default')
| bsd-3-clause |
ryfeus/lambda-packs | Pyrestest_wrk/source/pip/_vendor/distlib/metadata.py | 427 | 38314 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Implementation of the Metadata for Python packages PEPs.
Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental).
"""
from __future__ import unicode_literals
import codecs
from email import message_from_file
import json
import logging
import re
from . import DistlibException, __version__
from .compat import StringIO, string_types, text_type
from .markers import interpret
from .util import extract_by_key, get_extras
from .version import get_scheme, PEP440_VERSION_RE
logger = logging.getLogger(__name__)
class MetadataMissingError(DistlibException):
    """A required metadata field is missing."""
class MetadataConflictError(DistlibException):
    """Attempt to read or write metadata fields that are conflictual."""
class MetadataUnrecognizedVersionError(DistlibException):
    """Unknown metadata version number."""
class MetadataInvalidError(DistlibException):
    """A metadata value is invalid."""
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']

# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'

# preferred version. Hopefully will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
# Prefix that marks continuation lines of a serialized Description field.
_LINE_PREFIX = re.compile('\n \|')

# Field names defined by PEP 241 (Metadata-Version 1.0).
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
               'Summary', 'Description',
               'Keywords', 'Home-page', 'Author', 'Author-email',
               'License')

# Field names defined by PEP 314 (Metadata-Version 1.1).
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
               'Supported-Platform', 'Summary', 'Description',
               'Keywords', 'Home-page', 'Author', 'Author-email',
               'License', 'Classifier', 'Download-URL', 'Obsoletes',
               'Provides', 'Requires')

# Fields new in 1.1 — their presence marks metadata as (at least) 1.1.
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
                'Download-URL')

# Field names defined by PEP 345 (Metadata-Version 1.2).
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
               'Supported-Platform', 'Summary', 'Description',
               'Keywords', 'Home-page', 'Author', 'Author-email',
               'Maintainer', 'Maintainer-email', 'License',
               'Classifier', 'Download-URL', 'Obsoletes-Dist',
               'Project-URL', 'Provides-Dist', 'Requires-Dist',
               'Requires-Python', 'Requires-External')

# Fields whose presence marks metadata as (at least) 1.2.
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
                'Obsoletes-Dist', 'Requires-External', 'Maintainer',
                'Maintainer-email', 'Project-URL')

# Field names from the PEP 426 draft (Metadata-Version 2.0).
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
               'Supported-Platform', 'Summary', 'Description',
               'Keywords', 'Home-page', 'Author', 'Author-email',
               'Maintainer', 'Maintainer-email', 'License',
               'Classifier', 'Download-URL', 'Obsoletes-Dist',
               'Project-URL', 'Provides-Dist', 'Requires-Dist',
               'Requires-Python', 'Requires-External', 'Private-Version',
               'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
               'Provides-Extra')

# Fields whose presence marks metadata as 2.0.
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
                'Setup-Requires-Dist', 'Extension')

# Union of all known field names, across all metadata versions.
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)

# Matches an 'extra == "name"' clause inside an environment marker.
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
    """Return the tuple of field names belonging to metadata *version*.

    Raises MetadataUnrecognizedVersionError for an unknown version string.
    """
    fields_by_version = {
        '1.0': _241_FIELDS,
        '1.1': _314_FIELDS,
        '1.2': _345_FIELDS,
        '2.0': _426_FIELDS,
    }
    try:
        return fields_by_version[version]
    except KeyError:
        raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
    """Detect the best version depending on the fields used."""
    def _has_marker(keys, markers):
        # True if any version-specific marker field is among the keys.
        for marker in markers:
            if marker in keys:
                return True
        return False

    # Only consider fields that actually carry a value.
    keys = []
    for key, value in fields.items():
        if value in ([], 'UNKNOWN', None):
            continue
        keys.append(key)

    possible_versions = ['1.0', '1.1', '1.2', '2.0']

    # first let's try to see if a field is not part of one of the version
    for key in keys:
        if key not in _241_FIELDS and '1.0' in possible_versions:
            possible_versions.remove('1.0')
        if key not in _314_FIELDS and '1.1' in possible_versions:
            possible_versions.remove('1.1')
        if key not in _345_FIELDS and '1.2' in possible_versions:
            possible_versions.remove('1.2')
        if key not in _426_FIELDS and '2.0' in possible_versions:
            possible_versions.remove('2.0')

    # possible_version contains qualified versions
    if len(possible_versions) == 1:
        return possible_versions[0]   # found !
    elif len(possible_versions) == 0:
        raise MetadataConflictError('Unknown metadata set')

    # let's see if one unique marker is found
    is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
    is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
    is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
    if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1:
        raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields')

    # we have the choice, 1.0, or 1.2, or 2.0
    #   - 1.0 has a broken Summary field but works with all tools
    #   - 1.1 is to avoid
    #   - 1.2 fixes Summary but has little adoption
    #   - 2.0 adds more features and is very new
    if not is_1_1 and not is_1_2 and not is_2_0:
        # we couldn't find any specific marker
        if PKG_INFO_PREFERRED_VERSION in possible_versions:
            return PKG_INFO_PREFERRED_VERSION
        # NOTE(review): when no marker matched and the preferred version is
        # not possible, control falls through and '2.0' is returned below —
        # verify this fall-through is intended.
    if is_1_1:
        return '1.1'
    if is_1_2:
        return '1.2'

    return '2.0'
# Attribute-style name (underscored, lowercase) -> metadata field name.
_ATTR2FIELD = {
    'metadata_version': 'Metadata-Version',
    'name': 'Name',
    'version': 'Version',
    'platform': 'Platform',
    'supported_platform': 'Supported-Platform',
    'summary': 'Summary',
    'description': 'Description',
    'keywords': 'Keywords',
    'home_page': 'Home-page',
    'author': 'Author',
    'author_email': 'Author-email',
    'maintainer': 'Maintainer',
    'maintainer_email': 'Maintainer-email',
    'license': 'License',
    'classifier': 'Classifier',
    'download_url': 'Download-URL',
    'obsoletes_dist': 'Obsoletes-Dist',
    'provides_dist': 'Provides-Dist',
    'requires_dist': 'Requires-Dist',
    'setup_requires_dist': 'Setup-Requires-Dist',
    'requires_python': 'Requires-Python',
    'requires_external': 'Requires-External',
    'requires': 'Requires',
    'provides': 'Provides',
    'obsoletes': 'Obsoletes',
    'project_url': 'Project-URL',
    'private_version': 'Private-Version',
    'obsoleted_by': 'Obsoleted-By',
    'extension': 'Extension',
    'provides_extra': 'Provides-Extra',
}

# Fields whose values are dependency predicates (name plus version matcher).
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
# Fields holding a list of version constraints.
_VERSIONS_FIELDS = ('Requires-Python',)
# Fields holding a single version string.
_VERSION_FIELDS = ('Version',)
# Fields that may occur multiple times; stored internally as lists.
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
               'Requires', 'Provides', 'Obsoletes-Dist',
               'Provides-Dist', 'Requires-Dist', 'Requires-External',
               'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
               'Provides-Extra', 'Extension')
# List fields whose elements are tuples, serialized comma-joined.
_LISTTUPLEFIELDS = ('Project-URL',)
# Fields stored as a comma-separated sequence of elements.
_ELEMENTSFIELD = ('Keywords',)
# Free-text fields kept as unicode.
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')

# Sentinel distinguishing "no default supplied" from an explicit None.
_MISSING = object()

# Runs of characters that must be collapsed in filename-safe names.
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
def _get_name_and_version(name, version, for_filename=False):
"""Return the distribution name with version.
If for_filename is true, return a filename-escaped form."""
if for_filename:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version)
class LegacyMetadata(object):
"""The legacy metadata of a release.
Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj* give a file-like object with metadata as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document the mapping API and UNKNOWN default key
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependencies = None
self.scheme = scheme
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, fileobj, name, value):
fileobj.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or
self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _remove_line_prefix(self, value):
return _LINE_PREFIX.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
#
# Public API
#
# dependencies = property(_get_dependencies, _set_dependencies)
def get_fullname(self, filesafe=False):
"""Return the distribution name with version.
If filesafe is true, return a filename-escaped form."""
return _get_name_and_version(self['Name'], self['Version'], filesafe)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
# When reading, get all the fields we can
for field in _ALL_FIELDS:
if field not in msg:
continue
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
they are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
'%r: %r is not valid (field %r)',
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
value = self._fields[name]
if isinstance(value, string_types):
return value.split(',')
return self._fields[name]
def check(self, strict=False):
"""Check if the metadata is compliant. If strict is True then raise if
no Name or Version are provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS,
scheme.is_valid_constraint_list),
(_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append('Wrong value for %r: %s' % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
"""
self.set_metadata_version()
mapping_1_0 = (
('metadata_version', 'Metadata-Version'),
('name', 'Name'),
('version', 'Version'),
('summary', 'Summary'),
('home_page', 'Home-page'),
('author', 'Author'),
('author_email', 'Author-email'),
('license', 'License'),
('description', 'Description'),
('keywords', 'Keywords'),
('platform', 'Platform'),
('classifier', 'Classifier'),
('download_url', 'Download-URL'),
)
data = {}
for key, field_name in mapping_1_0:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
if self['Metadata-Version'] == '1.2':
mapping_1_2 = (
('requires_dist', 'Requires-Dist'),
('requires_python', 'Requires-Python'),
('requires_external', 'Requires-External'),
('provides_dist', 'Provides-Dist'),
('obsoletes_dist', 'Obsoletes-Dist'),
('project_url', 'Project-URL'),
('maintainer', 'Maintainer'),
('maintainer_email', 'Maintainer-email'),
)
for key, field_name in mapping_1_2:
if not skip_missing or field_name in self._fields:
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
elif self['Metadata-Version'] == '1.1':
mapping_1_1 = (
('provides', 'Provides'),
('requires', 'Requires'),
('obsoletes', 'Obsoletes'),
)
for key, field_name in mapping_1_1:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
return data
    def add_requirements(self, requirements):
        """Append *requirements* to the 'Requires-Dist' field.

        If the instance currently claims metadata 1.1, its 1.1-only
        dependency fields are removed first, since they cannot coexist
        with 'Requires-Dist'.
        """
        if self['Metadata-Version'] == '1.1':
            # we can't have 1.1 metadata *and* Setuptools requires
            for field in ('Obsoletes', 'Requires', 'Provides'):
                if field in self:
                    del self[field]
        self['Requires-Dist'] += requirements
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name,
self.version)
# Name of the JSON metadata file read and written by the Metadata class.
METADATA_FILENAME = 'pydist.json'
class Metadata(object):
    """
    The metadata of a release. This implementation uses 2.0 (JSON)
    metadata where possible. If not possible, it wraps a LegacyMetadata
    instance which handles the key-value metadata format.
    """
    # Raw strings avoid invalid-escape-sequence warnings for '\d' and '\.'
    # on Python 3.6+ (a SyntaxWarning from 3.12); patterns are unchanged.
    METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$')
    NAME_MATCHER = re.compile(r'^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
    VERSION_MATCHER = PEP440_VERSION_RE
    # A summary is limited to a single line of at most 2047 characters.
    SUMMARY_MATCHER = re.compile('.{1,2047}')
    METADATA_VERSION = '2.0'
    GENERATOR = 'distlib (%s)' % __version__
    # Keys which must be present; the value is a tuple of scheme names for
    # which the requirement is waived.
    MANDATORY_KEYS = {
        'name': (),
        'version': (),
        'summary': ('legacy',),
    }
    # Keys exposed by todict() for index-style consumption.
    INDEX_KEYS = ('name version license summary description author '
                  'author_email keywords platform home_page classifiers '
                  'download_url')
    # Keys holding dependency information, extracted by the `dependencies`
    # property.
    DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
                       'dev_requires provides meta_requires obsoleted_by '
                       'supports_environments')
    # key -> (compiled matcher, scheme names for which validation is skipped)
    SYNTAX_VALIDATORS = {
        'metadata_version': (METADATA_VERSION_MATCHER, ()),
        'name': (NAME_MATCHER, ('legacy',)),
        'version': (VERSION_MATCHER, ('legacy',)),
        'summary': (SUMMARY_MATCHER, ('legacy',)),
    }
    __slots__ = ('_legacy', '_data', 'scheme')
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._legacy = None
self._data = None
self.scheme = scheme
#import pdb; pdb.set_trace()
if mapping is not None:
try:
self._validate_mapping(mapping, scheme)
self._data = mapping
except MetadataUnrecognizedVersionError:
self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
self.validate()
else:
data = None
if path:
with open(path, 'rb') as f:
data = f.read()
elif fileobj:
data = fileobj.read()
if data is None:
# Initialised with no args - to be added
self._data = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
else:
if not isinstance(data, text_type):
data = data.decode('utf-8')
try:
self._data = json.loads(data)
self._validate_mapping(self._data, scheme)
except ValueError:
# Note: MetadataUnrecognizedVersionError does not
# inherit from ValueError (it's a DistlibException,
# which should not inherit from ValueError).
# The ValueError comes from the json.load - if that
# succeeds and we get a validation error, we want
# that to propagate
self._legacy = LegacyMetadata(fileobj=StringIO(data),
scheme=scheme)
self.validate()
common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
none_list = (None, list)
none_dict = (None, dict)
mapped_keys = {
'run_requires': ('Requires-Dist', list),
'build_requires': ('Setup-Requires-Dist', list),
'dev_requires': none_list,
'test_requires': none_list,
'meta_requires': none_list,
'extras': ('Provides-Extra', list),
'modules': none_list,
'namespaces': none_list,
'exports': none_dict,
'commands': none_dict,
'classifiers': ('Classifier', list),
'source_url': ('Download-URL', None),
'metadata_version': ('Metadata-Version', None),
}
del none_list, none_dict
    def __getattribute__(self, key):
        """Route attribute reads through the underlying metadata store."""
        common = object.__getattribute__(self, 'common_keys')
        mapped = object.__getattribute__(self, 'mapped_keys')
        if key in mapped:
            lk, maker = mapped[key]
            if self._legacy:
                if lk is None:
                    # No legacy equivalent: return the default value.
                    result = None if maker is None else maker()
                else:
                    result = self._legacy.get(lk)
            else:
                value = None if maker is None else maker()
                if key not in ('commands', 'exports', 'modules', 'namespaces',
                               'classifiers'):
                    result = self._data.get(key, value)
                else:
                    # special cases for PEP 459
                    sentinel = object()
                    result = sentinel
                    d = self._data.get('extensions')
                    if d:
                        if key == 'commands':
                            result = d.get('python.commands', value)
                        elif key == 'classifiers':
                            d = d.get('python.details')
                            if d:
                                result = d.get(key, value)
                        else:
                            d = d.get('python.exports')
                            if d:
                                result = d.get(key, value)
                    if result is sentinel:
                        # Extension data absent: fall back to the default.
                        result = value
        elif key not in common:
            # Ordinary attribute/method lookup (e.g. _legacy, _data, scheme).
            result = object.__getattribute__(self, key)
        elif self._legacy:
            result = self._legacy.get(key)
        else:
            result = self._data.get(key)
        return result
    def _validate_value(self, key, value, scheme=None):
        """Raise MetadataInvalidError if *value* fails *key*'s syntax check.

        Keys without an entry in SYNTAX_VALIDATORS are accepted as-is, as
        are keys whose validator is excluded for the active scheme.
        """
        if key in self.SYNTAX_VALIDATORS:
            pattern, exclusions = self.SYNTAX_VALIDATORS[key]
            if (scheme or self.scheme) not in exclusions:
                m = pattern.match(value)
                if not m:
                    raise MetadataInvalidError('%r is an invalid value for '
                                               'the %r property' % (value,
                                                                    key))
    def __setattr__(self, key, value):
        """Route attribute writes into the underlying metadata store."""
        # Syntax-check known keys first; raises MetadataInvalidError.
        self._validate_value(key, value)
        common = object.__getattribute__(self, 'common_keys')
        mapped = object.__getattribute__(self, 'mapped_keys')
        if key in mapped:
            lk, _ = mapped[key]
            if self._legacy:
                if lk is None:
                    # No legacy equivalent for this key.
                    raise NotImplementedError
                self._legacy[lk] = value
            elif key not in ('commands', 'exports', 'modules', 'namespaces',
                             'classifiers'):
                self._data[key] = value
            else:
                # special cases for PEP 459
                d = self._data.setdefault('extensions', {})
                if key == 'commands':
                    d['python.commands'] = value
                elif key == 'classifiers':
                    d = d.setdefault('python.details', {})
                    d[key] = value
                else:
                    d = d.setdefault('python.exports', {})
                    d[key] = value
        elif key not in common:
            # Ordinary instance attribute (e.g. _legacy, _data, scheme).
            object.__setattr__(self, key, value)
        else:
            if key == 'keywords':
                # Accept a whitespace-separated string; normalise to a list.
                if isinstance(value, string_types):
                    value = value.strip()
                    if value:
                        value = value.split()
                    else:
                        value = []
            if self._legacy:
                self._legacy[key] = value
            else:
                self._data[key] = value
    @property
    def name_and_version(self):
        """Return the release formatted by the module-level helper."""
        return _get_name_and_version(self.name, self.version, True)
    @property
    def provides(self):
        """List of 'name (version)' strings the release provides.

        The release's own 'name (version)' entry is always included; if
        missing it is appended to the stored list as a side effect.
        """
        if self._legacy:
            result = self._legacy['Provides-Dist']
        else:
            result = self._data.setdefault('provides', [])
        s = '%s (%s)' % (self.name, self.version)
        if s not in result:
            result.append(s)
        return result
    @provides.setter
    def provides(self, value):
        # Overwrite the stored list in whichever backend is active.
        if self._legacy:
            self._legacy['Provides-Dist'] = value
        else:
            self._data['provides'] = value
    def get_requirements(self, reqts, extras=None, env=None):
        """
        Base method to get dependencies, given a set of extras
        to satisfy and an optional environment context.
        :param reqts: A list of sometimes-wanted dependencies,
                      perhaps dependent on extras and environment.
        :param extras: A list of optional components being requested.
        :param env: An optional environment for marker evaluation.
        :return: The list of requirement strings that apply. For legacy
                 metadata, *reqts* is returned unchanged.
        """
        if self._legacy:
            result = reqts
        else:
            result = []
            extras = get_extras(extras or [], self.extras)
            for d in reqts:
                if 'extra' not in d and 'environment' not in d:
                    # unconditional
                    include = True
                else:
                    if 'extra' not in d:
                        # Not extra-dependent - only environment-dependent
                        include = True
                    else:
                        include = d.get('extra') in extras
                    if include:
                        # Not excluded because of extras, check environment
                        marker = d.get('environment')
                        if marker:
                            include = interpret(marker, env)
                if include:
                    result.extend(d['requires'])
            # Pseudo-extras ':build:', ':dev:', ':test:' pull in the
            # corresponding *_requires sections.
            for key in ('build', 'dev', 'test'):
                e = ':%s:' % key
                if e in extras:
                    extras.remove(e)
                    # A recursive call, but it should terminate since 'test'
                    # has been removed from the extras
                    reqts = self._data.get('%s_requires' % key, [])
                    result.extend(self.get_requirements(reqts, extras=extras,
                                                        env=env))
        return result
@property
def dictionary(self):
if self._legacy:
return self._from_legacy()
return self._data
    @property
    def dependencies(self):
        """Sub-dict of the metadata holding only DEPENDENCY_KEYS entries.

        :raises NotImplementedError: for legacy-format metadata.
        """
        if self._legacy:
            raise NotImplementedError
        else:
            return extract_by_key(self._data, self.DEPENDENCY_KEYS)
    @dependencies.setter
    def dependencies(self, value):
        # Merge *value* into the stored data; legacy format unsupported.
        if self._legacy:
            raise NotImplementedError
        else:
            self._data.update(value)
    def _validate_mapping(self, mapping, scheme):
        """Validate *mapping* as 2.0 metadata.

        :raises MetadataUnrecognizedVersionError: if metadata_version
            differs from METADATA_VERSION.
        :raises MetadataMissingError: if mandatory keys are absent.
        :raises MetadataInvalidError: if a value fails its syntax check.
        """
        if mapping.get('metadata_version') != self.METADATA_VERSION:
            raise MetadataUnrecognizedVersionError()
        missing = []
        for key, exclusions in self.MANDATORY_KEYS.items():
            if key not in mapping:
                if scheme not in exclusions:
                    missing.append(key)
        if missing:
            msg = 'Missing metadata items: %s' % ', '.join(missing)
            raise MetadataMissingError(msg)
        for k, v in mapping.items():
            self._validate_value(k, v, scheme)
def validate(self):
if self._legacy:
missing, warnings = self._legacy.check(True)
if missing or warnings:
logger.warning('Metadata: missing: %s, warnings: %s',
missing, warnings)
else:
self._validate_mapping(self._data, self.scheme)
def todict(self):
if self._legacy:
return self._legacy.todict(True)
else:
result = extract_by_key(self._data, self.INDEX_KEYS)
return result
def _from_legacy(self):
assert self._legacy and not self._data
result = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
lmd = self._legacy.todict(True) # skip missing ones
for k in ('name', 'version', 'license', 'summary', 'description',
'classifier'):
if k in lmd:
if k == 'classifier':
nk = 'classifiers'
else:
nk = k
result[nk] = lmd[k]
kw = lmd.get('Keywords', [])
if kw == ['']:
kw = []
result['keywords'] = kw
keys = (('requires_dist', 'run_requires'),
('setup_requires_dist', 'build_requires'))
for ok, nk in keys:
if ok in lmd and lmd[ok]:
result[nk] = [{'requires': lmd[ok]}]
result['provides'] = self.provides
author = {}
maintainer = {}
return result
LEGACY_MAPPING = {
'name': 'Name',
'version': 'Version',
'license': 'License',
'summary': 'Summary',
'description': 'Description',
'classifiers': 'Classifier',
}
    def _to_legacy(self):
        """Convert 2.0-style metadata to a LegacyMetadata instance.

        Only callable when this instance holds 2.0 data.
        """
        def process_entries(entries):
            # Flatten requirement entries into 'req;marker' strings,
            # encoding extra/environment conditions as markers.
            reqts = set()
            for e in entries:
                extra = e.get('extra')
                env = e.get('environment')
                rlist = e['requires']
                for r in rlist:
                    if not env and not extra:
                        reqts.add(r)
                    else:
                        marker = ''
                        if extra:
                            marker = 'extra == "%s"' % extra
                        if env:
                            if marker:
                                marker = '(%s) and %s' % (env, marker)
                            else:
                                marker = env
                        reqts.add(';'.join((r, marker)))
            return reqts
        assert self._data and not self._legacy
        result = LegacyMetadata()
        nmd = self._data
        for nk, ok in self.LEGACY_MAPPING.items():
            if nk in nmd:
                result[ok] = nmd[nk]
        r1 = process_entries(self.run_requires + self.meta_requires)
        r2 = process_entries(self.build_requires + self.dev_requires)
        if self.extras:
            result['Provides-Extra'] = sorted(self.extras)
        result['Requires-Dist'] = sorted(r1)
        result['Setup-Requires-Dist'] = sorted(r2)
        # TODO: other fields such as contacts
        return result
    def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
        """Serialise the metadata to *path* or *fileobj*.

        :param legacy: If true, write the key-value legacy format
            (converting from 2.0 data if necessary); otherwise write JSON.
        :param skip_unknown: Passed through to the legacy writer.
        :raises ValueError: Unless exactly one of *path*/*fileobj* is given.
        """
        if [path, fileobj].count(None) != 1:
            raise ValueError('Exactly one of path and fileobj is needed')
        # Refuse to write metadata that does not validate.
        self.validate()
        if legacy:
            if self._legacy:
                legacy_md = self._legacy
            else:
                legacy_md = self._to_legacy()
            if path:
                legacy_md.write(path, skip_unknown=skip_unknown)
            else:
                legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
        else:
            if self._legacy:
                d = self._from_legacy()
            else:
                d = self._data
            if fileobj:
                json.dump(d, fileobj, ensure_ascii=True, indent=2,
                          sort_keys=True)
            else:
                with codecs.open(path, 'w', 'utf-8') as f:
                    json.dump(d, f, ensure_ascii=True, indent=2,
                              sort_keys=True)
    def add_requirements(self, requirements):
        """Add *requirements* to the unconditional run-time dependencies."""
        if self._legacy:
            self._legacy.add_requirements(requirements)
        else:
            run_requires = self._data.setdefault('run_requires', [])
            # Find the entry with no extra/environment condition, if any.
            always = None
            for entry in run_requires:
                if 'environment' not in entry and 'extra' not in entry:
                    always = entry
                    break
            if always is None:
                always = { 'requires': requirements }
                run_requires.insert(0, always)
            else:
                # Merge, de-duplicate and keep a deterministic order.
                rset = set(always['requires']) | set(requirements)
                always['requires'] = sorted(rset)
def __repr__(self):
name = self.name or '(no name)'
version = self.version or 'no version'
return '<%s %s %s (%s)>' % (self.__class__.__name__,
self.metadata_version, name, version)
| mit |
kenshay/ImageScripter | ProgramData/Android/ADB/platform-tools/systrace/catapult/systrace/profile_chrome/fake_agent_1.py | 11 | 1814 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import tempfile
from systrace import trace_result
from systrace import tracing_agents
class FakeAgent(object):
  """In-memory tracing agent stub used by tests.

  Records the config/timeout it was started with and serves canned trace
  contents through a temporary file.
  """

  def __init__(self, contents='fake-contents'):
    self.contents = contents   # payload served as the fake trace
    self.stopped = False       # set once StopAgentTracing() runs
    self.filename = None       # temp-file path, set by _PullTrace()
    self.config = None         # config passed to StartAgentTracing()
    self.timeout = None        # timeout passed to StartAgentTracing()

  def StartAgentTracing(self, config, timeout=None):
    """Record the start parameters; always reports success."""
    self.config = config
    self.timeout = timeout
    return True

  # pylint: disable=unused-argument
  def StopAgentTracing(self, timeout=None):
    """Mark the agent as stopped; always reports success."""
    self.stopped = True
    return True

  # pylint: disable=unused-argument
  def GetResults(self, timeout=None):
    """Return the fake trace wrapped in a TraceResult.

    Uses a context manager so the file handle is closed (the previous
    bare open(...).read() leaked it).
    """
    with open(self._PullTrace()) as f:
      trace_data = f.read()
    return trace_result.TraceResult('fakeData', trace_data)

  def _PullTrace(self):
    """Write self.contents to a temp file and return its path."""
    # NOTE(review): NamedTemporaryFile defaults to binary mode; writing a
    # str here is Python 2 behaviour - confirm before running on Python 3.
    with tempfile.NamedTemporaryFile(delete=False) as f:
      self.filename = f.name
      f.write(self.contents)
    return f.name

  # pylint: disable=no-self-use
  def SupportsExplicitClockSync(self):
    """This fake agent never supports explicit clock sync."""
    return False

  # pylint: disable=unused-argument, no-self-use
  def RecordClockSyncMarker(self, sync_id, did_record_sync_marker_callback):
    print ('Clock sync marker cannot be recorded since explicit clock sync '
           'is not supported.')

  def __repr__(self):
    return 'faketrace'
class FakeConfig(tracing_agents.TracingConfig):
  """Minimal TracingConfig stand-in; adds no options of its own."""
  def __init__(self):
    tracing_agents.TracingConfig.__init__(self)
# pylint: disable=unused-argument
def try_create_agent(config):
  """Module entry point: return a FakeAgent regardless of *config*."""
  return FakeAgent()
def add_options(parser):
  """Create and return this module's (empty) option group."""
  return optparse.OptionGroup(parser, 'Fake options.')
# pylint: disable=unused-argument
def get_config(options):
  """Module entry point: return a fresh FakeConfig regardless of *options*."""
  return FakeConfig()
| gpl-3.0 |
mailgun/kafka-pixy | gen/python/kafkapixy_pb2.py | 1 | 52185 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kafkapixy.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='kafkapixy.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0fkafkapixy.proto\"*\n\x0cRecordHeader\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\"\x97\x01\n\x06ProdRq\x12\x0f\n\x07\x63luster\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x11\n\tkey_value\x18\x03 \x01(\x0c\x12\x15\n\rkey_undefined\x18\x04 \x01(\x08\x12\x0f\n\x07message\x18\x05 \x01(\x0c\x12\x12\n\nasync_mode\x18\x06 \x01(\x08\x12\x1e\n\x07headers\x18\x07 \x03(\x0b\x32\r.RecordHeader\"+\n\x06ProdRs\x12\x11\n\tpartition\x18\x01 \x01(\x05\x12\x0e\n\x06offset\x18\x02 \x01(\x03\"\x88\x01\n\nConsNAckRq\x12\x0f\n\x07\x63luster\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\r\n\x05group\x18\x03 \x01(\t\x12\x0e\n\x06no_ack\x18\x04 \x01(\x08\x12\x10\n\x08\x61uto_ack\x18\x05 \x01(\x08\x12\x15\n\rack_partition\x18\x06 \x01(\x05\x12\x12\n\nack_offset\x18\x07 \x01(\x03\"\x86\x01\n\x06\x43onsRs\x12\x11\n\tpartition\x18\x01 \x01(\x05\x12\x0e\n\x06offset\x18\x02 \x01(\x03\x12\x11\n\tkey_value\x18\x03 \x01(\x0c\x12\x15\n\rkey_undefined\x18\x04 \x01(\x08\x12\x0f\n\x07message\x18\x05 \x01(\x0c\x12\x1e\n\x07headers\x18\x06 \x03(\x0b\x32\r.RecordHeader\"Y\n\x05\x41\x63kRq\x12\x0f\n\x07\x63luster\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\r\n\x05group\x18\x03 \x01(\t\x12\x11\n\tpartition\x18\x04 \x01(\x05\x12\x0e\n\x06offset\x18\x05 \x01(\x03\"\x07\n\x05\x41\x63kRs\"\x93\x01\n\x0fPartitionOffset\x12\x11\n\tpartition\x18\x01 \x01(\x05\x12\r\n\x05\x62\x65gin\x18\x02 \x01(\x03\x12\x0b\n\x03\x65nd\x18\x03 \x01(\x03\x12\r\n\x05\x63ount\x18\x04 \x01(\x03\x12\x0e\n\x06offset\x18\x05 \x01(\x03\x12\x0b\n\x03lag\x18\x06 \x01(\x03\x12\x10\n\x08metadata\x18\x07 \x01(\t\x12\x13\n\x0bsparse_acks\x18\x08 \x01(\t\"=\n\x0cGetOffsetsRq\x12\x0f\n\x07\x63luster\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\r\n\x05group\x18\x03 \x01(\t\"1\n\x0cGetOffsetsRs\x12!\n\x07offsets\x18\x01 \x03(\x0b\x32\x10.PartitionOffset\"U\n\x11PartitionMetadata\x12\x11\n\tpartition\x18\x01 \x01(\x05\x12\x0e\n\x06leader\x18\x02 
\x01(\x05\x12\x10\n\x08replicas\x18\x03 \x03(\x05\x12\x0b\n\x03isr\x18\x04 \x03(\x05\"M\n\x12GetTopicMetadataRq\x12\x0f\n\x07\x63luster\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x17\n\x0fwith_partitions\x18\x03 \x01(\x08\"\xad\x01\n\x12GetTopicMetadataRs\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12/\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x1f.GetTopicMetadataRs.ConfigEntry\x12&\n\npartitions\x18\x03 \x03(\x0b\x32\x12.PartitionMetadata\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"{\n\x0bListTopicRs\x12(\n\x06topics\x18\x01 \x03(\x0b\x32\x18.ListTopicRs.TopicsEntry\x1a\x42\n\x0bTopicsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.GetTopicMetadataRs:\x02\x38\x01\"7\n\x0bListTopicRq\x12\x0f\n\x07\x63luster\x18\x01 \x01(\t\x12\x17\n\x0fwith_partitions\x18\x02 \x01(\x08\"@\n\x0fListConsumersRq\x12\x0f\n\x07\x63luster\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\r\n\x05group\x18\x03 \x01(\t\"(\n\x12\x43onsumerPartitions\x12\x12\n\npartitions\x18\x01 \x03(\x05\"\x8a\x01\n\x0e\x43onsumerGroups\x12\x31\n\tconsumers\x18\x01 \x03(\x0b\x32\x1e.ConsumerGroups.ConsumersEntry\x1a\x45\n\x0e\x43onsumersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.ConsumerPartitions:\x02\x38\x01\"\x7f\n\x0fListConsumersRs\x12,\n\x06groups\x18\x01 \x03(\x0b\x32\x1c.ListConsumersRs.GroupsEntry\x1a>\n\x0bGroupsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1e\n\x05value\x18\x02 \x01(\x0b\x32\x0f.ConsumerGroups:\x02\x38\x01\"`\n\x0cSetOffsetsRq\x12\x0f\n\x07\x63luster\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\r\n\x05group\x18\x03 \x01(\t\x12!\n\x07offsets\x18\x04 
\x03(\x0b\x32\x10.PartitionOffset\"\x0e\n\x0cSetOffsetsRs2\xe9\x02\n\tKafkaPixy\x12\x1d\n\x07Produce\x12\x07.ProdRq\x1a\x07.ProdRs\"\x00\x12%\n\x0b\x43onsumeNAck\x12\x0b.ConsNAckRq\x1a\x07.ConsRs\"\x00\x12\x17\n\x03\x41\x63k\x12\x06.AckRq\x1a\x06.AckRs\"\x00\x12,\n\nGetOffsets\x12\r.GetOffsetsRq\x1a\r.GetOffsetsRs\"\x00\x12,\n\nSetOffsets\x12\r.SetOffsetsRq\x1a\r.SetOffsetsRs\"\x00\x12*\n\nListTopics\x12\x0c.ListTopicRq\x1a\x0c.ListTopicRs\"\x00\x12\x35\n\rListConsumers\x12\x10.ListConsumersRq\x1a\x10.ListConsumersRs\"\x00\x12>\n\x10GetTopicMetadata\x12\x13.GetTopicMetadataRq\x1a\x13.GetTopicMetadataRs\"\x00\x42\x04Z\x02pbb\x06proto3')
)
_RECORDHEADER = _descriptor.Descriptor(
name='RecordHeader',
full_name='RecordHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='RecordHeader.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='RecordHeader.value', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=61,
)
_PRODRQ = _descriptor.Descriptor(
name='ProdRq',
full_name='ProdRq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='ProdRq.cluster', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='topic', full_name='ProdRq.topic', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key_value', full_name='ProdRq.key_value', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key_undefined', full_name='ProdRq.key_undefined', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='ProdRq.message', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='async_mode', full_name='ProdRq.async_mode', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headers', full_name='ProdRq.headers', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=215,
)
_PRODRS = _descriptor.Descriptor(
name='ProdRs',
full_name='ProdRs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='partition', full_name='ProdRs.partition', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offset', full_name='ProdRs.offset', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=217,
serialized_end=260,
)
_CONSNACKRQ = _descriptor.Descriptor(
name='ConsNAckRq',
full_name='ConsNAckRq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='ConsNAckRq.cluster', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='topic', full_name='ConsNAckRq.topic', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group', full_name='ConsNAckRq.group', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='no_ack', full_name='ConsNAckRq.no_ack', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='auto_ack', full_name='ConsNAckRq.auto_ack', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ack_partition', full_name='ConsNAckRq.ack_partition', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ack_offset', full_name='ConsNAckRq.ack_offset', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=263,
serialized_end=399,
)
_CONSRS = _descriptor.Descriptor(
name='ConsRs',
full_name='ConsRs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='partition', full_name='ConsRs.partition', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offset', full_name='ConsRs.offset', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key_value', full_name='ConsRs.key_value', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key_undefined', full_name='ConsRs.key_undefined', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='ConsRs.message', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headers', full_name='ConsRs.headers', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=402,
serialized_end=536,
)
_ACKRQ = _descriptor.Descriptor(
name='AckRq',
full_name='AckRq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='AckRq.cluster', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='topic', full_name='AckRq.topic', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group', full_name='AckRq.group', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partition', full_name='AckRq.partition', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offset', full_name='AckRq.offset', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=538,
serialized_end=627,
)
_ACKRS = _descriptor.Descriptor(
name='AckRs',
full_name='AckRs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=629,
serialized_end=636,
)
_PARTITIONOFFSET = _descriptor.Descriptor(
name='PartitionOffset',
full_name='PartitionOffset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='partition', full_name='PartitionOffset.partition', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='begin', full_name='PartitionOffset.begin', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='PartitionOffset.end', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='count', full_name='PartitionOffset.count', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offset', full_name='PartitionOffset.offset', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lag', full_name='PartitionOffset.lag', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='PartitionOffset.metadata', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sparse_acks', full_name='PartitionOffset.sparse_acks', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=639,
serialized_end=786,
)
_GETOFFSETSRQ = _descriptor.Descriptor(
name='GetOffsetsRq',
full_name='GetOffsetsRq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='GetOffsetsRq.cluster', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='topic', full_name='GetOffsetsRq.topic', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group', full_name='GetOffsetsRq.group', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=788,
serialized_end=849,
)
_GETOFFSETSRS = _descriptor.Descriptor(
name='GetOffsetsRs',
full_name='GetOffsetsRs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='offsets', full_name='GetOffsetsRs.offsets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=851,
serialized_end=900,
)
_PARTITIONMETADATA = _descriptor.Descriptor(
name='PartitionMetadata',
full_name='PartitionMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='partition', full_name='PartitionMetadata.partition', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='leader', full_name='PartitionMetadata.leader', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='replicas', full_name='PartitionMetadata.replicas', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isr', full_name='PartitionMetadata.isr', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=902,
serialized_end=987,
)
_GETTOPICMETADATARQ = _descriptor.Descriptor(
name='GetTopicMetadataRq',
full_name='GetTopicMetadataRq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='GetTopicMetadataRq.cluster', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='topic', full_name='GetTopicMetadataRq.topic', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='with_partitions', full_name='GetTopicMetadataRq.with_partitions', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=989,
serialized_end=1066,
)
_GETTOPICMETADATARS_CONFIGENTRY = _descriptor.Descriptor(
name='ConfigEntry',
full_name='GetTopicMetadataRs.ConfigEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='GetTopicMetadataRs.ConfigEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='GetTopicMetadataRs.ConfigEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1197,
serialized_end=1242,
)
_GETTOPICMETADATARS = _descriptor.Descriptor(
name='GetTopicMetadataRs',
full_name='GetTopicMetadataRs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='GetTopicMetadataRs.version', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config', full_name='GetTopicMetadataRs.config', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partitions', full_name='GetTopicMetadataRs.partitions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETTOPICMETADATARS_CONFIGENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1069,
serialized_end=1242,
)
_LISTTOPICRS_TOPICSENTRY = _descriptor.Descriptor(
name='TopicsEntry',
full_name='ListTopicRs.TopicsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ListTopicRs.TopicsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ListTopicRs.TopicsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1301,
serialized_end=1367,
)
_LISTTOPICRS = _descriptor.Descriptor(
name='ListTopicRs',
full_name='ListTopicRs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='topics', full_name='ListTopicRs.topics', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LISTTOPICRS_TOPICSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1244,
serialized_end=1367,
)
_LISTTOPICRQ = _descriptor.Descriptor(
name='ListTopicRq',
full_name='ListTopicRq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='ListTopicRq.cluster', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='with_partitions', full_name='ListTopicRq.with_partitions', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1369,
serialized_end=1424,
)
_LISTCONSUMERSRQ = _descriptor.Descriptor(
name='ListConsumersRq',
full_name='ListConsumersRq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='ListConsumersRq.cluster', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='topic', full_name='ListConsumersRq.topic', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group', full_name='ListConsumersRq.group', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1426,
serialized_end=1490,
)
_CONSUMERPARTITIONS = _descriptor.Descriptor(
name='ConsumerPartitions',
full_name='ConsumerPartitions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='partitions', full_name='ConsumerPartitions.partitions', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1492,
serialized_end=1532,
)
_CONSUMERGROUPS_CONSUMERSENTRY = _descriptor.Descriptor(
name='ConsumersEntry',
full_name='ConsumerGroups.ConsumersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ConsumerGroups.ConsumersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ConsumerGroups.ConsumersEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1604,
serialized_end=1673,
)
_CONSUMERGROUPS = _descriptor.Descriptor(
name='ConsumerGroups',
full_name='ConsumerGroups',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='consumers', full_name='ConsumerGroups.consumers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CONSUMERGROUPS_CONSUMERSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1535,
serialized_end=1673,
)
_LISTCONSUMERSRS_GROUPSENTRY = _descriptor.Descriptor(
name='GroupsEntry',
full_name='ListConsumersRs.GroupsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ListConsumersRs.GroupsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ListConsumersRs.GroupsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1740,
serialized_end=1802,
)
_LISTCONSUMERSRS = _descriptor.Descriptor(
name='ListConsumersRs',
full_name='ListConsumersRs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='groups', full_name='ListConsumersRs.groups', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LISTCONSUMERSRS_GROUPSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1675,
serialized_end=1802,
)
_SETOFFSETSRQ = _descriptor.Descriptor(
name='SetOffsetsRq',
full_name='SetOffsetsRq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='SetOffsetsRq.cluster', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='topic', full_name='SetOffsetsRq.topic', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group', full_name='SetOffsetsRq.group', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offsets', full_name='SetOffsetsRq.offsets', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1804,
serialized_end=1900,
)
# Auto-generated by protoc (do not edit by hand): descriptor for the empty
# SetOffsetsRs message — the fieldless response of the SetOffsets RPC.
_SETOFFSETSRS = _descriptor.Descriptor(
name='SetOffsetsRs',
full_name='SetOffsetsRs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1902,
serialized_end=1916,
)
_PRODRQ.fields_by_name['headers'].message_type = _RECORDHEADER
_CONSRS.fields_by_name['headers'].message_type = _RECORDHEADER
_GETOFFSETSRS.fields_by_name['offsets'].message_type = _PARTITIONOFFSET
_GETTOPICMETADATARS_CONFIGENTRY.containing_type = _GETTOPICMETADATARS
_GETTOPICMETADATARS.fields_by_name['config'].message_type = _GETTOPICMETADATARS_CONFIGENTRY
_GETTOPICMETADATARS.fields_by_name['partitions'].message_type = _PARTITIONMETADATA
_LISTTOPICRS_TOPICSENTRY.fields_by_name['value'].message_type = _GETTOPICMETADATARS
_LISTTOPICRS_TOPICSENTRY.containing_type = _LISTTOPICRS
_LISTTOPICRS.fields_by_name['topics'].message_type = _LISTTOPICRS_TOPICSENTRY
_CONSUMERGROUPS_CONSUMERSENTRY.fields_by_name['value'].message_type = _CONSUMERPARTITIONS
_CONSUMERGROUPS_CONSUMERSENTRY.containing_type = _CONSUMERGROUPS
_CONSUMERGROUPS.fields_by_name['consumers'].message_type = _CONSUMERGROUPS_CONSUMERSENTRY
_LISTCONSUMERSRS_GROUPSENTRY.fields_by_name['value'].message_type = _CONSUMERGROUPS
_LISTCONSUMERSRS_GROUPSENTRY.containing_type = _LISTCONSUMERSRS
_LISTCONSUMERSRS.fields_by_name['groups'].message_type = _LISTCONSUMERSRS_GROUPSENTRY
_SETOFFSETSRQ.fields_by_name['offsets'].message_type = _PARTITIONOFFSET
DESCRIPTOR.message_types_by_name['RecordHeader'] = _RECORDHEADER
DESCRIPTOR.message_types_by_name['ProdRq'] = _PRODRQ
DESCRIPTOR.message_types_by_name['ProdRs'] = _PRODRS
DESCRIPTOR.message_types_by_name['ConsNAckRq'] = _CONSNACKRQ
DESCRIPTOR.message_types_by_name['ConsRs'] = _CONSRS
DESCRIPTOR.message_types_by_name['AckRq'] = _ACKRQ
DESCRIPTOR.message_types_by_name['AckRs'] = _ACKRS
DESCRIPTOR.message_types_by_name['PartitionOffset'] = _PARTITIONOFFSET
DESCRIPTOR.message_types_by_name['GetOffsetsRq'] = _GETOFFSETSRQ
DESCRIPTOR.message_types_by_name['GetOffsetsRs'] = _GETOFFSETSRS
DESCRIPTOR.message_types_by_name['PartitionMetadata'] = _PARTITIONMETADATA
DESCRIPTOR.message_types_by_name['GetTopicMetadataRq'] = _GETTOPICMETADATARQ
DESCRIPTOR.message_types_by_name['GetTopicMetadataRs'] = _GETTOPICMETADATARS
DESCRIPTOR.message_types_by_name['ListTopicRs'] = _LISTTOPICRS
DESCRIPTOR.message_types_by_name['ListTopicRq'] = _LISTTOPICRQ
DESCRIPTOR.message_types_by_name['ListConsumersRq'] = _LISTCONSUMERSRQ
DESCRIPTOR.message_types_by_name['ConsumerPartitions'] = _CONSUMERPARTITIONS
DESCRIPTOR.message_types_by_name['ConsumerGroups'] = _CONSUMERGROUPS
DESCRIPTOR.message_types_by_name['ListConsumersRs'] = _LISTCONSUMERSRS
DESCRIPTOR.message_types_by_name['SetOffsetsRq'] = _SETOFFSETSRQ
DESCRIPTOR.message_types_by_name['SetOffsetsRs'] = _SETOFFSETSRS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RecordHeader = _reflection.GeneratedProtocolMessageType('RecordHeader', (_message.Message,), dict(
DESCRIPTOR = _RECORDHEADER,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:RecordHeader)
))
_sym_db.RegisterMessage(RecordHeader)
ProdRq = _reflection.GeneratedProtocolMessageType('ProdRq', (_message.Message,), dict(
DESCRIPTOR = _PRODRQ,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ProdRq)
))
_sym_db.RegisterMessage(ProdRq)
ProdRs = _reflection.GeneratedProtocolMessageType('ProdRs', (_message.Message,), dict(
DESCRIPTOR = _PRODRS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ProdRs)
))
_sym_db.RegisterMessage(ProdRs)
ConsNAckRq = _reflection.GeneratedProtocolMessageType('ConsNAckRq', (_message.Message,), dict(
DESCRIPTOR = _CONSNACKRQ,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ConsNAckRq)
))
_sym_db.RegisterMessage(ConsNAckRq)
ConsRs = _reflection.GeneratedProtocolMessageType('ConsRs', (_message.Message,), dict(
DESCRIPTOR = _CONSRS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ConsRs)
))
_sym_db.RegisterMessage(ConsRs)
AckRq = _reflection.GeneratedProtocolMessageType('AckRq', (_message.Message,), dict(
DESCRIPTOR = _ACKRQ,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:AckRq)
))
_sym_db.RegisterMessage(AckRq)
AckRs = _reflection.GeneratedProtocolMessageType('AckRs', (_message.Message,), dict(
DESCRIPTOR = _ACKRS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:AckRs)
))
_sym_db.RegisterMessage(AckRs)
PartitionOffset = _reflection.GeneratedProtocolMessageType('PartitionOffset', (_message.Message,), dict(
DESCRIPTOR = _PARTITIONOFFSET,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:PartitionOffset)
))
_sym_db.RegisterMessage(PartitionOffset)
GetOffsetsRq = _reflection.GeneratedProtocolMessageType('GetOffsetsRq', (_message.Message,), dict(
DESCRIPTOR = _GETOFFSETSRQ,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:GetOffsetsRq)
))
_sym_db.RegisterMessage(GetOffsetsRq)
GetOffsetsRs = _reflection.GeneratedProtocolMessageType('GetOffsetsRs', (_message.Message,), dict(
DESCRIPTOR = _GETOFFSETSRS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:GetOffsetsRs)
))
_sym_db.RegisterMessage(GetOffsetsRs)
PartitionMetadata = _reflection.GeneratedProtocolMessageType('PartitionMetadata', (_message.Message,), dict(
DESCRIPTOR = _PARTITIONMETADATA,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:PartitionMetadata)
))
_sym_db.RegisterMessage(PartitionMetadata)
GetTopicMetadataRq = _reflection.GeneratedProtocolMessageType('GetTopicMetadataRq', (_message.Message,), dict(
DESCRIPTOR = _GETTOPICMETADATARQ,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:GetTopicMetadataRq)
))
_sym_db.RegisterMessage(GetTopicMetadataRq)
GetTopicMetadataRs = _reflection.GeneratedProtocolMessageType('GetTopicMetadataRs', (_message.Message,), dict(
ConfigEntry = _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), dict(
DESCRIPTOR = _GETTOPICMETADATARS_CONFIGENTRY,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:GetTopicMetadataRs.ConfigEntry)
))
,
DESCRIPTOR = _GETTOPICMETADATARS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:GetTopicMetadataRs)
))
_sym_db.RegisterMessage(GetTopicMetadataRs)
_sym_db.RegisterMessage(GetTopicMetadataRs.ConfigEntry)
ListTopicRs = _reflection.GeneratedProtocolMessageType('ListTopicRs', (_message.Message,), dict(
TopicsEntry = _reflection.GeneratedProtocolMessageType('TopicsEntry', (_message.Message,), dict(
DESCRIPTOR = _LISTTOPICRS_TOPICSENTRY,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ListTopicRs.TopicsEntry)
))
,
DESCRIPTOR = _LISTTOPICRS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ListTopicRs)
))
_sym_db.RegisterMessage(ListTopicRs)
_sym_db.RegisterMessage(ListTopicRs.TopicsEntry)
ListTopicRq = _reflection.GeneratedProtocolMessageType('ListTopicRq', (_message.Message,), dict(
DESCRIPTOR = _LISTTOPICRQ,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ListTopicRq)
))
_sym_db.RegisterMessage(ListTopicRq)
ListConsumersRq = _reflection.GeneratedProtocolMessageType('ListConsumersRq', (_message.Message,), dict(
DESCRIPTOR = _LISTCONSUMERSRQ,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ListConsumersRq)
))
_sym_db.RegisterMessage(ListConsumersRq)
ConsumerPartitions = _reflection.GeneratedProtocolMessageType('ConsumerPartitions', (_message.Message,), dict(
DESCRIPTOR = _CONSUMERPARTITIONS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ConsumerPartitions)
))
_sym_db.RegisterMessage(ConsumerPartitions)
ConsumerGroups = _reflection.GeneratedProtocolMessageType('ConsumerGroups', (_message.Message,), dict(
ConsumersEntry = _reflection.GeneratedProtocolMessageType('ConsumersEntry', (_message.Message,), dict(
DESCRIPTOR = _CONSUMERGROUPS_CONSUMERSENTRY,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ConsumerGroups.ConsumersEntry)
))
,
DESCRIPTOR = _CONSUMERGROUPS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ConsumerGroups)
))
_sym_db.RegisterMessage(ConsumerGroups)
_sym_db.RegisterMessage(ConsumerGroups.ConsumersEntry)
ListConsumersRs = _reflection.GeneratedProtocolMessageType('ListConsumersRs', (_message.Message,), dict(
GroupsEntry = _reflection.GeneratedProtocolMessageType('GroupsEntry', (_message.Message,), dict(
DESCRIPTOR = _LISTCONSUMERSRS_GROUPSENTRY,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ListConsumersRs.GroupsEntry)
))
,
DESCRIPTOR = _LISTCONSUMERSRS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:ListConsumersRs)
))
_sym_db.RegisterMessage(ListConsumersRs)
_sym_db.RegisterMessage(ListConsumersRs.GroupsEntry)
SetOffsetsRq = _reflection.GeneratedProtocolMessageType('SetOffsetsRq', (_message.Message,), dict(
DESCRIPTOR = _SETOFFSETSRQ,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:SetOffsetsRq)
))
_sym_db.RegisterMessage(SetOffsetsRq)
SetOffsetsRs = _reflection.GeneratedProtocolMessageType('SetOffsetsRs', (_message.Message,), dict(
DESCRIPTOR = _SETOFFSETSRS,
__module__ = 'kafkapixy_pb2'
# @@protoc_insertion_point(class_scope:SetOffsetsRs)
))
_sym_db.RegisterMessage(SetOffsetsRs)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\002pb'))
_GETTOPICMETADATARS_CONFIGENTRY.has_options = True
_GETTOPICMETADATARS_CONFIGENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_LISTTOPICRS_TOPICSENTRY.has_options = True
_LISTTOPICRS_TOPICSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CONSUMERGROUPS_CONSUMERSENTRY.has_options = True
_CONSUMERGROUPS_CONSUMERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_LISTCONSUMERSRS_GROUPSENTRY.has_options = True
_LISTCONSUMERSRS_GROUPSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_KAFKAPIXY = _descriptor.ServiceDescriptor(
name='KafkaPixy',
full_name='KafkaPixy',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=1919,
serialized_end=2280,
methods=[
_descriptor.MethodDescriptor(
name='Produce',
full_name='KafkaPixy.Produce',
index=0,
containing_service=None,
input_type=_PRODRQ,
output_type=_PRODRS,
options=None,
),
_descriptor.MethodDescriptor(
name='ConsumeNAck',
full_name='KafkaPixy.ConsumeNAck',
index=1,
containing_service=None,
input_type=_CONSNACKRQ,
output_type=_CONSRS,
options=None,
),
_descriptor.MethodDescriptor(
name='Ack',
full_name='KafkaPixy.Ack',
index=2,
containing_service=None,
input_type=_ACKRQ,
output_type=_ACKRS,
options=None,
),
_descriptor.MethodDescriptor(
name='GetOffsets',
full_name='KafkaPixy.GetOffsets',
index=3,
containing_service=None,
input_type=_GETOFFSETSRQ,
output_type=_GETOFFSETSRS,
options=None,
),
_descriptor.MethodDescriptor(
name='SetOffsets',
full_name='KafkaPixy.SetOffsets',
index=4,
containing_service=None,
input_type=_SETOFFSETSRQ,
output_type=_SETOFFSETSRS,
options=None,
),
_descriptor.MethodDescriptor(
name='ListTopics',
full_name='KafkaPixy.ListTopics',
index=5,
containing_service=None,
input_type=_LISTTOPICRQ,
output_type=_LISTTOPICRS,
options=None,
),
_descriptor.MethodDescriptor(
name='ListConsumers',
full_name='KafkaPixy.ListConsumers',
index=6,
containing_service=None,
input_type=_LISTCONSUMERSRQ,
output_type=_LISTCONSUMERSRS,
options=None,
),
_descriptor.MethodDescriptor(
name='GetTopicMetadata',
full_name='KafkaPixy.GetTopicMetadata',
index=7,
containing_service=None,
input_type=_GETTOPICMETADATARQ,
output_type=_GETTOPICMETADATARS,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_KAFKAPIXY)
DESCRIPTOR.services_by_name['KafkaPixy'] = _KAFKAPIXY
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
praekelt/molo | molo/core/management/commands/add_tag_to_article.py | 3 | 3734 | from __future__ import absolute_import, unicode_literals
import csv
from babel import Locale
from django.core.management.base import BaseCommand
from molo.core.models import (
Languages, Tag, ArticlePage, ArticlePageTags, Main, SectionIndexPage,
TagIndexPage)
class Command(BaseCommand):
    """Bulk-assign tags to articles from a CSV file.

    Each CSV row is expected to be ``article_slug, tag_title[, tag_title...]``.
    Tags are only linked on sites whose main language locale matches the
    ``locale`` argument; every other condition is reported and skipped.
    """

    def add_arguments(self, parser):
        parser.add_argument('csv_name', type=str)
        parser.add_argument('locale', type=str)

    def handle(self, *args, **options):
        csv_name = options.get('csv_name', None)
        locale = options.get('locale', None)
        mains = Main.objects.all()

        # Map of article slug -> list of tag titles, read once up front.
        articles = {}
        with open(csv_name) as articles_tags:
            reader = csv.reader(articles_tags)
            if mains:
                for row in reader:
                    if not row:
                        # Skip blank lines; row[0] would raise IndexError.
                        continue
                    articles[row[0]] = row[1:]

        for main in mains:
            self._process_main(main, articles, locale)

    def _process_main(self, main, articles, locale):
        """Apply the slug -> tag-titles mapping to a single ``Main`` page."""
        section_index = SectionIndexPage.objects.child_of(main).first()
        tag_index = TagIndexPage.objects.child_of(main).first()
        main_lang = Languages.for_site(main.get_site()).languages.filter(
            is_active=True, is_main_language=True).first()

        # Report whichever prerequisite is missing and bail out early.
        if not (section_index and tag_index and main_lang):
            if not section_index:
                self.stdout.write(self.style.NOTICE(
                    'Section Index Page does not exist in "%s"' % main))
            if not tag_index:
                self.stdout.write(self.style.NOTICE(
                    'Tag Index Page does not exist in "%s"' % main))
            if not main_lang:
                self.stdout.write(self.style.NOTICE(
                    'Main language does not exist in "%s"' % main))
            return

        if main_lang.locale != locale:
            self.stdout.write(self.style.NOTICE(
                'Main language of "%s" is not "%s".'
                ' The main language is "%s"'
                % (main.get_site(), Locale(locale).english_name,
                   main_lang)))
            return

        for article_slug, tag_titles in articles.items():
            article = ArticlePage.objects.descendant_of(
                section_index).filter(slug=article_slug).first()
            if article is None:
                self.stdout.write(self.style.ERROR(
                    'Article "%s" does not exist in "%s"'
                    % (article_slug, main.get_site())))
                continue
            for tag_title in tag_titles:
                self._link_tag(main, tag_index, article, tag_title)

    def _link_tag(self, main, tag_index, article, tag_title):
        """Link ``article`` to the tag named ``tag_title``, if it exists."""
        tag = Tag.objects.child_of(tag_index).filter(
            title=tag_title.strip()).first()
        if tag is None:
            self.stdout.write(self.style.NOTICE(
                'Tag "%s" does not exist in "%s"' % (tag_title, main)))
            return
        # Fix: the original used a ``tag__title`` (string) lookup but passed
        # a Tag instance; compare against the related Tag object directly.
        if article.nav_tags.filter(tag=tag).exists():
            self.stdout.write(self.style.WARNING(
                'Tag "%s" has been already asigned'
                ' to "%s" in "%s"' % (tag, article, main)))
            return
        ArticlePageTags(page=article, tag=tag).save()
| bsd-2-clause |
theteam/scampcat.com | fabfile.py | 1 | 4594 | # Fabric file for scampcat
import ConfigParser
import datetime
import os
from fabric.api import run, local, env, get, prompt, sudo, cd
from fabric.contrib import django
from fabric.contrib.console import confirm
django.settings_module('scampcat.settings.alfredo')
from django.conf import settings
# --- Module-level bootstrap: load fabric.cfg and seed Fabric's env dict. ---
here_dir = os.path.dirname(os.path.realpath(__file__))
# Helper: build absolute paths relative to this fabfile's directory.
here = lambda *x: os.path.join(here_dir, *x)
config = ConfigParser.ConfigParser()
# fabric.cfg must live next to this file; it holds per-target deployment settings.
config.readfp(open(here('fabric.cfg')))
env.project = config.get('general', 'project')
env.project_root = settings.PROJECT_ROOT
tmp_time = datetime.datetime.now()
# Timestamp used to name releases and database dumps, e.g. "20240101_1200".
env.time = tmp_time.strftime("%Y%m%d_%H%M")
env.media_root = settings.MEDIA_ROOT
def production():
    """Target the production server: load its settings from fabric.cfg
    onto Fabric's ``env`` and compute the release name for this run."""
    env.alias = 'production'
    env.hosts = config.get('production', 'hosts').split(',')
    # Copy the scalar options straight from the [production] section onto env.
    for option in ('user', 'path', 'db_name', 'db_user', 'db_pass', 'branch'):
        setattr(env, option, config.get('production', option))
    env.release_name = '%(project)s_%(time)s' % env
def deploy(tag=None):
    """Deployment actions.

    On the production alias: export a fresh timestamped release, repoint
    the ``current`` symlink at it and rebuild static files (after an
    interactive confirmation).  On any other alias: pull in place.

    :param tag: optional git ref to deploy; stored on ``env.tag`` (defaults
        to the empty string, i.e. the remote's default ref).
    """
    # override the version when one is created
    env.tag = tag if tag else ''
    if env.alias == 'production':
        if confirm("Are you sure you want to deploy to %(alias)s" % env):
            export_release()
            symlink_release()
            update_static()
    else:
        with cd(env.path):
            # Bug fix: only 'tag' is ever stored on env (see above); the old
            # '%(version)s' interpolation raised KeyError on this branch.
            run('git pull origin %(tag)s' % env)
def export_release():
    """Update the source checkout and copy it into a timestamped release dir."""
    env.origin_path = "%s/src" % env.path
    # Pull first, then snapshot the checkout under releases/<release_name>.
    for command in ('cd %(origin_path)s && git pull --rebase %(tag)s',
                    'cp -r %(origin_path)s %(path)s/releases/%(release_name)s'):
        run(command % env)
def symlink_release():
    """Remove the old ``current`` symlink and point it at the new release.

    Note: only the symlink is removed; the previous release directory
    itself is left in place under ``releases/``.
    """
    # remove current deployment
    run('rm %(path)s/current' % env)
    # symlink deployment
    run('ln -s %(path)s/releases/%(release_name)s/ %(path)s/current' % env)
def update_static():
    """Run Django's ``collectstatic`` inside the current release (production only)."""
    if env.alias == 'production':
        with cd('%(path)s/current/scampcat/' % env):
            # Use the virtualenv's python so the production settings resolve.
            run('%(path)s/venv/bin/python manage.py collectstatic '
                '--settings=settings.production --noinput' % env)
def clean():
    """Remove stale .pyc files from the deployment path on the server."""
    # -delete lets find remove matches itself instead of spawning rm per file.
    run('find %s -iname \*pyc -delete' % env.path)
def restart():
    """Restart Apache on the remote host (requires sudo)."""
    # pty=False keeps the service detached from the SSH pseudo-terminal,
    # so it is not killed when the Fabric session closes.
    sudo('service apache2 restart', pty=False)
def dumpdb():
    """Dump the remote MySQL database, download the compressed dump locally,
    and remove the temporary files from the server.

    The local file is named ``<db_name>-<timestamp>.sql.tgz``.
    """
    # dump of the database
    run("mysqldump -u%(db_user)s -p%(db_pass)s %(db_name)s > "
        "%(db_name)s-%(time)s.sql" % env)
    # compressing it
    run("tar cvfz %(db_name)s-%(time)s.sql.tgz %(db_name)s-%(time)s.sql" % env)
    # retrieve copy
    get("%(db_name)s-%(time)s.sql.tgz" % env,
        "%(db_name)s-%(time)s.sql.tgz" % env)
    # clean remote
    run("rm %(db_name)s-%(time)s.sql" % env)
    run("rm %(db_name)s-%(time)s.sql.tgz" % env)
def syncdb():
    """Replace the local database with a fresh dump from the server.

    Fetches and unpacks a remote dump (via :func:`dumpdb`), then drops and
    recreates the local database before importing the dump into it.
    DESTRUCTIVE: all local data in that database is lost.
    """
    dumpdb()
    local("tar xvfz %(db_name)s-%(time)s.sql.tgz" % env)
    # get values from local_settings or prompt if empty
    settings.DATABASE_USER = settings.DATABASES['default']['USER']
    settings.DATABASE_PASSWORD = settings.DATABASES['default']['PASSWORD']
    settings.DATABASE_NAME = settings.DATABASES['default']['NAME']
    if settings.DATABASE_USER:
        env.local_db_user = settings.DATABASES['default']['USER']
    else:
        # prompt for details
        env.local_db_user = prompt("Database User:")
    # An empty string is a valid password, so only prompt when it is unset
    # (None); '' is accepted as-is.
    if settings.DATABASE_PASSWORD or settings.DATABASE_PASSWORD == '':
        env.local_db_password = settings.DATABASE_PASSWORD
    else:
        env.local_db_password = prompt("Database password:")
    if settings.DATABASE_NAME:
        env.local_db_name = settings.DATABASE_NAME
    else:
        env.local_db_name = prompt("Database name:")
    env.local_connection = "mysql -u%(local_db_user)s -p%(local_db_password)s %(local_db_name)s" % env
    # drop existing database
    local("%(local_connection)s -e \"drop database %(local_db_name)s; "
          "create database %(local_db_name)s;\" " % env)
    # import database
    local("%(local_connection)s < %(db_name)s-%(time)s.sql" % env)
    # clean up
    local("rm %(db_name)s-%(time)s.sql.tgz" % env)
    local("rm %(db_name)s-%(time)s.sql" % env)
| mit |
IONISx/edx-platform | openedx/core/djangoapps/credit/views.py | 11 | 14369 | """
Views for the credit Django app.
"""
import datetime
import json
import logging
from django.conf import settings
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
Http404
)
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_GET
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import pytz
from rest_framework import viewsets, mixins, permissions
from rest_framework.authentication import SessionAuthentication
from rest_framework_oauth.authentication import OAuth2Authentication
from openedx.core.djangoapps.credit import api
from openedx.core.djangoapps.credit.exceptions import CreditApiBadRequest, CreditRequestNotFound
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.serializers import CreditCourseSerializer
from openedx.core.djangoapps.credit.signature import signature, get_shared_secret_key
from openedx.core.lib.api.mixins import PutAsCreateMixin
from util.date_utils import from_timestamp
from util.json_request import JsonResponse
log = logging.getLogger(__name__)
@require_GET
def get_providers_detail(request):
    """
    Return details of the credit providers, optionally filtered.

    **Parameters:**

        * provider_ids (comma-separated provider identifiers): optional
          filter; when omitted, all providers are returned.
          Note: the query parameter is ``provider_ids`` (plural) — an
          earlier version of this docstring called it ``provider_id``.

    **Example Usage:**

        GET /api/credit/v1/providers?provider_ids=asu,hogwarts

        "response": [
            {
                "id": "hogwarts",
                "display_name": "Hogwarts School of Witchcraft and Wizardry",
                "url": "https://credit.example.com/",
                "status_url": "https://credit.example.com/status/",
                "description": "A new model for the Witchcraft and Wizardry School System.",
                "enable_integration": false,
                "fulfillment_instructions": "..."
            },
            ...
        ]

    **Responses:**

        * 200 OK: Returned content is a JSON-encoded list of provider
          dictionaries.
        * 404 Not Found: The provider does not exist.
    """
    provider_ids = request.GET.get("provider_ids", None)
    # None (rather than an empty list) means "no filter" to the credit API.
    providers_list = provider_ids.split(",") if provider_ids else None
    providers = api.get_credit_providers(providers_list)
    return JsonResponse(providers)
@require_POST
def create_credit_request(request, provider_id):
    """
    Initiate a request for credit in a course.

    This end-point will get-or-create a record in the database to track
    the request.  It will then calculate the parameters to send to
    the credit provider and digitally sign the parameters, using a secret
    key shared with the credit provider.

    The user's browser is responsible for POSTing these parameters
    directly to the credit provider.

    **Example Usage:**

        POST /api/credit/v1/providers/hogwarts/request/
        {
            "username": "ron",
            "course_key": "edX/DemoX/Demo_Course"
        }

        Response: 200 OK
        Content-Type: application/json
        {
            "url": "http://example.com/request-credit",
            "method": "POST",
            "parameters": {
                request_uuid: "557168d0f7664fe59097106c67c3f847"
                timestamp: 1434631630,
                course_org: "ASUx"
                course_num: "DemoX"
                course_run: "1T2015"
                final_grade: "0.95",
                user_username: "john",
                user_email: "john@example.com"
                user_full_name: "John Smith"
                user_mailing_address: "",
                user_country: "US",
                signature: "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
            }
        }

    **Parameters:**

        * username (unicode): The username of the user requesting credit.
        * course_key (unicode): The identifier for the course for which the user
          is requesting credit.

    **Responses:**

        * 200 OK: The request was created successfully.  Returned content
          is a JSON-encoded dictionary describing what the client should
          send to the credit provider.
        * 400 Bad Request:
            - The provided course key did not correspond to a valid credit course.
            - The user already has a completed credit request for this course and provider.
        * 403 Not Authorized:
            - The username does not match the name of the logged in user.
            - The user is not eligible for credit in the course.
        * 404 Not Found:
            - The provider does not exist.
    """
    # Parse the JSON body and ensure both required keys are present.
    response, parameters = _validate_json_parameters(request.body, ["username", "course_key"])
    if response is not None:
        return response
    try:
        course_key = CourseKey.from_string(parameters["course_key"])
    except InvalidKeyError:
        return HttpResponseBadRequest(
            u'Could not parse "{course_key}" as a course key'.format(
                course_key=parameters["course_key"]
            )
        )
    # Check user authorization: a user may only request credit for themselves.
    if not (request.user and request.user.username == parameters["username"]):
        log.warning(
            u'User with ID %s attempted to initiate a credit request for user with username "%s"',
            request.user.id if request.user else "[Anonymous]",
            parameters["username"]
        )
        return HttpResponseForbidden("Users are not allowed to initiate credit requests for other users.")
    # Initiate the request; the credit API raises on eligibility/duplication errors.
    try:
        credit_request = api.create_credit_request(course_key, provider_id, parameters["username"])
    except CreditApiBadRequest as ex:
        return HttpResponseBadRequest(ex)
    else:
        return JsonResponse(credit_request)
@require_POST
@csrf_exempt
def credit_provider_callback(request, provider_id):
    """
    Callback end-point used by credit providers to approve or reject
    a request for credit.

    CSRF is exempted because the POST comes from an external provider,
    not a browser session; authenticity is enforced via the shared-secret
    signature check below instead.

    **Example Usage:**

        POST /api/credit/v1/providers/{provider-id}/callback
        {
            "request_uuid": "557168d0f7664fe59097106c67c3f847",
            "status": "approved",
            "timestamp": 1434631630,
            "signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
        }

        Response: 200 OK

    **Parameters:**

        * request_uuid (string): The UUID of the request.
        * status (string): Either "approved" or "rejected".
        * timestamp (int or string): The datetime at which the POST request was made, represented
          as the number of seconds since January 1, 1970 00:00:00 UTC.
          If the timestamp is a string, it will be converted to an integer.
        * signature (string): A digital signature of the request parameters,
          created using a secret key shared with the credit provider.

    **Responses:**

        * 200 OK: The user's status was updated successfully.
        * 400 Bad request: The provided parameters were not valid.
          Response content will be a JSON-encoded string describing the error.
        * 403 Forbidden: Signature was invalid or timestamp was too far in the past.
        * 404 Not Found: Could not find a request with the specified UUID associated with this provider.
    """
    response, parameters = _validate_json_parameters(request.body, [
        "request_uuid", "status", "timestamp", "signature"
    ])
    if response is not None:
        return response
    # Validate the digital signature of the request.
    # This ensures that the message came from the credit provider
    # and hasn't been tampered with.
    response = _validate_signature(parameters, provider_id)
    if response is not None:
        return response
    # Validate the timestamp to ensure that the request is timely
    # (replay protection).
    response = _validate_timestamp(parameters["timestamp"], provider_id)
    if response is not None:
        return response
    # Update the credit request status
    try:
        api.update_credit_request_status(parameters["request_uuid"], provider_id, parameters["status"])
    except CreditRequestNotFound:
        raise Http404
    except CreditApiBadRequest as ex:
        return HttpResponseBadRequest(ex)
    else:
        # Empty 200 body: the provider only needs the acknowledgement.
        return HttpResponse()
@require_GET
def get_eligibility_for_user(request):
    """
    Retrieve a user's credit eligibility for a course.

    **Parameters:**

        * course_key (unicode): Identifier of the course.
        * username (unicode): Username of the current user.

    **Example Usage:**

        GET /api/credit/v1/eligibility?username=user&course_key=edX/Demo_101/Fall

        "response": {
            "course_key": "edX/Demo_101/Fall",
            "deadline": "2015-10-23"
        }

    **Responses:**

        * 200 OK: JSON-encoded eligibility information.
        * 404 Not Found: The provider does not exist.
    """
    # Missing query parameters are forwarded as None; the credit API
    # decides how to handle absent filters.
    return JsonResponse(api.get_eligibilities_for_user(
        username=request.GET.get("username", None),
        course_key=request.GET.get("course_key", None),
    ))
def _validate_json_parameters(params_string, expected_parameters):
    """Parse ``params_string`` as a JSON dictionary and verify that every
    required key is present.

    Arguments:
        params_string (unicode): The JSON-encoded parameter dictionary.
        expected_parameters (list): Keys that must appear in the dictionary.

    Returns:
        tuple of (HttpResponse, dict): the first element is an error
        response (and the second is None) on failure; on success the first
        element is None and the second is the parsed dictionary.
    """
    try:
        parsed = json.loads(params_string)
    except (TypeError, ValueError):
        return HttpResponseBadRequest("Could not parse the request body as JSON."), None
    if not isinstance(parsed, dict):
        return HttpResponseBadRequest("Request parameters must be a JSON-encoded dictionary."), None
    missing = set(expected_parameters) - set(parsed.keys())
    if missing:
        msg = u"Required parameters are missing: {missing}".format(missing=u", ".join(missing))
        return HttpResponseBadRequest(msg), None
    return None, parsed
def _validate_signature(parameters, provider_id):
    """
    Check that the signature from the credit provider is valid.

    Arguments:
        parameters (dict): Parameters received from the credit provider;
            must contain a "signature" key.
        provider_id (unicode): Identifier for the credit provider.

    Returns:
        HttpResponseForbidden on failure, otherwise None (implicitly).
    """
    secret_key = get_shared_secret_key(provider_id)
    if secret_key is None:
        log.error(
            (
                u'Could not retrieve secret key for credit provider with ID "%s". '
                u'Since no key has been configured, we cannot validate requests from the credit provider.'
            ), provider_id
        )
        return HttpResponseForbidden("Credit provider credentials have not been configured.")
    # Recompute the signature over the parameters and compare with the
    # value the provider sent.
    if signature(parameters, secret_key) != parameters["signature"]:
        # Bug fix: the message describes the provider ID, but the code
        # previously passed the signature as the format argument.
        log.warning(u'Request from credit provider with ID "%s" had an invalid signature', provider_id)
        return HttpResponseForbidden("Invalid signature.")
def _validate_timestamp(timestamp_value, provider_id):
    """Reject notifications whose timestamp is invalid or too old.

    Arguments:
        timestamp_value (int or string): Seconds since Jan. 1, 1970 UTC;
            strings are converted to integers.
        provider_id (unicode): Identifier for the credit provider.

    Returns:
        HttpResponse on failure, otherwise None (implicitly).
    """
    parsed = from_timestamp(timestamp_value)
    if parsed is None:
        msg = u'"{timestamp}" is not a valid timestamp'.format(timestamp=timestamp_value)
        log.warning(msg)
        return HttpResponseBadRequest(msg)
    # Reject requests older than the configured expiration window
    # (basic replay protection).
    age_seconds = (datetime.datetime.now(pytz.UTC) - parsed).total_seconds()
    if age_seconds > settings.CREDIT_PROVIDER_TIMESTAMP_EXPIRATION:
        log.warning(
            (
                u'Timestamp %s is too far in the past (%s seconds), '
                u'so we are rejecting the notification from the credit provider "%s".'
            ),
            timestamp_value, age_seconds, provider_id,
        )
        return HttpResponseForbidden(u"Timestamp is too far in the past.")
class CreditCourseViewSet(PutAsCreateMixin, mixins.UpdateModelMixin, viewsets.ReadOnlyModelViewSet):
    """CreditCourse endpoints.

    Read-only list/retrieve plus create-or-update via PUT
    (``PutAsCreateMixin``).  Courses are addressed by their serialized
    course key rather than by primary key, and write access requires an
    authenticated admin user.
    """
    lookup_field = 'course_key'
    lookup_value_regex = settings.COURSE_KEY_REGEX
    queryset = CreditCourse.objects.all()
    serializer_class = CreditCourseSerializer
    authentication_classes = (OAuth2Authentication, SessionAuthentication,)
    permission_classes = (permissions.IsAuthenticated, permissions.IsAdminUser)
    # In Django Rest Framework v3, there is a default pagination
    # class that transmutes the response data into a dictionary
    # with pagination information. The original response data (a list)
    # is stored in a "results" value of the dictionary.
    # For backwards compatibility with the existing API, we disable
    # the default behavior by setting the pagination_class to None.
    pagination_class = None

    # This CSRF exemption only applies when authenticating without SessionAuthentication.
    # SessionAuthentication will enforce CSRF protection.
    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(CreditCourseViewSet, self).dispatch(request, *args, **kwargs)

    def get_object(self):
        # Convert the serialized course key into a CourseKey instance
        # so we can look up the object.
        course_key = self.kwargs.get(self.lookup_field)
        if course_key is not None:
            self.kwargs[self.lookup_field] = CourseKey.from_string(course_key)
        return super(CreditCourseViewSet, self).get_object()
| agpl-3.0 |
albertomurillo/ansible | lib/ansible/modules/network/aci/mso_schema_site_anp_epg_subnet.py | 18 | 8303 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_site_anp_epg_subnet
short_description: Manage site-local EPG subnets in schema template
description:
- Manage site-local EPG subnets in schema template on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
site:
description:
- The name of the site.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
anp:
description:
- The name of the ANP.
type: str
epg:
description:
- The name of the EPG.
type: str
subnet:
description:
- The IP range in CIDR notation.
type: str
required: true
aliases: [ ip ]
description:
description:
- The description of this subnet.
type: str
scope:
description:
- The scope of the subnet.
type: str
choices: [ private, public ]
shared:
description:
- Whether this subnet is shared between VRFs.
type: bool
no_default_gateway:
description:
- Whether this subnet has a default gateway.
type: bool
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
notes:
- The ACI MultiSite PATCH API has a deficiency requiring some objects to be referenced by index.
This can cause silent corruption on concurrent access when changing/removing on object as
the wrong object may be referenced. This module is affected by this deficiency.
seealso:
- module: mso_schema_site_anp_epg
- module: mso_schema_template_anp_epg_subnet
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new subnet to a site EPG
mso_schema_site_anp_epg_subnet:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
subnet: 10.0.0.0/24
state: present
delegate_to: localhost
- name: Remove a subnet from a site EPG
mso_schema_site_anp_epg_subnet:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
subnet: 10.0.0.0/24
state: absent
delegate_to: localhost
- name: Query a specific site EPG subnet
mso_schema_site_anp_epg_subnet:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
subnet: 10.0.0.0/24
state: query
delegate_to: localhost
register: query_result
- name: Query all site EPG subnets
mso_schema_site_anp_epg_subnet:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_subnet_spec, issubset
def main():
    """Module entry point: create/replace, delete, or query a site-local
    EPG subnet on Cisco ACI Multi-Site via the schema PATCH API."""
    argument_spec = mso_argument_spec()
    argument_spec.update(
        schema=dict(type='str', required=True),
        site=dict(type='str', required=True),
        template=dict(type='str', required=True),
        anp=dict(type='str', required=True),
        epg=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )
    argument_spec.update(mso_subnet_spec())

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['subnet']],
            ['state', 'present', ['subnet']],
        ],
    )

    schema = module.params['schema']
    site = module.params['site']
    template = module.params['template']
    anp = module.params['anp']
    epg = module.params['epg']
    subnet = module.params['subnet']
    description = module.params['description']
    scope = module.params['scope']
    shared = module.params['shared']
    no_default_gateway = module.params['no_default_gateway']
    state = module.params['state']

    mso = MSOModule(module)

    # Get schema_id
    schema_obj = mso.get_obj('schemas', displayName=schema)
    if not schema_obj:
        mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
    schema_path = 'schemas/{id}'.format(**schema_obj)
    schema_id = schema_obj['id']

    # Get site
    site_id = mso.lookup_site(site)

    # Get site_idx
    sites = [(s['siteId'], s['templateName']) for s in schema_obj['sites']]
    if (site_id, template) not in sites:
        # Bug fix: 'sites' holds (site_id, template) tuples; joining them
        # directly raised "TypeError: sequence item 0: expected str instance,
        # tuple found". Render each pair as "site-template" before joining.
        mso.fail_json(msg="Provided site/template '{0}-{1}' does not exist. Existing sites/templates: {2}".format(
            site, template, ', '.join('{0}-{1}'.format(*s) for s in sites)))

    # Schema-access uses indexes
    site_idx = sites.index((site_id, template))
    # Path-based access uses site_id-template
    site_template = '{0}-{1}'.format(site_id, template)

    # Get ANP
    anp_ref = mso.anp_ref(schema_id=schema_id, template=template, anp=anp)
    anps = [a['anpRef'] for a in schema_obj['sites'][site_idx]['anps']]
    if anp_ref not in anps:
        mso.fail_json(msg="Provided anp '{0}' does not exist. Existing anps: {1}".format(anp, ', '.join(anps)))
    anp_idx = anps.index(anp_ref)

    # Get EPG
    epg_ref = mso.epg_ref(schema_id=schema_id, template=template, anp=anp, epg=epg)
    epgs = [e['epgRef'] for e in schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs']]
    if epg_ref not in epgs:
        mso.fail_json(msg="Provided epg '{0}' does not exist. Existing epgs: {1}".format(epg, ', '.join(epgs)))
    epg_idx = epgs.index(epg_ref)

    # Get Subnet
    subnets = [s['ip'] for s in schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs'][epg_idx]['subnets']]
    if subnet in subnets:
        subnet_idx = subnets.index(subnet)
        # FIXME: Changes based on index are DANGEROUS
        subnet_path = '/sites/{0}/anps/{1}/epgs/{2}/subnets/{3}'.format(site_template, anp, epg, subnet_idx)
        mso.existing = schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs'][epg_idx]['subnets'][subnet_idx]

    if state == 'query':
        if subnet is None:
            mso.existing = schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs'][epg_idx]['subnets']
        elif not mso.existing:
            mso.fail_json(msg="Subnet '{subnet}' not found".format(subnet=subnet))
        mso.exit_json()

    subnets_path = '/sites/{0}/anps/{1}/epgs/{2}/subnets'.format(site_template, anp, epg)
    ops = []

    mso.previous = mso.existing
    if state == 'absent':
        if mso.existing:
            mso.sent = mso.existing = {}
            ops.append(dict(op='remove', path=subnet_path))

    elif state == 'present':
        # Fill in server-side defaults only when creating a new subnet.
        if not mso.existing:
            if description is None:
                description = subnet
            if scope is None:
                scope = 'private'
            if shared is None:
                shared = False
            if no_default_gateway is None:
                no_default_gateway = False

        payload = dict(
            ip=subnet,
            description=description,
            scope=scope,
            shared=shared,
            noDefaultGateway=no_default_gateway,
        )

        mso.sanitize(payload, collate=True)

        # Replace in place when the subnet exists; otherwise append ('/-').
        if mso.existing:
            ops.append(dict(op='replace', path=subnet_path, value=mso.sent))
        else:
            ops.append(dict(op='add', path=subnets_path + '/-', value=mso.sent))

        mso.existing = mso.proposed

    if not module.check_mode:
        mso.request(schema_path, method='PATCH', data=ops)

    mso.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
EricSB/nupic | examples/tp/tp_constant_test.py | 5 | 4997 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file tests that we can learn and predict the particularly vexing case of a
single constant signal!
"""
import numpy as np
import unittest2 as unittest
from nupic.research import fdrutilities as fdrutils
from nupic.research.TP import TP
from nupic.research.TP10X2 import TP10X2
from nupic.support.unittesthelpers.testcasebase import (TestCaseBase,
TestOptionParser)
def _printOneTrainingVector(x):
  """Print *x* on one line: '1' for each nonzero bit, '.' otherwise."""
  print ''.join('1' if k != 0 else '.' for k in x)
def _getSimplePatterns(numOnes, numPatterns):
  """Build very simple test patterns.

  Each pattern is a float32 vector of ``numOnes * numPatterns`` bits with a
  single run of ``numOnes`` consecutive ones; pattern i has its run starting
  at offset ``i * numOnes``.  These patterns are used as elements of
  sequences when building up a training set.
  """
  numCols = numOnes * numPatterns
  patterns = []
  for i in xrange(numPatterns):
    vec = np.zeros(numCols, dtype='float32')
    vec[i * numOnes:(i + 1) * numOnes] = 1
    patterns.append(vec)
  return patterns
def _createTps(numCols):
  """Create two instances of temporal poolers (TP.py and TP10X2.py) with
  identical parameter settings.

  Returns a (cppTp, pyTp) pair so tests can compare the C++ and Python
  implementations against each other.

  NOTE(review): SEED and VERBOSITY are module globals that are only set in
  the __main__ block of this file — confirm they exist before calling this
  from another module.
  """
  # Keep these fixed:
  minThreshold = 4
  activationThreshold = 5
  newSynapseCount = 7
  initialPerm = 0.3
  connectedPerm = 0.5
  permanenceInc = 0.1
  permanenceDec = 0.05
  globalDecay = 0
  cellsPerColumn = 1
  cppTp = TP10X2(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
                 initialPerm=initialPerm, connectedPerm=connectedPerm,
                 minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                 permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                 activationThreshold=activationThreshold,
                 globalDecay=globalDecay, burnIn=1,
                 seed=SEED, verbosity=VERBOSITY,
                 checkSynapseConsistency=True,
                 pamLength=1000)
  # Ensure we are copying over learning states for TPDiff
  cppTp.retrieveLearningStates = True
  pyTp = TP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
            initialPerm=initialPerm, connectedPerm=connectedPerm,
            minThreshold=minThreshold, newSynapseCount=newSynapseCount,
            permanenceInc=permanenceInc, permanenceDec=permanenceDec,
            activationThreshold=activationThreshold,
            globalDecay=globalDecay, burnIn=1,
            seed=SEED, verbosity=VERBOSITY,
            pamLength=1000)
  return cppTp, pyTp
class TPConstantTest(TestCaseBase):
  """Checks that both TP implementations can learn and predict a constant
  (single repeated) signal, and that the C++ and Python TPs stay identical."""

  def setUp(self):
    # One C++ (TP10X2) and one Python TP, built with identical parameters.
    self.cppTp, self.pyTp = _createTps(100)

  def _basicTest(self, tp=None):
    """Test creation, pickling, and basic run of learning and inference."""
    trainingSet = _getSimplePatterns(10, 10)
    # Learn on several constant sequences, with a reset in between
    for _ in range(2):
      for seq in trainingSet[0:5]:
        for _ in range(10):
          tp.learn(seq)
        tp.reset()
    print "Learning completed"
    # Infer
    print "Running inference"
    tp.collectStats = True
    for seq in trainingSet[0:5]:
      tp.reset()
      tp.resetStats()
      for _ in range(10):
        tp.infer(seq)
        if VERBOSITY > 1 :
          print
          _printOneTrainingVector(seq)
          tp.printStates(False, False)
          print
          print
      if VERBOSITY > 1:
        print tp.getStats()
      # Ensure our predictions are accurate for each sequence
      self.assertGreater(tp.getStats()['predictionScoreAvg2'], 0.8)
      print ("tp.getStats()['predictionScoreAvg2'] = ",
             tp.getStats()['predictionScoreAvg2'])
    print "TPConstant basicTest ok"

  def testCppTpBasic(self):
    self._basicTest(self.cppTp)

  def testPyTpBasic(self):
    self._basicTest(self.pyTp)

  def testIdenticalTps(self):
    # tpDiff2 compares the full state of the two implementations.
    self.assertTrue(fdrutils.tpDiff2(self.cppTp, self.pyTp))
if __name__=="__main__":
parser = TestOptionParser()
options, _ = parser.parse_args()
SEED = options.seed
VERBOSITY = options.verbosity
np.random.seed(SEED)
unittest.main()
| agpl-3.0 |
regit/nufw | tests/test_conntrack.py | 1 | 3027 | #!/usr/bin/python
from unittest import TestCase, main
from common import createClient, connectClient, startNufw
from nuauth import Nuauth
from nuauth_conf import NuauthConf
from inl_tests.iptables import Iptables
from filter import HOST, VALID_PORT
from plaintext import USERDB, PlaintextAcl, PlainPeriodXML, Period
from IPy import IP
import time, sys, os, socket, commands, pynetfilter_conntrack
def get_conntrack_conn(src_port, dest, port_dest):
    """Return the conntrack entries matching the given source port and
    destination address, handling both pynetfilter_conntrack APIs.

    NOTE(review): the new-API branch filters on the module-level VALID_PORT
    rather than the ``port_dest`` argument — confirm whether that is
    intentional (all current callers pass VALID_PORT anyway).
    """
    target_ip = IP(dest)
    if pynetfilter_conntrack.__revision__ == '0.4.2':
        # Old API: dump the whole table and filter manually.
        handle = pynetfilter_conntrack.Conntrack()
        matches = []
        for conn in handle.dump_table(socket.AF_INET):
            if conn.orig_port_src == src_port and target_ip == IP(conn.orig_ipv4_dst):
                matches.append(str(conn))
        return matches
    # Newer API: let the library filter TCP (proto 6) entries for us.
    handle = pynetfilter_conntrack.NetfilterConntrack(pynetfilter_conntrack.CONNTRACK)
    table = handle.create_table(socket.AF_INET)
    return table.filter(6, orig_dst=target_ip, orig_dst_port=VALID_PORT, orig_src_port=src_port)
class TestConntrack(TestCase):
    """End-to-end check that NuFW tears down conntrack entries when an
    ACL's validity period (10 seconds here) expires."""

    def setUp(self):
        # Build a plaintext ACL valid for 10 seconds and start nuauth/nufw.
        self.dst_host = socket.gethostbyname(HOST)
        self.config = NuauthConf()

        self.acls = PlaintextAcl()
        self.acls.addAclFull("web", self.dst_host, VALID_PORT, USERDB[0].gid, 1, period='10 secs' )
        self.acls.install(self.config)

        self.period = PlainPeriodXML()
        self.period.addPeriod(Period("10 secs", duration = 10))
        self.period.install(self.config)

        self.users = USERDB
        self.users.install(self.config)
        self.nuauth = Nuauth(self.config)
        self.nufw = startNufw()

        # Send new outgoing connections to NFQUEUE for authentication;
        # drop any non-SYN packet of a NEW connection.
        self.iptables = Iptables()
        self.iptables.flush()
        self.iptables.command('-I OUTPUT -d %s -p tcp --dport 80 --syn -m state --state NEW -j NFQUEUE' % self.dst_host)
        self.iptables.command('-I OUTPUT -d %s -p tcp --dport 80 ! --syn -m state --state NEW -j DROP' % self.dst_host)

    def tearDown(self):
        # Stop the daemons and remove the test configuration.
        self.nuauth.stop()
        self.users.desinstall()
        self.acls.desinstall()
        self.period.desinstall()

    def testConnShutdown(self):
        user = USERDB[0]
        client = user.createClient()
        self.assert_(connectClient(client))
        start = time.time()

        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect((self.dst_host, VALID_PORT))
        src_port = conn.getsockname()[1]

        ct_before = len(get_conntrack_conn(src_port, self.dst_host, VALID_PORT))
        ## Check that only one connection is opened to
        self.assert_(ct_before == 1)

        ## The connection should be killed 10 seconds after being opened
        time.sleep(15)

        ## Check that only one connection is opened to
        ct_after = len(get_conntrack_conn(src_port, self.dst_host, VALID_PORT))
        self.assert_(ct_after == 0)

        conn.close()
        client.stop()
if __name__ == "__main__":
print "Test conntrack functionnalities"
main()
| gpl-3.0 |
onelab-eu/myslice | portal/sliceresourceview.py | 1 | 18321 | import json
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.views.generic.base import TemplateView
from django.http import HttpResponse
from django.shortcuts import render
from manifold.core.query import Query, AnalyzedQuery
from manifoldapi.manifoldapi import execute_query
from unfold.loginrequired import LoginRequiredView
from unfold.page import Page
from myslice.configengine import ConfigEngine
from plugins.apply import ApplyPlugin
from plugins.querytable import QueryTable
from plugins.googlemap import GoogleMap
# from plugins.queryupdater import QueryUpdaterPlugin
from plugins.filter_status import FilterStatusPlugin
from plugins.testbeds import TestbedsPlugin
from plugins.scheduler2 import Scheduler2
from plugins.asap import AsapPlugin
# Bristol plugin
from plugins.univbris import Univbris
from plugins.univbrisfoam import UnivbrisFoam
from plugins.univbrisfv import UnivbrisFv
from plugins.univbrisfvf import UnivbrisFvf
from plugins.univbrisfvfo import UnivbrisFvfo
from plugins.univbristopo import UnivbrisTopo
from plugins.univbrisvtam import UnivbrisVtam as UnivbrisVtamPlugin
from plugins.univbrisvtamform import UnivbrisVtamForm
from plugins.columns_editor import ColumnsEditor
from plugins.sladialog import SlaDialog
from plugins.lists.simplelist import SimpleList
from myslice.theme import ThemeView
from myslice.settings import logger
class SliceResourceView (LoginRequiredView, ThemeView):
    """'Resources' tab of a slice page.

    Builds the manifold queries for the slice, its resources and leases,
    instantiates the UI plugins that consume those queries (list, map,
    scheduler, filters, SLA dialog, ...) and renders the template.
    """
    template_name = "slice-resource-view.html"

    def get(self, request, slicename):
        """Render the resource page for *slicename*.

        @param request: Django HTTP request
        @param slicename: hrn of the slice being browsed
        """
        # Flash message set after a successful update redirect (?message=...).
        if request.GET.get('message') :
            msg = "Slice successfully updated"
        else :
            msg = None

        page = Page(request)
        metadata = page.get_metadata()
        page.expose_js_metadata()

        # All resource columns known to the metadata; feeds the
        # 'all resources' query and the columns editor below.
        resource_md = metadata.details_by_object('resource')
        resource_fields = [column['name'] for column in resource_md['column']]

        query_resource_all = Query.get('resource').select(resource_fields)

        slice_md = metadata.details_by_object('slice')
        slice_fields = [column['name'] for column in slice_md['column']]
        logger.debug("SLICE RES VIEW fields = {}".format(slice_fields))

        # The query to run is embedded in the URL, e.g.:
        #   select slice_hrn, resource.urn, lease.resource, lease.start_time,
        #   lease.end_time from slice where slice_hrn == "ple.upmc.myslicedemo"
        main_query = Query.get('slice').filter_by('slice_hrn', '=', slicename)
        main_query.select(slice_fields)

        # Columns shown by default in the QueryTable plugin (JS side).
        page.expose_js_var("QUERYTABLE_MAP","{'Resource name': 'hostname', 'Type': 'type', 'Facility': 'facility_name','Testbed': 'testbed_name', 'Available':'available'}")
        # Columns checked by default in the columns-selector plugin.
        query_default_fields = ['hostname', 'type', 'facility_name', 'testbed_name', 'available']
        # Mapping field name -> human-readable column title (Python side).
        QUERYTABLE_MAP = {
            'hostname'      : 'Resource name',
            'type'          : 'Type',
            'facility_name' : 'Facility',
            'testbed_name'  : 'Testbed',
            'available'     : 'Available',
        }

        # For internal use in the querytable plugin; needs to be a unique
        # column present in each returned record.
        main_query_init_key = 'urn'

        aq = AnalyzedQuery(main_query, metadata=metadata)
        page.enqueue_query(main_query, analyzed_query=aq)
        sq_resource = aq.subquery('resource')
        sq_lease = aq.subquery('lease')

        # ------------------------------------------------------------------
        # ALL RESOURCES LIST - resources as a table (datatable plugin)
        list_resources = QueryTable(
            page       = page,
            domid      = 'resources-list',
            title      = 'List view',
            query      = sq_resource,
            query_all  = query_resource_all,
            mapping    = QUERYTABLE_MAP,
            default_fields = query_default_fields,
            init_key   = "urn",
            checkboxes = True,
            datatables_options = {
                'iDisplayLength': 25,
                'bLengthChange' : True,
                'bAutoWidth'    : True,
            },
        )

        # ------------------------------------------------------------------
        # RESERVED RESOURCES / LEASES - plain lists
        list_reserved_resources = SimpleList(
            title = None,
            page  = page,
            key   = 'urn',
            query = sq_resource,
        )

        list_reserved_leases = SimpleList(
            title = None,
            page  = page,
            key   = 'resource',
            query = sq_lease,
        )

        # ------------------------------------------------------------------
        # COLUMNS EDITOR - adds/removes columns in the QueryTable plugin
        filter_column_editor = ColumnsEditor(
            page  = page,
            query = sq_resource,
            query_all = query_resource_all,
            default_fields = query_default_fields,
            title = "Select Columns",
            domid = 'select-columns',
        )

        # ------------------------------------------------------------------
        # RESOURCES MAP - geographic view of the same resource query
        map_resources = GoogleMap(
            page       = page,
            title      = 'Geographic view',
            domid      = 'resources-map',
            # tab's sons preferably turn this off
            togglable  = False,
            query      = sq_resource,
            # this key is the one issued by google
            googlemap_api_key = ConfigEngine().googlemap_api_key(),
            # the key to use at init-time
            init_key   = main_query_init_key,
            checkboxes = True,
            # To center around Europe : 53,9 / 3
            latitude   = 53.,
            longitude  = 9.,
            zoom       = 3,
        )

        # ------------------------------------------------------------------
        # LEASES - scheduler showing reservation timeslots of the resources
        resources_as_scheduler2 = Scheduler2(
            page   = page,
            domid  = 'scheduler',
            title  = 'Scheduler',
            # this is the query at the core of the slice list
            query  = sq_resource,
            query_lease = sq_lease,
        )

        # ------------------------------------------------------------------
        # FILTERS - testbed and status facets over the resource query
        filter_testbeds = TestbedsPlugin(
            page  = page,
            domid = 'testbeds-filter',
            title = 'Filter by testbeds',
            query = sq_resource,
        )

        filter_status = FilterStatusPlugin(
            page  = page,
            domid = "filter-status",
            query = sq_resource,
            query_lease = sq_lease,
        )

        # Renamed local from 'apply' to avoid shadowing the builtin;
        # the 'apply' template variable name is unchanged.
        apply_plugin = ApplyPlugin(
            page  = page,
            domid = "apply",
            query = main_query,
            username = request.user,
        )

        # NOTE: the Univbris (Bristol) OpenFlow/VTAM plugins, the AsapPlugin
        # and the QueryUpdater pending-operations plugin are currently
        # disabled; see repository history for their configuration.

        # ------------------------------------------------------------------
        # SLA view and accept dialog
        sla_dialog = SlaDialog(
            page  = page,
            title = 'sla dialog',
            query = main_query,
            domid = 'sla_dialog',
            username = request.user,
        )

        # Render every plugin into the template context.
        template_env = {}
        template_env['request'] = self.request
        template_env['list_resources'] = list_resources.render(self.request)
        template_env['list_reserved_resources'] = list_reserved_resources.render(self.request)
        template_env['list_reserved_leases'] = list_reserved_leases.render(self.request)
        template_env['columns_editor'] = filter_column_editor.render(self.request)
        template_env['filter_testbeds'] = filter_testbeds.render(self.request)
        template_env['filter_status'] = filter_status.render(self.request)
        template_env['apply'] = apply_plugin.render(self.request)
        template_env['map_resources'] = map_resources.render(self.request)
        template_env['scheduler'] = resources_as_scheduler2.render(self.request)
        template_env['sla_dialog'] = sla_dialog.render(self.request)
        template_env["theme"] = self.theme
        template_env["username"] = request.user
        template_env["slice"] = slicename
        template_env["section"] = "resources"
        template_env["msg"] = msg
        template_env.update(page.prelude_env())
        # NOTE(review): self.template is presumably provided by ThemeView
        # (template_name is the Django convention) -- confirm.
        return render_to_response(self.template, template_env, context_instance=RequestContext(request))
| gpl-3.0 |
magvugr/AT | EntVirtual/lib/python2.7/site-packages/django/contrib/admin/sites.py | 42 | 19554 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.i18n import JavaScriptCatalog
# Check errors collected from each registered ModelAdmin's check() method
# when settings.DEBUG is active (see AdminSite.register()).
system_check_errors = []
class AlreadyRegistered(Exception):
    """Raised by AdminSite.register() when a model is registered twice."""
    pass
class NotRegistered(Exception):
    """Raised by AdminSite.unregister() for a model that was never registered."""
    pass
class AdminSite(object):
    """
    An AdminSite object encapsulates an instance of the Django admin application, ready
    to be hooked in to your URLconf. Models are registered with the AdminSite using the
    register() method, and the get_urls() method can then be used to access Django view
    functions that present a full admin interface for the collection of registered
    models.
    """
    # Text to put at the end of each page's <title>.
    site_title = ugettext_lazy('Django site admin')

    # Text to put in each page's <h1>.
    site_header = ugettext_lazy('Django administration')

    # Text to put at the top of the admin index page.
    index_title = ugettext_lazy('Site administration')

    # URL for the "View site" link at the top of each admin page.
    site_url = '/'

    # Default value rendered for empty fields; settable via the
    # empty_value_display property below.
    _empty_value_display = '-'

    # Hooks subclasses may override to customize forms and templates.
    login_form = None
    index_template = None
    app_index_template = None
    login_template = None
    logout_template = None
    password_change_template = None
    password_change_done_template = None

    def __init__(self, name='admin'):
        """Create an empty site; *name* namespaces its URL names."""
        self._registry = {}  # model_class class -> admin_class instance
        self.name = name
        self._actions = {'delete_selected': actions.delete_selected}
        self._global_actions = self._actions.copy()

    def register(self, model_or_iterable, admin_class=None, **options):
        """
        Registers the given model(s) with the given admin class.

        The model(s) should be Model classes, not instances.

        If an admin class isn't given, it will use ModelAdmin (the default
        admin options). If keyword arguments are given -- e.g., list_display --
        they'll be applied as options to the admin class.

        If a model is already registered, this will raise AlreadyRegistered.

        If a model is abstract, this will raise ImproperlyConfigured.
        """
        if not admin_class:
            admin_class = ModelAdmin

        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model._meta.abstract:
                raise ImproperlyConfigured(
                    'The model %s is abstract, so it cannot be registered with admin.' % model.__name__
                )

            if model in self._registry:
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)

            # Ignore the registration if the model has been
            # swapped out.
            if not model._meta.swapped:
                # If we got **options then dynamically construct a subclass of
                # admin_class with those **options.
                if options:
                    # For reasons I don't quite understand, without a __module__
                    # the created class appears to "live" in the wrong place,
                    # which causes issues later on.
                    options['__module__'] = __name__
                    admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)

                # Instantiate the admin class to save in the registry
                admin_obj = admin_class(model, self)
                if admin_class is not ModelAdmin and settings.DEBUG:
                    system_check_errors.extend(admin_obj.check())

                self._registry[model] = admin_obj

    def unregister(self, model_or_iterable):
        """
        Unregisters the given model(s).

        If a model isn't already registered, this will raise NotRegistered.
        """
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self._registry:
                raise NotRegistered('The model %s is not registered' % model.__name__)
            del self._registry[model]

    def is_registered(self, model):
        """
        Check if a model class is registered with this `AdminSite`.
        """
        return model in self._registry

    def add_action(self, action, name=None):
        """
        Register an action to be available globally.
        """
        name = name or action.__name__
        self._actions[name] = action
        self._global_actions[name] = action

    def disable_action(self, name):
        """
        Disable a globally-registered action. Raises KeyError for invalid names.
        """
        del self._actions[name]

    def get_action(self, name):
        """
        Explicitly get a registered global action whether it's enabled or
        not. Raises KeyError for invalid names.
        """
        return self._global_actions[name]

    @property
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).
        """
        return six.iteritems(self._actions)

    @property
    def empty_value_display(self):
        # Public accessor for the placeholder shown in empty admin fields.
        return self._empty_value_display

    @empty_value_display.setter
    def empty_value_display(self, empty_value_display):
        self._empty_value_display = empty_value_display

    def has_permission(self, request):
        """
        Returns True if the given HttpRequest has permission to view
        *at least one* page in the admin site.
        """
        return request.user.is_active and request.user.is_staff

    def admin_view(self, view, cacheable=False):
        """
        Decorator to create an admin view attached to this ``AdminSite``. This
        wraps the view and provides permission checking by calling
        ``self.has_permission``.

        You'll want to use this from within ``AdminSite.get_urls()``:

            class MyAdminSite(AdminSite):

                def get_urls(self):
                    from django.conf.urls import url

                    urls = super(MyAdminSite, self).get_urls()
                    urls += [
                        url(r'^my_view/$', self.admin_view(some_view))
                    ]
                    return urls

        By default, admin_views are marked non-cacheable using the
        ``never_cache`` decorator. If the view can be safely cached, set
        cacheable=True.
        """
        def inner(request, *args, **kwargs):
            if not self.has_permission(request):
                if request.path == reverse('admin:logout', current_app=self.name):
                    index_path = reverse('admin:index', current_app=self.name)
                    return HttpResponseRedirect(index_path)
                # Inner import to prevent django.contrib.admin (app) from
                # importing django.contrib.auth.models.User (unrelated model).
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(
                    request.get_full_path(),
                    reverse('admin:login', current_app=self.name)
                )
            return view(request, *args, **kwargs)
        if not cacheable:
            inner = never_cache(inner)
        # We add csrf_protect here so this function can be used as a utility
        # function for any view, without having to repeat 'csrf_protect'.
        if not getattr(view, 'csrf_exempt', False):
            inner = csrf_protect(inner)
        return update_wrapper(inner, view)

    def get_urls(self):
        """Build the full urlpattern list: site-wide views, per-model views,
        and per-app index pages."""
        from django.conf.urls import url, include
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.contenttypes.views imports ContentType.
        from django.contrib.contenttypes import views as contenttype_views

        def wrap(view, cacheable=False):
            def wrapper(*args, **kwargs):
                return self.admin_view(view, cacheable)(*args, **kwargs)
            wrapper.admin_site = self
            return update_wrapper(wrapper, view)

        # Admin-site-wide views.
        urlpatterns = [
            url(r'^$', wrap(self.index), name='index'),
            url(r'^login/$', self.login, name='login'),
            url(r'^logout/$', wrap(self.logout), name='logout'),
            url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
            url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
                name='password_change_done'),
            url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
            url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
                name='view_on_site'),
        ]

        # Add in each model's views, and create a list of valid URLS for the
        # app_index
        valid_app_labels = []
        for model, model_admin in self._registry.items():
            urlpatterns += [
                url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
            ]
            if model._meta.app_label not in valid_app_labels:
                valid_app_labels.append(model._meta.app_label)

        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view,
        if valid_app_labels:
            regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
            urlpatterns += [
                url(regex, wrap(self.app_index), name='app_list'),
            ]
        return urlpatterns

    @property
    def urls(self):
        # 3-tuple suitable for include() in a URLconf:
        # (patterns, application namespace, instance namespace).
        return self.get_urls(), 'admin', self.name

    def each_context(self, request):
        """
        Returns a dictionary of variables to put in the template context for
        *every* page in the admin site.

        For sites running on a subpath, use the SCRIPT_NAME value if site_url
        hasn't been customized.
        """
        script_name = request.META['SCRIPT_NAME']
        site_url = script_name if self.site_url == '/' and script_name else self.site_url
        return {
            'site_title': self.site_title,
            'site_header': self.site_header,
            'site_url': site_url,
            'has_permission': self.has_permission(request),
            'available_apps': self.get_app_list(request),
        }

    def password_change(self, request, extra_context=None):
        """
        Handles the "change password" task -- both form display and validation.
        """
        from django.contrib.admin.forms import AdminPasswordChangeForm
        from django.contrib.auth.views import password_change
        url = reverse('admin:password_change_done', current_app=self.name)
        defaults = {
            'password_change_form': AdminPasswordChangeForm,
            'post_change_redirect': url,
            'extra_context': dict(self.each_context(request), **(extra_context or {})),
        }
        if self.password_change_template is not None:
            defaults['template_name'] = self.password_change_template
        request.current_app = self.name
        return password_change(request, **defaults)

    def password_change_done(self, request, extra_context=None):
        """
        Displays the "success" page after a password change.
        """
        from django.contrib.auth.views import password_change_done
        defaults = {
            'extra_context': dict(self.each_context(request), **(extra_context or {})),
        }
        if self.password_change_done_template is not None:
            defaults['template_name'] = self.password_change_done_template
        request.current_app = self.name
        return password_change_done(request, **defaults)

    def i18n_javascript(self, request):
        """
        Displays the i18n JavaScript that the Django admin requires.
        """
        return JavaScriptCatalog.as_view(packages=['django.contrib.admin'])(request)

    @never_cache
    def logout(self, request, extra_context=None):
        """
        Logs out the user for the given HttpRequest.

        This should *not* assume the user is already logged in.
        """
        from django.contrib.auth.views import logout
        defaults = {
            'extra_context': dict(
                self.each_context(request),
                # Since the user isn't logged out at this point, the value of
                # has_permission must be overridden.
                has_permission=False,
                **(extra_context or {})
            ),
        }
        if self.logout_template is not None:
            defaults['template_name'] = self.logout_template
        request.current_app = self.name
        return logout(request, **defaults)

    @never_cache
    def login(self, request, extra_context=None):
        """
        Displays the login form for the given HttpRequest.
        """
        if request.method == 'GET' and self.has_permission(request):
            # Already logged-in, redirect to admin index
            index_path = reverse('admin:index', current_app=self.name)
            return HttpResponseRedirect(index_path)

        from django.contrib.auth.views import login
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.admin.forms eventually imports User.
        from django.contrib.admin.forms import AdminAuthenticationForm
        context = dict(
            self.each_context(request),
            title=_('Log in'),
            app_path=request.get_full_path(),
            username=request.user.get_username(),
        )
        if (REDIRECT_FIELD_NAME not in request.GET and
                REDIRECT_FIELD_NAME not in request.POST):
            context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
        context.update(extra_context or {})

        defaults = {
            'extra_context': context,
            'authentication_form': self.login_form or AdminAuthenticationForm,
            'template_name': self.login_template or 'admin/login.html',
        }
        request.current_app = self.name
        return login(request, **defaults)

    def _build_app_dict(self, request, label=None):
        """
        Builds the app dictionary. Takes an optional label parameters to filter
        models of a specific app.
        """
        app_dict = {}

        if label:
            models = {
                m: m_a for m, m_a in self._registry.items()
                if m._meta.app_label == label
            }
        else:
            models = self._registry

        for model, model_admin in models.items():
            app_label = model._meta.app_label

            has_module_perms = model_admin.has_module_permission(request)
            if not has_module_perms:
                if label:
                    raise PermissionDenied
                continue

            perms = model_admin.get_model_perms(request)

            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True not in perms.values():
                continue

            info = (app_label, model._meta.model_name)
            model_dict = {
                'name': capfirst(model._meta.verbose_name_plural),
                'object_name': model._meta.object_name,
                'perms': perms,
            }
            if perms.get('change'):
                try:
                    model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                except NoReverseMatch:
                    pass
            if perms.get('add'):
                try:
                    model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                except NoReverseMatch:
                    pass

            if app_label in app_dict:
                app_dict[app_label]['models'].append(model_dict)
            else:
                app_dict[app_label] = {
                    'name': apps.get_app_config(app_label).verbose_name,
                    'app_label': app_label,
                    'app_url': reverse(
                        'admin:app_list',
                        kwargs={'app_label': app_label},
                        current_app=self.name,
                    ),
                    'has_module_perms': has_module_perms,
                    'models': [model_dict],
                }

        if label:
            return app_dict.get(label)
        return app_dict

    def get_app_list(self, request):
        """
        Returns a sorted list of all the installed apps that have been
        registered in this site.
        """
        app_dict = self._build_app_dict(request)

        # Sort the apps alphabetically.
        app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())

        # Sort the models alphabetically within each app.
        for app in app_list:
            app['models'].sort(key=lambda x: x['name'])

        return app_list

    @never_cache
    def index(self, request, extra_context=None):
        """
        Displays the main admin index page, which lists all of the installed
        apps that have been registered in this site.
        """
        app_list = self.get_app_list(request)

        context = dict(
            self.each_context(request),
            title=self.index_title,
            app_list=app_list,
        )
        context.update(extra_context or {})

        request.current_app = self.name

        return TemplateResponse(request, self.index_template or 'admin/index.html', context)

    def app_index(self, request, app_label, extra_context=None):
        # Per-application index page: lists the models of one app only.
        app_dict = self._build_app_dict(request, app_label)
        if not app_dict:
            raise Http404('The requested admin page does not exist.')
        # Sort the models alphabetically within each app.
        app_dict['models'].sort(key=lambda x: x['name'])
        app_name = apps.get_app_config(app_label).verbose_name
        context = dict(
            self.each_context(request),
            title=_('%(app)s administration') % {'app': app_name},
            app_list=[app_dict],
            app_label=app_label,
        )
        context.update(extra_context or {})

        request.current_app = self.name

        return TemplateResponse(request, self.app_index_template or [
            'admin/%s/app_index.html' % app_label,
            'admin/app_index.html'
        ], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site
# (e.g. to host two independent admin instances under different URLs).
site = AdminSite()
| gpl-3.0 |
darthbhyrava/pywikibot-local | pywikibot/tools/__init__.py | 2 | 55376 | # -*- coding: utf-8 -*-
"""Miscellaneous helper functions (not wiki-dependent)."""
#
# (C) Pywikibot team, 2008-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, print_function, unicode_literals
__version__ = '$Id$'
import collections
import gzip
import inspect
import re
import subprocess
import sys
import threading
import time
import types
from distutils.version import Version
from warnings import warn
# Version triple of the running interpreter, e.g. (2, 7, 10) or (3, 5, 1).
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)

if not PY2:
    import queue as Queue
    # On Python 3 the historic string types all collapse to str.
    StringTypes = basestring = (str,)
    UnicodeType = unicode = str
else:
    import Queue
    StringTypes = types.StringTypes
    UnicodeType = types.UnicodeType
from pywikibot.logging import debug
# Prefer the stdlib bz2; fall back to the third-party 'bz2file' package.
# If neither is available, keep the original ImportError object in the
# 'bz2' name so it can be raised later, on first actual use.
try:
    import bz2
except ImportError as bz2_import_error:
    try:
        import bz2file as bz2
        warn('package bz2 was not found; using bz2file', ImportWarning)
    except ImportError:
        warn('package bz2 and bz2file were not found', ImportWarning)
        bz2 = bz2_import_error
if PYTHON_VERSION < (3, 5):
    # Although deprecated in Python 3, no deprecation message was emitted
    # until 3.5, so the stdlib implementation can be used directly.
    ArgSpec = inspect.ArgSpec
    getargspec = inspect.getargspec
else:
    ArgSpec = collections.namedtuple('ArgSpec', ['args', 'varargs', 'keywords',
                                                 'defaults'])

    def getargspec(func):
        """Python 3 implementation using inspect.signature.

        @param func: function or method to inspect
        @return: ArgSpec(args, varargs, keywords, defaults) mimicking the
            legacy inspect.getargspec() result
        """
        sig = inspect.signature(func)
        args = []
        defaults = []
        varargs = None
        kwargs = None
        for p in sig.parameters.values():
            if p.kind == inspect.Parameter.VAR_POSITIONAL:
                varargs = p.name
            elif p.kind == inspect.Parameter.VAR_KEYWORD:
                kwargs = p.name
            else:
                # NOTE(review): keyword-only parameters are lumped into
                # 'args' here, whereas legacy getargspec rejected them --
                # assumes callers only pass classic signatures; confirm.
                args += [p.name]
                # Use an identity test: Parameter.empty is a sentinel, and
                # '!=' would invoke a custom __eq__ of an actual default
                # value (e.g. a numpy array), giving a wrong answer.
                if p.default is not inspect.Parameter.empty:
                    defaults += [p.default]
        return ArgSpec(args, varargs, kwargs,
                       tuple(defaults) if defaults else None)
# Logger channel name used by debug() calls in this module.
_logger = 'tools'
class _NotImplementedWarning(RuntimeWarning):
"""Feature that is no longer implemented."""
pass
class NotImplementedClass(object):

    """No implementation is available."""

    # NOTE: the docstring above is user-visible -- it is interpolated into
    # the NotImplementedError message via self.__doc__, so keep it stable.
    def __init__(self, *args, **kwargs):
        """Refuse instantiation, naming the concrete (sub)class."""
        message = '{0}: {1}'.format(self.__class__.__name__, self.__doc__)
        raise NotImplementedError(message)
# Python 2.6 lacks collections.OrderedDict/Counter and a keyword-capable
# itertools.count; borrow them from the 'future' package (or the standalone
# 'ordereddict'/'counter' backports), degrading to NotImplementedClass
# placeholders when nothing is available.
if PYTHON_VERSION < (2, 7):
    try:
        import future.backports.misc
    except ImportError:
        warn("""
pywikibot support of Python 2.6 relies on package future for many features.
Please upgrade to Python 2.7+ or Python 3.3+, or run:
    "pip install future>=0.15.0"
""", RuntimeWarning)
        try:
            from ordereddict import OrderedDict
        except ImportError:
            class OrderedDict(NotImplementedClass):

                """OrderedDict not found."""

                pass

        try:
            from counter import Counter
        except ImportError:
            class Counter(NotImplementedClass):

                """Counter not found."""

                pass
        count = None
    else:
        Counter = future.backports.misc.Counter
        OrderedDict = future.backports.misc.OrderedDict
        try:
            count = future.backports.misc.count
        except AttributeError:
            warn('Please update the "future" package to at least version '
                 '0.15.0 to use its count.', RuntimeWarning, 2)
            count = None
        del future

    # Last resort: a local generator equivalent of itertools.count.
    if count is None:
        def count(start=0, step=1):
            """Backported C{count} to support keyword arguments and step."""
            while True:
                yield start
                start += step

else:
    from collections import Counter  # noqa ; unused
    from collections import OrderedDict
    from itertools import count  # noqa ; unused
def empty_iterator():
    # http://stackoverflow.com/a/13243870/473890
    """Return a generator that yields nothing."""
    # The unreachable 'yield' makes this a generator function, so calling
    # it returns an already-exhausted generator instead of None.
    if False:
        yield
class UnicodeMixin(object):

    """Mixin adding a suitable __str__ for both Python 2 and Python 3."""

    # The implementation is chosen once, at class-creation time.
    if PY2:
        def __str__(self):
            """Return the str representation of the UTF-8 encoded Unicode."""
            return self.__unicode__().encode('utf8')
    else:
        def __str__(self):
            """Return the unicode representation as the str representation."""
            return self.__unicode__()
# From http://python3porting.com/preparing.html
class ComparableMixin(object):

    """Mixin class to allow comparing to other objects which are comparable.

    Subclasses must provide _cmpkey(). Each operator is expressed as the
    *reflected* operation on ``other`` (e.g. __lt__ delegates to
    ``other > key``), so ``other`` only needs to be comparable with the
    key -- do not "simplify" these into direct comparisons, as that would
    change which object's comparison method is invoked first.
    """

    def __lt__(self, other):
        """Compare if self is less than other."""
        return other > self._cmpkey()

    def __le__(self, other):
        """Compare if self is less equals other."""
        return other >= self._cmpkey()

    def __eq__(self, other):
        """Compare if self is equal to other."""
        return other == self._cmpkey()

    def __ge__(self, other):
        """Compare if self is greater equals other."""
        return other <= self._cmpkey()

    def __gt__(self, other):
        """Compare if self is greater than other."""
        return other < self._cmpkey()

    def __ne__(self, other):
        """Compare if self is not equal to other."""
        return other != self._cmpkey()
class DotReadableDict(UnicodeMixin):

    """Parent class of Revision() and FileInfo().

    Provide __getitem__(), __unicode__() and __repr__().
    """

    def __getitem__(self, key):
        """Give access to instance values by key, mirroring attributes.

        E.g. revision['revid'] is equivalent to revision.revid, which
        makes formatting strings with the % operator easier.
        """
        return getattr(self, key)

    def __unicode__(self):
        """Return a dict-like string representation of the attributes."""
        # NOTE: the interpreter check runs on every call; doing it once at
        # class-creation time would be marginally more efficient.
        if PY2:
            pairs = (u'{0}: {1}'.format(key, value)
                     for key, value in self.__dict__.items())
            return u'{{{0}}}'.format(u', '.join(pairs))
        return repr(self.__dict__)

    def __repr__(self):
        """Return a more complete string representation."""
        return repr(self.__dict__)
class FrozenDict(dict):

    """A dict refusing any write access after initialisation.

    Every write attempt raises TypeError with a configurable message.
    """

    def __init__(self, data=None, error=None):
        """Constructor.

        @param data: mapping to freeze
        @type data: mapping
        @param error: error message
        @type error: basestring
        """
        if data:
            super(FrozenDict, self).__init__(data)
        else:
            super(FrozenDict, self).__init__()
        # attribute writes still work; only item writes are blocked
        self._error = error or 'FrozenDict: not writable'

    def update(self, *args, **kwargs):
        """Refuse any update by raising TypeError."""
        raise TypeError(self._error)

    __setitem__ = update
def concat_options(message, line_length, options):
    """Return C{message} followed by the wrapped options in parentheses.

    Options are joined by ', ' and wrapped so that each line fits into
    C{line_length} columns after subtracting the indentation which aligns
    continuation lines under the opening parenthesis.
    """
    indent = len(message) + 2
    usable = line_length - indent
    wrapped = u''
    current = u''
    for option in options:
        if current:
            current += ', '
        # the final +1 accounts for the ',' which would follow this option
        if len(current) + len(option) + 1 > usable:
            if wrapped:
                wrapped += '\n' + ' ' * indent
            wrapped += current[:-1]  # drop the trailing space after ','
            current = ''
        current += option
    if current:
        if wrapped:
            wrapped += '\n' + ' ' * indent
        wrapped += current
    return u'{0} ({1}):'.format(message, wrapped)
class LazyRegex(object):

    """Regex object that obtains and compiles the regex on first usage.

    Instances behave like the object created using L{re.compile}.
    """

    def __init__(self, pattern, flags=0):
        """Constructor.

        @param pattern: L{re} regex pattern
        @type pattern: str or callable
        @param flags: L{re.compile} flags
        @type flags: int
        """
        self.raw = pattern
        self.flags = flags
        super(LazyRegex, self).__init__()

    @property
    def raw(self):
        """Return the raw pattern, resolving a callable pattern once."""
        if callable(self._raw):
            self._raw = self._raw()
        return self._raw

    @raw.setter
    def raw(self, value):
        """Store a new pattern and invalidate the compiled regex."""
        self._raw = value
        self._compiled = None

    @property
    def flags(self):
        """Return the compilation flags."""
        return self._flags

    @flags.setter
    def flags(self, value):
        """Store new flags and invalidate the compiled regex."""
        self._flags = value
        self._compiled = None

    def __getattr__(self, attr):
        """Compile the regex lazily and delegate attribute access to it."""
        if not self._raw:
            raise AttributeError('%s.raw not set' % self.__class__.__name__)
        if not self._compiled:
            self._compiled = re.compile(self.raw, self.flags)
        if hasattr(self._compiled, attr):
            return getattr(self._compiled, attr)
        raise AttributeError('%s: attr %s not recognised'
                             % (self.__class__.__name__, attr))
class DeprecatedRegex(LazyRegex):

    """Regex object that issues a deprecation notice when used."""

    def __init__(self, pattern, flags=0, name=None, instead=None):
        """Constructor.

        If name is None, the regex pattern will be used as part of
        the deprecation warning.

        @param name: name of the object that is deprecated
        @type name: str or None
        @param instead: if provided, will be used to specify the replacement
            of the deprecated name
        @type instead: str
        """
        super(DeprecatedRegex, self).__init__(pattern, flags)
        self._name = name or self.raw
        self._instead = instead

    def __getattr__(self, attr):
        """Issue a deprecation warning, then delegate to L{LazyRegex}."""
        issue_deprecation_warning(self._name, self._instead, 2)
        return super(DeprecatedRegex, self).__getattr__(attr)
def first_lower(string):
    """Return a string with the first character uncapitalized.

    Empty strings are supported. The original string is not changed.
    """
    if not string:
        return string
    return string[0].lower() + string[1:]
def first_upper(string):
    """Return a string with the first character capitalized.

    Empty strings are supported. The original string is not changed.
    """
    if not string:
        return string
    return string[0].upper() + string[1:]
def normalize_username(username):
    """Normalize the username.

    Collapse runs of underscores and spaces into single spaces, strip
    surrounding whitespace and capitalise the first character.  Falsy
    input yields None.
    """
    if not username:
        return None
    collapsed = re.sub('[_ ]+', ' ', username).strip()
    return first_upper(collapsed)
class MediaWikiVersion(Version):
    """
    Version object to allow comparing 'wmf' versions with normal ones.
    The version mainly consist of digits separated by periods. After that is a
    suffix which may only be 'wmf<number>', 'alpha', 'beta<number>' or
    '-rc.<number>' (the - and . are optional). They are considered from old to
    new in that order with a version number without suffix is considered the
    newest. This secondary difference is stored in an internal _dev_version
    attribute.
    Two versions are equal if their normal version and dev version are equal. A
    version is greater if the normal version or dev version is greater. For
    example:
    1.24 < 1.24.1 < 1.25wmf1 < 1.25alpha < 1.25beta1 < 1.25beta2
    < 1.25-rc-1 < 1.25-rc.2 < 1.25
    Any other suffixes are considered invalid.
    """
    # Group 1 is the dotted numeric core; group 2 the whole suffix; groups
    # 3-5 the numeric counters of the wmf, beta and rc suffixes respectively.
    MEDIAWIKI_VERSION = re.compile(
        r'^(\d+(?:\.\d+)+)(-?wmf\.?(\d+)|alpha|beta(\d+)|-?rc\.?(\d+)|.*)?$')
    @classmethod
    def from_generator(cls, generator):
        """Create instance using the generator string."""
        if not generator.startswith('MediaWiki '):
            raise ValueError('Generator string ({0!r}) must start with '
                             '"MediaWiki "'.format(generator))
        return cls(generator[len('MediaWiki '):])
    def parse(self, vstring):
        """Parse version string."""
        version_match = MediaWikiVersion.MEDIAWIKI_VERSION.match(vstring)
        if not version_match:
            raise ValueError('Invalid version number "{0}"'.format(vstring))
        components = [int(n) for n in version_match.group(1).split('.')]
        # The _dev_version numbering scheme might change. E.g. if a stage
        # between 'alpha' and 'beta' is added, 'beta', 'rc' and stable releases
        # are reassigned (beta=3, rc=4, stable=5).
        if version_match.group(3):  # wmf version
            self._dev_version = (0, int(version_match.group(3)))
        elif version_match.group(4):
            # beta release
            self._dev_version = (2, int(version_match.group(4)))
        elif version_match.group(5):
            # release candidate
            self._dev_version = (3, int(version_match.group(5)))
        elif version_match.group(2) in ('alpha', '-alpha'):
            self._dev_version = (1, )
        else:
            for handled in ('wmf', 'alpha', 'beta', 'rc'):
                # if any of those pops up here our parser has failed
                assert handled not in version_match.group(2), \
                    'Found "{0}" in "{1}"'.format(handled, version_match.group(2))
            if version_match.group(2):
                debug('Additional unused version part '
                      '"{0}"'.format(version_match.group(2)),
                      _logger)
            # no (known) suffix: a stable release, which sorts newest
            self._dev_version = (4, )
        self.suffix = version_match.group(2) or ''
        self.version = tuple(components)
    def __str__(self):
        """Return version number with optional suffix."""
        return '.'.join(str(v) for v in self.version) + self.suffix
    def _cmp(self, other):
        # Compare on the numeric version tuple first, then the dev stage.
        # Strings are coerced so e.g. version > '1.24' works directly.
        if isinstance(other, basestring):
            other = MediaWikiVersion(other)
        if self.version > other.version:
            return 1
        if self.version < other.version:
            return -1
        if self._dev_version > other._dev_version:
            return 1
        if self._dev_version < other._dev_version:
            return -1
        return 0
    if PY2:
        # distutils' Version on Python 2 compares via __cmp__.
        __cmp__ = _cmp
class ThreadedGenerator(threading.Thread):
    """Look-ahead generator class.
    Runs a generator in a separate thread and queues the results; can
    be called like a regular generator.
    Subclasses should override self.generator, I{not} self.run
    Important: the generator thread will stop itself if the generator's
    internal queue is exhausted; but, if the calling program does not use
    all the generated values, it must call the generator's stop() method to
    stop the background thread. Example usage:
    >>> gen = ThreadedGenerator(target=range, args=(20,))
    >>> try:
    ...     data = list(gen)
    ... finally:
    ...     gen.stop()
    >>> data
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    """
    def __init__(self, group=None, target=None, name="GeneratorThread",
                 args=(), kwargs=None, qsize=65536):
        """Constructor. Takes same keyword arguments as threading.Thread.
        target must be a generator function (or other callable that returns
        an iterable object).
        @param qsize: The size of the lookahead queue. The larger the qsize,
        the more values will be computed in advance of use (which can eat
        up memory and processor time).
        @type qsize: int
        """
        if kwargs is None:
            kwargs = {}
        if target:
            self.generator = target
        # a subclass may provide self.generator instead of passing a target
        if not hasattr(self, "generator"):
            raise RuntimeError("No generator for ThreadedGenerator to run.")
        self.args, self.kwargs = args, kwargs
        threading.Thread.__init__(self, group=group, name=name)
        self.queue = Queue.Queue(qsize)
        # set either when the generator is exhausted or via stop()
        self.finished = threading.Event()
    def __iter__(self):
        """Iterate results from the queue."""
        # lazily start the producer thread on first iteration
        if not self.isAlive() and not self.finished.isSet():
            self.start()
        # if there is an item in the queue, yield it, otherwise wait
        while not self.finished.isSet():
            try:
                yield self.queue.get(True, 0.25)
            except Queue.Empty:
                # no item ready yet; loop to re-check the finished flag
                pass
            except KeyboardInterrupt:
                self.stop()
    def stop(self):
        """Stop the background thread."""
        self.finished.set()
    def run(self):
        """Run the generator and store the results on the queue."""
        # target may already be an iterable object rather than a callable
        iterable = any([hasattr(self.generator, key)
                        for key in ['__iter__', '__getitem__']])
        if iterable and not self.args and not self.kwargs:
            self.__gen = self.generator
        else:
            self.__gen = self.generator(*self.args, **self.kwargs)
        for result in self.__gen:
            while True:
                if self.finished.isSet():
                    return
                try:
                    self.queue.put_nowait(result)
                except Queue.Full:
                    # consumer is slow; back off and retry the same item
                    time.sleep(0.25)
                    continue
                break
        # wait for queue to be emptied, then kill the thread
        while not self.finished.isSet() and not self.queue.empty():
            time.sleep(0.25)
        self.stop()
def itergroup(iterable, size):
    """Make an iterator that returns lists of (up to) size items from iterable.

    Example:

    >>> i = itergroup(range(25), 10)
    >>> print(next(i))
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> print(next(i))
    [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    >>> print(next(i))
    [20, 21, 22, 23, 24]
    >>> print(next(i))
    Traceback (most recent call last):
     ...
    StopIteration
    """
    batch = []
    for element in iterable:
        batch.append(element)
        if len(batch) == size:
            yield batch
            batch = []
    # a trailing, partially filled group is yielded as well
    if batch:
        yield batch
class ThreadList(list):

    """A simple threadpool class to limit the number of simultaneous threads.

    Any threading.Thread object can be added to the pool using the append()
    method. If the maximum number of simultaneous threads has not been reached,
    the Thread object will be started immediately; if not, the append() call
    will block until the thread is able to start.

    >>> pool = ThreadList(limit=10)
    >>> def work():
    ...     time.sleep(1)
    ...
    >>> for x in range(20):
    ...     pool.append(threading.Thread(target=work))
    ...
    """

    _logger = "threadlist"

    def __init__(self, limit=128, *args):
        """Constructor.

        @param limit: the maximum number of simultaneously alive threads
        @type limit: int
        @raises TypeError: if a seeded item is not a threading.Thread
        """
        self.limit = limit
        super(ThreadList, self).__init__(*args)
        for item in self:
            # Bugfix: the isinstance() arguments were reversed
            # (isinstance(threading.Thread, item)), which made this check
            # raise TypeError from isinstance itself for ANY seeded list,
            # valid threads included.
            if not isinstance(item, threading.Thread):
                raise TypeError("Cannot add '%s' to ThreadList" % type(item))

    def active_count(self):
        """Return the number of alive threads, and delete all non-alive ones."""
        cnt = 0
        for item in self[:]:
            # is_alive() replaces camelCase isAlive(), which was removed in
            # Python 3.9; is_alive() exists since Python 2.6.
            if item.is_alive():
                cnt += 1
            else:
                self.remove(item)
        return cnt

    def append(self, thd):
        """Add a thread to the pool and start it.

        Blocks until the number of alive threads drops below the limit.
        """
        if not isinstance(thd, threading.Thread):
            raise TypeError("Cannot append '%s' to ThreadList" % type(thd))
        while self.active_count() >= self.limit:
            time.sleep(2)
        super(ThreadList, self).append(thd)
        thd.start()
        debug("thread %d ('%s') started" % (len(self), type(thd)),
              self._logger)

    def stop_all(self):
        """Stop all threads the pool."""
        if self:
            debug(u'EARLY QUIT: Threads: %d' % len(self), self._logger)
        for thd in self:
            # expects ThreadedGenerator-like members providing stop()/queue
            thd.stop()
            debug(u'EARLY QUIT: Queue size left in %s: %s'
                  % (thd, thd.queue.qsize()), self._logger)
def intersect_generators(genlist):
    """
    Intersect generators listed in genlist.
    Yield items only if they are yielded by all generators in genlist.
    Threads (via ThreadedGenerator) are used in order to run generators
    in parallel, so that items can be yielded before generators are
    exhausted.
    Threads are stopped when they are either exhausted or Ctrl-C is pressed.
    Quitting before all generators are finished is attempted if
    there is no more chance of finding an item in all queues.
    @param genlist: list of page generators
    @type genlist: list
    """
    # If any generator is empty, no pages are going to be returned
    for source in genlist:
        if not source:
            debug('At least one generator ({0!r}) is empty and execution was '
                  'skipped immediately.'.format(source), 'intersect')
            return
    # Item is cached to check that it is found n_gen
    # times before being yielded.
    cache = collections.defaultdict(set)
    n_gen = len(genlist)
    # Class to keep track of alive threads.
    # Start new threads and remove completed threads.
    thrlist = ThreadList()
    for source in genlist:
        threaded_gen = ThreadedGenerator(name=repr(source), target=source)
        # daemon threads let the interpreter exit even if a producer blocks
        threaded_gen.daemon = True
        thrlist.append(threaded_gen)
    while True:
        # Get items from queues in a round-robin way.
        for t in thrlist:
            try:
                # TODO: evaluate if True and timeout is necessary.
                item = t.queue.get(True, 0.1)
                # Cache entry is a set of thread.
                # Duplicates from same thread are not counted twice.
                cache[item].add(t)
                if len(cache[item]) == n_gen:
                    yield item
                    # Remove item from cache.
                    # No chance of seeing it again (see later: early stop).
                    cache.pop(item)
                active = thrlist.active_count()
                max_cache = n_gen
                if cache.values():
                    max_cache = max(len(v) for v in cache.values())
                # No. of active threads is not enough to reach n_gen.
                # We can quit even if some thread is still active.
                # There could be an item in all generators which has not yet
                # appeared from any generator. Only when we have lost one
                # generator, then we can bail out early based on seen items.
                if active < n_gen and n_gen - max_cache > active:
                    thrlist.stop_all()
                    return
            except Queue.Empty:
                pass
            except KeyboardInterrupt:
                thrlist.stop_all()
            finally:
                # All threads are done.
                if thrlist.active_count() == 0:
                    return
def filter_unique(iterable, container=None, key=None, add=None):
    """
    Yield unique items from an iterable, omitting duplicates.

    By default, to provide uniqueness, it puts the generated items into
    the keys of a dict created as a local variable, each with a value of True.
    It only yields items which are not already present in the local dict.

    For large collections, this is not memory efficient, as a strong reference
    to every item is kept in a local dict which cannot be cleared.
    Also, the local dict cannot be re-used when chaining unique operations on
    multiple generators.

    To avoid these issues, it is advisable for the caller to provide their own
    container and set the key parameter to be the function L{hash}, or use a
    L{weakref} as the key.

    The container can be any object that supports __contains__.
    If the container is a set or dict, the method add or __setitem__ will be
    used automatically. Any other method may be provided explicitly using the
    add parameter.

    Note: This is not thread safe.

    @param iterable: the source iterable
    @type iterable: collections.Iterable
    @param container: storage of seen items
    @type container: type
    @param key: function to convert the item to a key
    @type key: callable
    @param add: function to add an item to the container
    @type add: callable
    """
    if container is None:
        container = {}
    if not add:
        # derive the add callable from the container's own protocol
        if hasattr(container, 'add'):
            def add(x):
                container.add(key(x) if key else x)
        else:
            def add(x):
                container[key(x) if key else x] = True
    for item in iterable:
        try:
            if (key(item) if key else item) not in container:
                add(item)
                yield item
        except StopIteration:
            return
class CombinedError(KeyError, IndexError):

    """Error subclassing both KeyError and IndexError, caught by either."""
class EmptyDefault(str, collections.Mapping):

    """A default for a not existing siteinfo property.

    It should be chosen if there is no better default known. It acts like an
    empty collection, so it can be iterated through safely if treated as a
    list, tuple, set or dictionary. It is also basically an empty string.

    Accessing a value via __getitem__ will result in a combined KeyError and
    IndexError.
    """

    def __init__(self):
        """Initialise the default as an empty string."""
        str.__init__(self)

    def _empty_iter(self):
        """Return an iterator which yields nothing (argument is ignored)."""
        return empty_iterator()

    def __getitem__(self, key):
        """Raise a L{CombinedError} for any key or index."""
        raise CombinedError(key)

    # all iteration protocols share the same do-nothing iterator
    iteritems = itervalues = iterkeys = __iter__ = _empty_iter


EMPTY_DEFAULT = EmptyDefault()
class SelfCallMixin(object):

    """Return self when called.

    When '_own_desc' is defined it'll also issue a deprecation warning using
    issue_deprecation_warning('Calling ' + _own_desc, 'it directly').
    """

    def __call__(self):
        """Return the instance itself, optionally warning about the call."""
        if hasattr(self, '_own_desc'):
            issue_deprecation_warning('Calling {0}'.format(self._own_desc),
                                      'it directly', 2)
        return self


class SelfCallDict(SelfCallMixin, dict):

    """Dict with SelfCallMixin."""


class SelfCallString(SelfCallMixin, str):

    """Unicode string with SelfCallMixin."""
class IteratorNextMixin(collections.Iterator):
    """Backwards compatibility for Iterators."""
    # Python 2 iterators use next() while Python 3 uses __next__(); alias
    # one to the other so subclasses only need to implement __next__().
    if PY2:
        def next(self):
            """Python 2 next."""
            return self.__next__()
class DequeGenerator(IteratorNextMixin, collections.deque):

    """A generator that allows items to be added during generating."""

    def __next__(self):
        """Pop the leftmost item, or end iteration when empty."""
        if not len(self):
            raise StopIteration
        return self.popleft()
class ContextManagerWrapper(object):

    """Wraps an object in a context manager.

    It is redirecting all access to the wrapped object and executes 'close'
    when used as a context manager in with-statements. In such statements the
    value set via 'as' is directly the wrapped object. For example:

    >>> class Wrapper(object):
    ...     def close(self): pass
    >>> an_object = Wrapper()
    >>> wrapped = ContextManagerWrapper(an_object)
    >>> with wrapped as another_object:
    ...     assert another_object is an_object

    It does not subclass the object though, so isinstance checks will fail
    outside a with-statement.
    """

    def __init__(self, wrapped):
        """Create a new wrapper around C{wrapped}."""
        super(ContextManagerWrapper, self).__init__()
        # bypass our own __setattr__, which would otherwise forward the
        # write to the wrapped object instead of storing the reference here
        super(ContextManagerWrapper, self).__setattr__('_wrapped', wrapped)

    def __enter__(self):
        """Enter a context manager and use the wrapped object directly."""
        return self._wrapped

    def __exit__(self, exc_type, exc_value, traceback):
        """Call close on the wrapped object when exiting a context manager."""
        self._wrapped.close()

    def __getattr__(self, name):
        """Delegate attribute reads to the wrapped object."""
        return getattr(self._wrapped, name)

    def __setattr__(self, name, value):
        """Delegate attribute writes to the wrapped object."""
        setattr(self._wrapped, name, value)
def open_archive(filename, mode='rb', use_extension=True):
    """
    Open a file and uncompress it if needed.
    This function supports bzip2, gzip and 7zip as compression containers. It
    uses the packages available in the standard library for bzip2 and gzip so
    they are always available. 7zip is only available when a 7za program is
    available and only supports reading from it.
    The compression is either selected via the magic number or file ending.
    @param filename: The filename.
    @type filename: str
    @param use_extension: Use the file extension instead of the magic number
    to determine the type of compression (default True). Must be True when
    writing or appending.
    @type use_extension: bool
    @param mode: The mode in which the file should be opened. It may either be
    'r', 'rb', 'a', 'ab', 'w' or 'wb'. All modes open the file in binary
    mode. It defaults to 'rb'.
    @type mode: string
    @raises ValueError: When 7za is not available or the opening mode is unknown
    or it tries to write a 7z archive.
    @raises FileNotFoundError: When the filename doesn't exist and it tries
    to read from it or it tries to determine the compression algorithm (or
    IOError on Python 2).
    @raises OSError: When it's not a 7z archive but the file extension is 7z.
    It is also raised by bz2 when its content is invalid. gzip does not
    immediately raise that error but only on reading it.
    @return: A file-like object returning the uncompressed data in binary mode.
    Before Python 2.7 the GzipFile object and before 2.7.1 the BZ2File are
    wrapped in a ContextManagerWrapper with its advantages/disadvantages.
    @rtype: file-like object
    """
    def wrap(wrapped, sub_ver):
        """Wrap in a wrapper when this is below Python version 2.7."""
        if PYTHON_VERSION < (2, 7, sub_ver):
            return ContextManagerWrapper(wrapped)
        else:
            return wrapped
    # normalize the short modes to their binary equivalents
    if mode in ('r', 'a', 'w'):
        mode += 'b'
    elif mode not in ('rb', 'ab', 'wb'):
        raise ValueError('Invalid mode: "{0}"'.format(mode))
    if use_extension:
        # if '.' not in filename, it'll be 1 character long but otherwise
        # contain the period
        extension = filename[filename.rfind('.'):][1:]
    else:
        if mode != 'rb':
            raise ValueError('Magic number detection only when reading')
        with open(filename, 'rb') as f:
            magic_number = f.read(8)
        # map the leading magic bytes to the matching container format
        if magic_number.startswith(b'BZh'):
            extension = 'bz2'
        elif magic_number.startswith(b'\x1F\x8B\x08'):
            extension = 'gz'
        elif magic_number.startswith(b"7z\xBC\xAF'\x1C"):
            extension = '7z'
        else:
            extension = ''
    if extension == 'bz2':
        # the module-level import may have stored an ImportError instead
        if isinstance(bz2, ImportError):
            raise bz2
        return wrap(bz2.BZ2File(filename, mode), 1)
    elif extension == 'gz':
        return wrap(gzip.open(filename, mode), 0)
    elif extension == '7z':
        if mode != 'rb':
            raise NotImplementedError('It is not possible to write a 7z file.')
        try:
            process = subprocess.Popen(['7za', 'e', '-bd', '-so', filename],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       bufsize=65535)
        except OSError:
            raise ValueError('7za is not installed and can not '
                             'uncompress "{0}"'.format(filename))
        else:
            # 7za reports success on stderr; stdout carries the payload
            stderr = process.stderr.read()
            process.stderr.close()
            if b'Everything is Ok' not in stderr:
                process.stdout.close()
                # OSError is also raised when bz2 is invalid
                raise OSError('Invalid 7z archive.')
            else:
                return process.stdout
    else:
        # assume it's an uncompressed file
        return open(filename, 'rb')
def merge_unique_dicts(*args, **kwargs):
    """
    Return a merged dict, verifying that the original dicts had unique keys.

    The positional arguments are the dictionaries to be merged. It is also
    possible to define an additional dict using the keyword arguments.
    """
    sources = list(args) + [dict(kwargs)]
    duplicates = set()
    merged = {}
    for source in sources:
        # record keys seen in more than one source before overwriting
        duplicates |= set(source.keys()) & set(merged.keys())
        merged.update(source)
    if duplicates:
        raise ValueError(
            'Multiple dicts contain the same keys: '
            '{0}'.format(', '.join(sorted(unicode(key)
                                          for key in duplicates))))
    return merged
# Decorators
#
# Decorator functions without parameters are _invoked_ differently from
# decorator functions with function syntax. For example, @deprecated causes
# a different invocation to @deprecated().
# The former is invoked with the decorated function as args[0].
# The latter is invoked with the decorator arguments as *args & **kwargs,
# and it must return a callable which will be invoked with the decorated
# function as args[0].
# The following decorators may support both syntaxes, e.g. @deprecated and
# @deprecated() both work. In order to achieve that, the code inspects
# args[0] to see if it callable. Therefore, a decorator must not accept
# only one arg, and that arg be a callable, as it will be detected as
# a deprecator without any arguments.
def signature(obj):
    """
    Safely return function Signature object (PEP 362).

    inspect.signature was introduced in 3.3, however backports are available.
    In Python 3.3, it does not support all types of callables, and should
    not be relied upon. Python 3.4 works correctly.

    Any exception calling inspect.signature is ignored and None is returned.

    @param obj: Function to inspect
    @type obj: callable
    @rtype: inspect.Signature or None
    """
    try:
        return inspect.signature(obj)
    # Bugfix: TypeError (raised for non-callable objects) was not caught,
    # contradicting the docstring's promise that any failure returns None.
    except (AttributeError, TypeError, ValueError):
        return None
def add_decorated_full_name(obj, stacklevel=1):
    """Extract full object name, including class, and store in __full_name__.

    This must be done on all decorators that are chained together, otherwise
    the second decorator will have the wrong full name.

    @param obj: A object being decorated
    @type obj: object
    @param stacklevel: level to use
    @type stacklevel: int
    """
    if hasattr(obj, '__full_name__'):
        return
    # Frame 0 is this function, frame 1 the decorator and the frame above
    # that the scope in which the decorated object is being defined.
    outer_frame = sys._getframe(stacklevel + 1)
    scope_name = outer_frame.f_code.co_name
    if scope_name and scope_name != '<module>':
        obj.__full_name__ = '.'.join((obj.__module__,
                                      scope_name,
                                      obj.__name__))
    else:
        obj.__full_name__ = '.'.join((obj.__module__, obj.__name__))
def manage_wrapping(wrapper, obj):
    """Copy metadata from C{obj} onto C{wrapper} and track wrapping depth."""
    for attribute in ('__doc__', '__name__', '__module__'):
        setattr(wrapper, attribute, getattr(obj, attribute))
    wrapper.__signature__ = signature(obj)
    if not hasattr(obj, '__full_name__'):
        add_decorated_full_name(obj, 2)
    wrapper.__full_name__ = obj.__full_name__
    # Use the previous wrapper's depth, if it exists
    wrapper.__depth__ = getattr(obj, '__depth__', 0) + 1
    # Obtain the innermost wrapped object from the previous wrapper
    wrapped = getattr(obj, '__wrapped__', obj)
    wrapper.__wrapped__ = wrapped
    # Increment the number of wrappers sitting on top of the original
    wrapped.__wrappers__ = getattr(wrapped, '__wrappers__', 0) + 1
def get_wrapper_depth(wrapper):
    """Return depth of wrapper function.

    Counts how many wrappers sit between this wrapper and the innermost
    wrapped object, based on the bookkeeping done by manage_wrapping().
    """
    total_wrappers = wrapper.__wrapped__.__wrappers__
    return total_wrappers + 1 - wrapper.__depth__
def add_full_name(obj):
    """
    A decorator to add __full_name__ to the function being decorated.
    This should be done for all decorators used in pywikibot, as any
    decorator that does not add __full_name__ will prevent other
    decorators in the same chain from being able to obtain it.
    This can be used to monkey-patch decorators in other modules.
    e.g.
    <xyz>.foo = add_full_name(<xyz>.foo)
    @param obj: The function to decorate
    @type obj: callable
    @return: decorating function
    @rtype: function
    """
    def outer_wrapper(*outer_args, **outer_kwargs):
        """Outer wrapper.
        The outer wrapper may be the replacement function if the decorated
        decorator was called without arguments, or the replacement decorator
        if the decorated decorator was called without arguments.
        @param outer_args: args
        @type outer_args: list
        @param outer_kwargs: kwargs
        @type outer_kwargs: dict
        """
        def inner_wrapper(*args, **kwargs):
            """Replacement function.
            If the decorator supported arguments, they are in outer_args,
            and this wrapper is used to process the args which belong to
            the function that the decorated decorator was decorating.
            @param args: args passed to the decorated function.
            @param kwargs: kwargs passed to the decorated function.
            """
            add_decorated_full_name(args[0])
            return obj(*outer_args, **outer_kwargs)(*args, **kwargs)
        inner_wrapper.__doc__ = obj.__doc__
        inner_wrapper.__name__ = obj.__name__
        inner_wrapper.__module__ = obj.__module__
        inner_wrapper.__signature__ = signature(obj)
        # The decorator being decorated may have args, so both
        # syntax need to be supported.
        if (len(outer_args) == 1 and len(outer_kwargs) == 0 and
                callable(outer_args[0])):
            # invoked as @decorator: outer_args[0] is the decorated object
            add_decorated_full_name(outer_args[0])
            return obj(outer_args[0])
        else:
            # invoked as @decorator(...): return the replacement decorator
            return inner_wrapper
    if not __debug__:
        # decorations are skipped entirely when running with python -O
        return obj
    return outer_wrapper
def issue_deprecation_warning(name, instead, depth, warning_class=None):
    """Issue a deprecation warning.

    @param name: the name of the deprecated object
    @param instead: suggested replacement, or a falsy value for none
    @param depth: stack depth to attribute the warning to the caller
    @param warning_class: warning category; defaults to DeprecationWarning
        when a replacement exists, otherwise _NotImplementedWarning
    """
    if instead:
        if warning_class is None:
            warning_class = DeprecationWarning
        message = u'{0} is deprecated; use {1} instead.'.format(name, instead)
    else:
        if warning_class is None:
            warning_class = _NotImplementedWarning
        message = '{0} is deprecated.'.format(name)
    warn(message, warning_class, depth + 1)
@add_full_name
def deprecated(*args, **kwargs):
    """Decorator to output a deprecation warning.
    @kwarg instead: if provided, will be used to specify the replacement
    @type instead: string
    """
    def decorator(obj):
        """Outer wrapper.
        The outer wrapper is used to create the decorating wrapper.
        @param obj: function being wrapped
        @type obj: object
        """
        def wrapper(*args, **kwargs):
            """Replacement function.
            @param args: args passed to the decorated function.
            @type args: list
            @param kwargs: kwargs passed to the decorated function.
            @type kwargs: dict
            @return: the value returned by the decorated function
            @rtype: any
            """
            name = obj.__full_name__
            depth = get_wrapper_depth(wrapper) + 1
            issue_deprecation_warning(name, instead, depth)
            return obj(*args, **kwargs)
        def add_docstring(wrapper):
            """Add a Deprecated notice to the docstring."""
            deprecation_notice = 'Deprecated'
            if instead:
                deprecation_notice += '; use ' + instead + ' instead'
            deprecation_notice += '.\n\n'
            if wrapper.__doc__:  # Append old docstring after the notice
                wrapper.__doc__ = deprecation_notice + wrapper.__doc__
            else:
                wrapper.__doc__ = deprecation_notice
        if not __debug__:
            # decorations are skipped entirely when running with python -O
            return obj
        manage_wrapping(wrapper, obj)
        # Regular expression to find existing deprecation notices
        deprecated_notice = re.compile(r'(^|\s)DEPRECATED[.:;,]',
                                       re.IGNORECASE)
        # Add the deprecation notice to the docstring if not present
        if not wrapper.__doc__:
            add_docstring(wrapper)
        else:
            if not deprecated_notice.search(wrapper.__doc__):
                add_docstring(wrapper)
            else:
                # Get docstring up to @params so deprecation notices for
                # parameters don't disrupt it
                trim_params = re.compile(r'^.*?((?=@param)|$)', re.DOTALL)
                trimmed_doc = trim_params.match(wrapper.__doc__).group(0)
                if not deprecated_notice.search(trimmed_doc):  # No notice
                    add_docstring(wrapper)
        return wrapper
    # bare @deprecated passes the decorated callable as the only argument
    without_parameters = len(args) == 1 and len(kwargs) == 0 and callable(args[0])
    if 'instead' in kwargs:
        instead = kwargs['instead']
    elif not without_parameters and len(args) == 1:
        instead = args[0]
    else:
        instead = False
    # When called as @deprecated, return a replacement function
    if without_parameters:
        if not __debug__:
            return args[0]
        return decorator(args[0])
    # Otherwise return a decorator, which returns a replacement function
    else:
        return decorator
def deprecate_arg(old_arg, new_arg):
    """Decorator to declare old_arg deprecated and replace it with new_arg."""
    replacement = {old_arg: new_arg}
    return deprecated_args(**replacement)
def deprecated_args(**arg_pairs):
    """
    Decorator to declare multiple args deprecated.
    @param arg_pairs: Each entry points to the new argument name. With True or
    None it drops the value and prints a warning. If False it just drops
    the value.
    """
    def decorator(obj):
        """Outer wrapper.
        The outer wrapper is used to create the decorating wrapper.
        @param obj: function being wrapped
        @type obj: object
        """
        def wrapper(*__args, **__kw):
            """Replacement function.
            @param __args: args passed to the decorated function
            @type __args: list
            @param __kwargs: kwargs passed to the decorated function
            @type __kwargs: dict
            @return: the value returned by the decorated function
            @rtype: any
            """
            name = obj.__full_name__
            depth = get_wrapper_depth(wrapper) + 1
            for old_arg, new_arg in arg_pairs.items():
                output_args = {
                    'name': name,
                    'old_arg': old_arg,
                    'new_arg': new_arg,
                }
                if old_arg in __kw:
                    if new_arg not in [True, False, None]:
                        if new_arg in __kw:
                            warn(u"%(new_arg)s argument of %(name)s "
                                 u"replaces %(old_arg)s; cannot use both."
                                 % output_args,
                                 RuntimeWarning, depth)
                        else:
                            # If the value is positionally given this will
                            # cause a TypeError, which is intentional
                            warn(u"%(old_arg)s argument of %(name)s "
                                 u"is deprecated; use %(new_arg)s instead."
                                 % output_args,
                                 DeprecationWarning, depth)
                            __kw[new_arg] = __kw[old_arg]
                    else:
                        # new_arg False means: drop silently-ish with a
                        # PendingDeprecationWarning instead of a full one
                        if new_arg is False:
                            cls = PendingDeprecationWarning
                        else:
                            cls = DeprecationWarning
                        warn(u"%(old_arg)s argument of %(name)s is deprecated."
                             % output_args,
                             cls, depth)
                    # the deprecated name is always removed before the call
                    del __kw[old_arg]
            return obj(*__args, **__kw)
        if not __debug__:
            # decorations are skipped entirely when running with python -O
            return obj
        manage_wrapping(wrapper, obj)
        if wrapper.__signature__:
            # Build a new signature with deprecated args added.
            # __signature__ is only available in Python 3 which has OrderedDict
            params = OrderedDict()
            for param in wrapper.__signature__.parameters.values():
                params[param.name] = param.replace()
            for old_arg, new_arg in arg_pairs.items():
                params[old_arg] = inspect.Parameter(
                    old_arg, kind=inspect._POSITIONAL_OR_KEYWORD,
                    default='[deprecated name of ' + new_arg + ']'
                    if new_arg not in [True, False, None]
                    else NotImplemented)
            wrapper.__signature__ = inspect.Signature()
            wrapper.__signature__._parameters = params
        return wrapper
    return decorator
def remove_last_args(arg_names):
    """
    Decorator to declare all args additionally provided deprecated.

    All positional arguments appearing after the normal arguments are marked
    deprecated. It marks also all keyword arguments present in arg_names as
    deprecated. Any arguments (positional or keyword) which are not present in
    arg_names are forwarded. For example a call with 3 parameters and the
    original function requests one and arg_names contain one name will result
    in an error, because the function got called with 2 parameters.

    The decorated function may not use C{*args} or C{**kwargs}.

    @param arg_names: The names of all arguments.
    @type arg_names: iterable; for the most explanatory message it should
        retain the given order (so not a set for example).
    """
    def decorator(obj):
        """Outer wrapper.

        The outer wrapper is used to create the decorating wrapper.

        @param obj: function being wrapped
        @type obj: object
        """
        def wrapper(*__args, **__kw):
            """Replacement function.

            Drops any trailing positional arguments and any keyword
            arguments named in ``arg_names`` (warning once about what was
            dropped), then forwards the rest to the wrapped function.

            @param __args: args passed to the decorated function
            @type __args: list
            @param __kwargs: kwargs passed to the decorated function
            @type __kwargs: dict
            @return: the value returned by the decorated function
            @rtype: any
            """
            name = obj.__full_name__
            # +1 so the warning points at the caller, not at this wrapper
            depth = get_wrapper_depth(wrapper) + 1
            args, varargs, kwargs, _ = getargspec(wrapper.__wrapped__)
            if varargs is not None and kwargs is not None:
                raise ValueError('{0} may not have * or ** args.'.format(
                    name))
            # Deprecated usage = keyword args named in arg_names ...
            deprecated = set(__kw) & set(arg_names)
            # ... plus any positional args beyond the declared parameters.
            if len(__args) > len(args):
                deprecated.update(arg_names[:len(__args) - len(args)])
            # remove at most |arg_names| entries from the back
            new_args = tuple(__args[:max(len(args), len(__args) - len(arg_names))])
            new_kwargs = dict((arg, val) for arg, val in __kw.items()
                              if arg not in arg_names)

            if deprecated:
                # sort them according to arg_names
                deprecated = [arg for arg in arg_names if arg in deprecated]
                warn(u"The trailing arguments ('{0}') of {1} are deprecated. "
                     u"The value(s) provided for '{2}' have been dropped.".
                     format("', '".join(arg_names),
                            name,
                            "', '".join(deprecated)),
                     DeprecationWarning, depth)
            return obj(*new_args, **new_kwargs)

        manage_wrapping(wrapper, obj)

        return wrapper
    return decorator
def redirect_func(target, source_module=None, target_module=None,
                  old_name=None, class_name=None):
    """
    Return a function which can be used to redirect to 'target'.

    It also acts like marking that function deprecated and copies all
    parameters.

    @param target: The targeted function which is to be executed.
    @type target: callable
    @param source_module: The module of the old function. If '.' defaults
        to target_module. If 'None' (default) it tries to guess it from the
        executing function.
    @type source_module: basestring
    @param target_module: The module of the target function. If
        'None' (default) it tries to get it from the target. Might not work
        with nested classes.
    @type target_module: basestring
    @param old_name: The old function name. If None it uses the name of the
        new function.
    @type old_name: basestring
    @param class_name: The name of the class. It's added to the target and
        source module (separated by a '.').
    @type class_name: basestring
    @return: A new function which adds a warning prior to each execution.
    @rtype: callable
    """
    def call(*a, **kw):
        """Issue a deprecation warning, then forward the call to target."""
        issue_deprecation_warning(old_name, new_name, 2)
        return target(*a, **kw)

    if target_module is None:
        target_module = target.__module__
    if target_module and target_module[-1] != '.':
        target_module += '.'
    # BUG FIX: this used to be "source_module is '.'", an identity
    # comparison against a string literal whose result is implementation
    # dependent. Additionally, a caller-supplied source_module that already
    # ended with '.' fell through to the frame-inspection branch and was
    # silently discarded, contradicting the documented contract above.
    if source_module == '.':
        source_module = target_module
    elif not source_module:
        # None (or empty): infer the module from the caller's frame.
        source_module = sys._getframe(1).f_globals['__name__'] + '.'
    elif source_module[-1] != '.':
        source_module += '.'
    if class_name:
        target_module += class_name + '.'
        source_module += class_name + '.'
    old_name = source_module + (old_name or target.__name__)
    new_name = target_module + target.__name__

    if not __debug__:
        # With -O, skip the wrapper entirely.
        return target

    return call
class ModuleDeprecationWrapper(types.ModuleType):

    """A wrapper for a module to deprecate classes or variables of it."""

    def __init__(self, module):
        """
        Initialise the wrapper.

        It will automatically overwrite the module with this instance in
        C{sys.modules}.

        @param module: The module name or instance
        @type module: str or module
        """
        if isinstance(module, basestring):
            module = sys.modules[module]
        # Bypass our own __setattr__ (which would also write through to the
        # wrapped module) for the wrapper's private bookkeeping attributes.
        super(ModuleDeprecationWrapper, self).__setattr__('_deprecated', {})
        super(ModuleDeprecationWrapper, self).__setattr__('_module', module)
        self.__dict__.update(module.__dict__)

        if __debug__:
            # Replace the real module so attribute access goes through us.
            sys.modules[module.__name__] = self

    def _add_deprecated_attr(self, name, replacement=None,
                             replacement_name=None, warning_message=None):
        """
        Add the name to the local deprecated names dict.

        @param name: The name of the deprecated class or variable. It may not
            be already deprecated.
        @type name: str
        @param replacement: The replacement value which should be returned
            instead. If the name is already an attribute of that module this
            must be None. If None it'll return the attribute of the module.
        @type replacement: any
        @param replacement_name: The name of the new replaced value. Required
            if C{replacement} is not None and it has no __name__ attribute.
            If it contains a '.', it will be interpreted as a Python dotted
            object name, and evaluated when the deprecated object is needed.
        @type replacement_name: str
        @param warning_message: The warning to display, with positional
            variables: {0} = module, {1} = attribute name, {2} = replacement.
        @type warning_message: basestring
        """
        if '.' in name:
            raise ValueError('Deprecated name "{0}" may not contain '
                             '".".'.format(name))
        if name in self._deprecated:
            raise ValueError('Name "{0}" is already deprecated.'.format(name))
        if replacement is not None and hasattr(self._module, name):
            raise ValueError('Module has already an attribute named '
                             '"{0}".'.format(name))

        if replacement_name is None:
            # Derive a dotted name from the replacement object itself.
            if hasattr(replacement, '__name__'):
                replacement_name = replacement.__module__
                if hasattr(replacement, '__self__'):
                    # Bound method: include the owning class in the name.
                    replacement_name += '.'
                    replacement_name += replacement.__self__.__class__.__name__
                replacement_name += '.' + replacement.__name__
            else:
                raise TypeError('Replacement must have a __name__ attribute '
                                'or a replacement name must be set '
                                'specifically.')

        if not warning_message:
            if replacement_name:
                warning_message = '{0}.{1} is deprecated; use {2} instead.'
            else:
                warning_message = u"{0}.{1} is deprecated."
        self._deprecated[name] = replacement_name, replacement, warning_message

    def __setattr__(self, attr, value):
        """Set the value of the wrapped module."""
        # Keep the wrapper and the wrapped module in sync.
        self.__dict__[attr] = value
        setattr(self._module, attr, value)

    def __getattr__(self, attr):
        """Return the attribute with a deprecation warning if required."""
        if attr in self._deprecated:
            warning_message = self._deprecated[attr][2]
            warn(warning_message.format(self._module.__name__, attr,
                                        self._deprecated[attr][0]),
                 DeprecationWarning, 2)
            if self._deprecated[attr][1]:
                return self._deprecated[attr][1]
            elif '.' in self._deprecated[attr][0]:
                # Lazy resolution: the replacement was registered as a dotted
                # name; import its package, evaluate the dotted path and cache
                # the resolved object for subsequent lookups.
                try:
                    package_name = self._deprecated[attr][0].split('.', 1)[0]
                    module = __import__(package_name)
                    context = {package_name: module}
                    replacement = eval(self._deprecated[attr][0], context)
                    self._deprecated[attr] = (
                        self._deprecated[attr][0],
                        replacement,
                        self._deprecated[attr][2]
                    )
                    return replacement
                except Exception:
                    # Fall back to the wrapped module's own attribute below.
                    pass
        return getattr(self._module, attr)
@deprecated('open_archive()')
def open_compressed(filename, use_extension=False):
    """DEPRECATED: Open a file and uncompress it if needed."""
    # Thin backwards-compatibility shim: delegates directly to the
    # replacement function open_archive().
    return open_archive(filename, use_extension=use_extension)
| mit |
lsinfo/odoo | addons/l10n_lu/scripts/tax2csv.py | 257 | 7763 | from collections import OrderedDict
import csv
import xlrd
def _e(s):
    """Encode one spreadsheet cell value for CSV output.

    Returns *s* encoded as UTF-8 when it is a unicode string, an empty
    string for None, and str(s) for everything else (Python 2 code).
    """
    # IDIOM FIX: isinstance() instead of an exact type() identity check,
    # so unicode subclasses are encoded correctly as well.
    if isinstance(s, unicode):
        return s.encode('utf8')
    elif s is None:
        return ''
    else:
        return str(s)
def _is_true(s):
return s not in ('F', 'False', 0, '', None, False)
class LuxTaxGenerator:
    """Generate Odoo CSV data files from the Luxembourg tax spreadsheet.

    Reads the INFO, TAXES, TAX.CODES and FISCAL.POSITION.MAPPINGS sheets of
    the given Excel workbook and writes one CSV file per Odoo model
    (account.tax.code.template, account.tax.template and
    account.fiscal.position.tax.template). Python 2 / xlrd code.
    """

    def __init__(self, filename):
        """Open *filename* and look up the worksheets used by the exporters."""
        # BUG FIX: the filename argument used to be ignored and the
        # hard-coded 'tax.xls' was opened instead.
        self.workbook = xlrd.open_workbook(filename)
        self.sheet_info = \
            self.workbook.sheet_by_name('INFO')
        self.sheet_taxes = \
            self.workbook.sheet_by_name('TAXES')
        self.sheet_tax_codes = \
            self.workbook.sheet_by_name('TAX.CODES')
        self.sheet_fiscal_pos_map = \
            self.workbook.sheet_by_name('FISCAL.POSITION.MAPPINGS')
        # Suffix appended to the generated file names, read from INFO!C5.
        self.suffix = self.sheet_info.cell_value(4, 2)

    def iter_tax_codes(self):
        """Yield the header row, then one OrderedDict per TAX.CODES row."""
        keys = map(lambda c: c.value, self.sheet_tax_codes.row(0))
        yield keys
        for i in range(1, self.sheet_tax_codes.nrows):
            row = map(lambda c: c.value, self.sheet_tax_codes.row(i))
            d = OrderedDict(zip(keys, row))
            # xlrd reads numeric cells as floats; these two are integers.
            d['sign'] = int(d['sign'])
            d['sequence'] = int(d['sequence'])
            yield d

    def iter_taxes(self):
        """Yield the header row, then one OrderedDict per TAXES row."""
        keys = map(lambda c: c.value, self.sheet_taxes.row(0))
        yield keys
        for i in range(1, self.sheet_taxes.nrows):
            row = map(lambda c: c.value, self.sheet_taxes.row(i))
            yield OrderedDict(zip(keys, row))

    def iter_fiscal_pos_map(self):
        """Yield the header row, then one OrderedDict per mapping row."""
        keys = map(lambda c: c.value, self.sheet_fiscal_pos_map.row(0))
        yield keys
        for i in range(1, self.sheet_fiscal_pos_map.nrows):
            row = map(lambda c: c.value, self.sheet_fiscal_pos_map.row(i))
            yield OrderedDict(zip(keys, row))

    def tax_codes_to_csv(self):
        """Write account.tax.code.template-<suffix>.csv.

        First emits the structural tax codes from the TAX.CODES sheet, then
        the leaf codes referenced by the TAXES sheet, validating the cross
        references between both sheets along the way.

        Raises RuntimeError on duplicate or undefined codes and on
        inconsistent base/tax code usage in the TAXES sheet.
        """
        # NOTE(review): file handles here are closed implicitly at interpreter
        # exit, as in the original script (short-lived CLI tool).
        writer = csv.writer(open('account.tax.code.template-%s.csv' %
                                 self.suffix, 'wb'))
        tax_codes_iterator = self.iter_tax_codes()
        keys = tax_codes_iterator.next()
        writer.writerow(keys)

        # write structure tax codes
        tax_codes = {}  # code: id
        for row in tax_codes_iterator:
            tax_code = row['code']
            if tax_code in tax_codes:
                raise RuntimeError('duplicate tax code %s' % tax_code)
            tax_codes[tax_code] = row['id']
            writer.writerow(map(_e, row.values()))

        # read taxes and add leaf tax codes
        new_tax_codes = {}  # id: (name, parent_code)

        def add_new_tax_code(tax_code_id, new_name, new_parent_code):
            # Register a leaf tax code, refusing conflicting parents.
            if not tax_code_id:
                return
            name, parent_code = new_tax_codes.get(tax_code_id, (None, None))
            if parent_code and parent_code != new_parent_code:
                raise RuntimeError('tax code "%s" already exist with '
                                   'parent %s while trying to add it with '
                                   'parent %s' %
                                   (tax_code_id, parent_code, new_parent_code))
            else:
                new_tax_codes[tax_code_id] = (new_name, new_parent_code)

        taxes_iterator = self.iter_taxes()
        taxes_iterator.next()  # skip the header row
        for row in taxes_iterator:
            if not _is_true(row['active']):
                continue
            if row['child_depend'] and row['amount'] != 1:
                raise RuntimeError('amount must be one if child_depend '
                                   'for %s' % row['id'])

            # base parent
            base_code = row['BASE_CODE']
            if not base_code or base_code == '/':
                base_code = 'NA'
            if base_code not in tax_codes:
                raise RuntimeError('undefined tax code %s' % base_code)
            if base_code != 'NA':
                if row['child_depend']:
                    raise RuntimeError('base code specified '
                                       'with child_depend for %s' % row['id'])
            if not row['child_depend']:
                # ... in lux, we have the same code for invoice and refund
                if base_code != 'NA':
                    assert row['base_code_id:id'], 'missing base_code_id for %s' % row['id']
                    assert row['ref_base_code_id:id'] == row['base_code_id:id']
                    add_new_tax_code(row['base_code_id:id'],
                                     'Base - ' + row['name'],
                                     base_code)

            # tax parent
            tax_code = row['TAX_CODE']
            if not tax_code or tax_code == '/':
                tax_code = 'NA'
            if tax_code not in tax_codes:
                raise RuntimeError('undefined tax code %s' % tax_code)
            if tax_code == 'NA':
                if row['amount'] and not row['child_depend']:
                    raise RuntimeError('TAX_CODE not specified '
                                       'for non-zero tax %s' % row['id'])
                if row['tax_code_id:id']:
                    raise RuntimeError('tax_code_id specified '
                                       'for tax %s' % row['id'])
            else:
                if row['child_depend']:
                    raise RuntimeError('TAX_CODE specified '
                                       'with child_depend for %s' % row['id'])
                if not row['amount']:
                    raise RuntimeError('TAX_CODE specified '
                                       'for zero tax %s' % row['id'])
                if not row['tax_code_id:id']:
                    raise RuntimeError('tax_code_id not specified '
                                       'for tax %s' % row['id'])
            if not row['child_depend'] and row['amount']:
                # ... in lux, we have the same code for invoice and refund
                assert row['tax_code_id:id'], 'missing tax_code_id for %s' % row['id']
                assert row['ref_tax_code_id:id'] == row['tax_code_id:id']
                add_new_tax_code(row['tax_code_id:id'],
                                 'Taxe - ' + row['name'],
                                 tax_code)

        # Emit the collected leaf codes, parented under their structure code.
        for tax_code_id in sorted(new_tax_codes):
            name, parent_code = new_tax_codes[tax_code_id]
            writer.writerow((tax_code_id,
                             'lu_tct_m' + parent_code,
                             tax_code_id.replace('lu_tax_code_template_', ''),
                             '1',
                             '',
                             _e(name),
                             ''))

    def taxes_to_csv(self):
        """Write account.tax.template-<suffix>.csv.

        Rows are sorted by description; child taxes (with a parent_id) are
        pushed after the normal ones by offsetting their sequence by 1000.
        """
        writer = csv.writer(open('account.tax.template-%s.csv' %
                                 self.suffix, 'wb'))
        taxes_iterator = self.iter_taxes()
        keys = taxes_iterator.next()
        # The first three spreadsheet columns are bookkeeping-only.
        writer.writerow(keys[3:] + ['sequence'])
        seq = 100
        for row in sorted(taxes_iterator, key=lambda r: r['description']):
            if not _is_true(row['active']):
                continue
            seq += 1
            if row['parent_id:id']:
                cur_seq = seq + 1000
            else:
                cur_seq = seq
            writer.writerow(map(_e, row.values()[3:]) + [cur_seq])

    def fiscal_pos_map_to_csv(self):
        """Write account.fiscal.position.tax.template-<suffix>.csv."""
        writer = csv.writer(open('account.fiscal.'
                                 'position.tax.template-%s.csv' %
                                 self.suffix, 'wb'))
        fiscal_pos_map_iterator = self.iter_fiscal_pos_map()
        keys = fiscal_pos_map_iterator.next()
        writer.writerow(keys)
        for row in fiscal_pos_map_iterator:
            writer.writerow(map(_e, row.values()))
if __name__ == '__main__':
    # Generate the three CSV data files (tax codes, taxes, fiscal position
    # mappings) from the Luxembourg tax spreadsheet in the current directory.
    o = LuxTaxGenerator('tax.xls')
    o.tax_codes_to_csv()
    o.taxes_to_csv()
    o.fiscal_pos_map_to_csv()
| agpl-3.0 |
keisuke-umezawa/chainer | chainermn/links/create_mnbn_model.py | 2 | 2445 | import chainer
import chainermn
import copy
def create_mnbn_model(link, comm, communication_backend='auto'):
    """Create a link object with MultiNodeBatchNormalization.

    Returns a copy of `link`, where BatchNormalization is replaced
    by MultiNodeBatchNormalization.

    Args:
        link: Link object
        comm: ChainerMN communicator
        communication_backend (str): ``mpi``, ``nccl`` or ``auto``. It is used
            to determine communication backend of MultiNodeBatchNormalization.
            If ``auto``, use the best communication backend for each
            communicator.

    Returns:
        Link object where BatchNormalization is replaced
        by MultiNodeBatchNormalization.
    """
    if isinstance(link, chainer.links.BatchNormalization):
        # Base case: build an equivalent multi-node BN layer mirroring the
        # original layer's configuration.
        mnbn = chainermn.links.MultiNodeBatchNormalization(
            size=link.avg_mean.shape,
            comm=comm,
            decay=link.decay,
            eps=link.eps,
            dtype=link.avg_mean.dtype,
            use_gamma=hasattr(link, 'gamma'),
            use_beta=hasattr(link, 'beta'),
            communication_backend=communication_backend,
        )
        # Copy learnable parameters (gamma/beta) ...
        mnbn.copyparams(link)
        # ... and the persistent state (e.g. running statistics), which
        # copyparams() does not transfer.
        for name in link._persistent:
            mnbn.__dict__[name] = copy.deepcopy(link.__dict__[name])
        return mnbn
    elif isinstance(link, chainer.Chain):
        # Recurse into each named child, then splice the converted children
        # into a deep copy of the chain.
        new_children = [
            (child_name, create_mnbn_model(link.__dict__[child_name], comm,
                                           communication_backend))
            for child_name in link._children
        ]
        new_link = copy.deepcopy(link)
        for name, new_child in new_children:
            new_link.__dict__[name] = new_child
        return new_link
    elif isinstance(link, chainer.Sequential):
        # NOTE(review): Sequential is checked before ChainList on purpose,
        # presumably because it stores its layers in _layers rather than
        # _children -- confirm against the chainer version in use.
        new_children = [
            create_mnbn_model(l, comm, communication_backend) for l in link]
        new_link = copy.deepcopy(link)
        for i, new_child in enumerate(new_children):
            new_link._layers[i] = new_child
        return new_link
    elif isinstance(link, chainer.ChainList):
        new_children = [
            create_mnbn_model(l, comm, communication_backend) for l in link]
        new_link = copy.deepcopy(link)
        for i, new_child in enumerate(new_children):
            new_link._children[i] = new_child
        return new_link
    else:
        # Leaf link with no BatchNormalization inside: plain copy.
        assert isinstance(link, chainer.Link)
        return copy.deepcopy(link)
| mit |
ppiotr/Invenio | modules/miscutil/lib/plotextractor_converter.py | 17 | 8026 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
import re
from invenio.shellutils import run_shell_command, run_process_with_timeout, Timeout
from invenio.plotextractor_output_utils import get_converted_image_name, \
write_message
def untar(original_tarball, sdir):
    """
    Here we decide if our file is actually a tarball (sometimes the
    'tarballs' gotten from arXiv aren't actually tarballs.  If they
    'contain' only the TeX file, then they are just that file.), then
    we untar it if so and decide which of its constituents are the
    TeX file and which are the images.

    @param: original_tarball (string): the name of the tar file from arXiv
    @param: sdir (string): the directory where we would like it untarred to

    @return: (file_list, image_list, might_be_tex)
        (([string, ...], [string, ...], [string, ...])):
        all extracted files, the image files among them, and the
        candidate TeX files. Returns ([], [], None) when the file is
        not a tar archive or extraction failed, and ([], [], []) when
        no TeX candidate was found.
    """
    tarball = check_for_gzip(original_tarball)
    dummy1, cmd_out, cmd_err = run_shell_command('file %s', (tarball,))
    tarball_output = 'tar archive'
    # BUG FIX: was "== None"; identity comparison is the correct idiom.
    if re.search(tarball_output, cmd_out) is None:
        # Not a tar archive at all: clean up and bail out.
        run_shell_command('rm %s', (tarball,))
        return ([], [], None)

    cmd_list = ['tar', 'xvf', tarball, '-C', sdir]
    dummy1, cmd_out, cmd_err = run_process_with_timeout(cmd_list)
    if cmd_err != '':
        return ([], [], None)
    if original_tarball != tarball:
        # check_for_gzip() created a temporary uncompressed copy; remove it.
        run_shell_command('rm %s', (tarball,))
    # 'tar xvf' prints one extracted name per line.
    # BUG FIX: the name list used to be kept in cmd_out, the very variable
    # that is reassigned inside the loop below; use a dedicated name.
    extracted_names = cmd_out.split('\n')

    tex_output_contains = 'TeX'
    tex_file_extension = 'tex'
    image_output_contains = 'image'
    eps_output_contains = '- type eps'
    ps_output_contains = 'Postscript'

    file_list = []
    image_list = []
    might_be_tex = []

    for extracted_file in extracted_names:
        if extracted_file == '':
            break
        if extracted_file.startswith('./'):
            extracted_file = extracted_file[2:]
        # ensure we are actually looking at the right file
        extracted_file = os.path.join(sdir, extracted_file)

        # Add to full list of extracted files
        file_list.append(extracted_file)

        dummy1, file_output, dummy2 = run_shell_command('file %s', (extracted_file,))

        # is it TeX?
        if file_output.find(tex_output_contains) > -1:
            might_be_tex.append(extracted_file)

        # is it an image?  (compare the match position against the position
        # of ':' so that a keyword inside the *filename* does not count)
        elif file_output.lower().find(image_output_contains) > file_output.find(':') \
                or \
                file_output.lower().find(eps_output_contains) > file_output.find(':')\
                or \
                file_output.find(ps_output_contains) > file_output.find(':'):
            # we have "image" in the output, and it is not in the filename
            # i.e. filename.ext: blah blah image blah blah
            image_list.append(extracted_file)

        # if neither, maybe it is TeX or an image anyway, otherwise,
        # we don't care
        else:
            if extracted_file.split('.')[-1].lower() == tex_file_extension:
                # we might have tex source!
                might_be_tex.append(extracted_file)
            elif extracted_file.split('.')[-1] in ['eps', 'png', \
                    'ps', 'jpg', 'pdf']:
                # we might have an image!
                image_list.append(extracted_file)

    if might_be_tex == []:
        # well, that's tragic
        # could not find TeX file in tar archive
        return ([], [], [])

    return (file_list, image_list, might_be_tex)
def check_for_gzip(tfile):
    """
    Was that tarball also gzipped? Let's find out!

    @param: tfile (string): the name of the object (so we can gunzip, if
        that's necessary)

    @output: a gunzipped file in the directory of choice, if that's necessary

    @return new_file (string): The name of the file after gunzipping or the
        original name of the file if that wasn't necessary
    """
    gzip_contains = 'gzip compressed data'
    dummy1, cmd_out, dummy2 = run_shell_command('file %s', (tfile,))
    if cmd_out.find(gzip_contains) > -1:
        # We have a gzip. gunzip only accepts files whose name ends in .gz,
        # so work on a copy carrying the expected extension.
        # SECURITY/CONSISTENCY FIX: pass the filenames as an argument tuple
        # (escaped by run_shell_command) instead of interpolating them into
        # the command string, like every other call in this module.
        run_shell_command('cp %s %s', (tfile, tfile + '.tar.gz'))
        new_dest = os.path.join(os.path.split(tfile)[0], 'tmp.tar')
        run_shell_command('touch %s', (new_dest,))
        dummy1, cmd_out, cmd_err = run_shell_command('gunzip -c %s',
                                                     (tfile + '.tar.gz',))
        if cmd_err != '':
            write_message('Error while gunzipping ' + tfile)
            return tfile

        # Write the uncompressed stream to the temporary tar file.
        # (Renamed from 'tarfile', which shadowed the stdlib module.)
        tar_out = open(new_dest, 'w')
        tar_out.write(cmd_out)
        tar_out.close()

        run_shell_command('rm %s', (tfile + '.tar.gz',))
        return new_dest

    return tfile
def convert_images(image_list):
    """
    Here we figure out the types of the images that were extracted from
    the tarball and determine how to convert them into PNG.

    @param: image_list ([string, string, ...]): the list of image files
        extracted from the tarball in step 1

    @return: image_list ([str, str, ...]): The list of image files when all
        have been converted to PNG format.
    """
    png_signature = 'PNG image'
    converted = []
    for image_file in image_list:
        if os.path.isdir(image_file):
            continue

        # FIXME: as elsewhere in the plot extractor library, these shell
        # invocations should run with a timeout (to stop runaway imagemagick
        # conversions) and with properly escaped arguments.
        dummy1, file_output, dummy2 = run_shell_command('file %s', (image_file,))
        if file_output.find(png_signature) > -1:
            # Already a PNG; keep as-is.
            converted.append(image_file)
            continue

        # Assume ImageMagick can convert every image type we may be faced
        # with (it certainly handles EPS->PNG, JPG->PNG, PS->PNG, PSTEX->PNG).
        target_file = get_converted_image_name(image_file)
        try:
            dummy1, cmd_out, cmd_err = run_process_with_timeout(
                ['convert', image_file, target_file])
            if cmd_err == '':
                converted.append(target_file)
            else:
                write_message('convert failed on ' + image_file)
        except Timeout:
            write_message('convert timed out on ' + image_file)

    return converted
def extract_text(tarball):
    """
    We check to see if there's a file called tarball.pdf, and, if there is,
    we run pdftotext on it. Simple as that.

    @param: tarball (string): the raw name of the tarball

    @return: None normally (success, or no PDF to work on); -1 when
        pdftotext reported an error
    """
    try:
        # Raises OSError when there is no PDF next to the tarball.
        os.stat(tarball + '.pdf')
        # BUG FIX: the input filename used to carry a trailing space
        # ("... + '.pdf '"), pointing pdftotext at a non-existent file.
        cmd_list = ['pdftotext', tarball + '.pdf', tarball + '.txt']
        dummy1, dummy2, cmd_err = run_process_with_timeout(cmd_list)
        if cmd_err != '':
            return - 1
        write_message('generated ' + tarball + '.txt from ' + tarball + '.pdf')
    except (OSError, Timeout):
        # BUG FIX: narrowed from a bare "except:" which hid every failure.
        # Either there is no PDF, or pdftotext exceeded the timeout.
        write_message('no text from ' + tarball + '.pdf')
| gpl-2.0 |
daenamkim/ansible | lib/ansible/modules/files/ini_file.py | 34 | 10180 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ini_file
short_description: Tweak settings in INI files
extends_documentation_fragment: files
description:
- Manage (add, remove, change) individual settings in an INI-style file without having
to manage the file as a whole with, say, M(template) or M(assemble). Adds missing
sections if they don't exist.
- Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
- Since version 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
no other modifications need to be applied.
version_added: "0.9"
options:
path:
description:
- Path to the INI-style file; this file is created if required.
- Before 2.3 this option was only usable as I(dest).
aliases: [ dest ]
required: true
section:
description:
- Section name in INI file. This is added if C(state=present) automatically when
a single value is being set.
- If left empty or set to `null`, the I(option) will be placed before the first I(section).
Using `null` is also required if the config format does not support sections.
required: true
option:
description:
- If set (required for changing a I(value)), this is the name of the option.
- May be omitted if adding/removing a whole I(section).
value:
description:
- The string value to be associated with an I(option). May be omitted when removing an I(option).
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: 'no'
others:
description:
- All arguments accepted by the M(file) module also work here
state:
description:
- If set to C(absent) the option or section will be removed if present instead of created.
choices: [ absent, present ]
default: present
no_extra_spaces:
description:
- Do not insert spaces before and after '=' symbol
type: bool
default: 'no'
version_added: "2.1"
create:
description:
- If set to 'no', the module will fail if the file does not already exist.
By default it will create the file if it is missing.
type: bool
default: 'yes'
version_added: "2.2"
notes:
- While it is possible to add an I(option) without specifying a I(value), this makes
no sense.
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but
I(dest) still works as well.
author:
- Jan-Piet Mens (@jpmens)
- Ales Nosek (@noseka1)
'''
EXAMPLES = '''
# Before 2.3, option 'dest' was used instead of 'path'
- name: Ensure "fav=lemonade" is in section "[drinks]" in specified file
ini_file:
path: /etc/conf
section: drinks
option: fav
value: lemonade
mode: 0600
backup: yes
- ini_file:
path: /etc/anotherconf
section: drinks
option: temperature
value: cold
backup: yes
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
def match_opt(option, line):
    """Match *line* against '<option> =', active or commented out.

    Accepts the option at the start of the line, optionally preceded by a
    '#' or ';' comment marker, with any amount of spaces/tabs around the
    name. Returns the re match object, or None when the line does not set
    the option.
    """
    escaped = re.escape(option)
    for comment_prefix in ('', '#', ';'):
        found = re.match('%s( |\t)*%s( |\t)*=' % (comment_prefix, escaped),
                         line)
        if found:
            return found
    return None
def match_active_opt(option, line):
    """Match *line* against an active (uncommented) '<option> =' setting.

    Returns the re match object, or None when the line does not actively
    set the option.
    """
    return re.match('( |\t)*%s( |\t)*=' % re.escape(option), line)
def do_ini(module, filename, section=None, option=None, value=None,
           state='present', backup=False, no_extra_spaces=False, create=True):
    """Apply one add/change/remove operation to an INI-style file.

    Reads *filename* into a list of lines, edits the list in place and
    writes it back only when something changed (and not in check mode).

    Returns a tuple (changed, backup_file, diff, msg):
    *changed* - whether the content was (or would be) modified,
    *backup_file* - path of the backup copy, or None,
    *diff* - before/after dict for Ansible's --diff mode,
    *msg* - human-readable summary of what happened.
    """
    diff = dict(
        before='',
        after='',
        before_header='%s (content)' % filename,
        after_header='%s (content)' % filename,
    )

    if not os.path.exists(filename):
        if not create:
            module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)
        destpath = os.path.dirname(filename)
        if not os.path.exists(destpath) and not module.check_mode:
            os.makedirs(destpath)
        ini_lines = []
    else:
        ini_file = open(filename, 'r')
        try:
            ini_lines = ini_file.readlines()
        finally:
            ini_file.close()

    if module._diff:
        diff['before'] = ''.join(ini_lines)

    changed = False

    # ini file could be empty
    if not ini_lines:
        ini_lines.append('\n')

    # last line of file may not contain a trailing newline
    if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n':
        ini_lines[-1] += '\n'
        changed = True

    # append a fake section line to simplify the logic
    # (every real section is then guaranteed to be terminated by a line
    # starting with '[', so end-of-section handling needs no special case)
    ini_lines.append('[')

    within_section = not section
    section_start = 0
    msg = 'OK'
    if no_extra_spaces:
        assignment_format = '%s=%s\n'
    else:
        assignment_format = '%s = %s\n'

    for index, line in enumerate(ini_lines):
        if line.startswith('[%s]' % section):
            within_section = True
            section_start = index
        elif line.startswith('['):
            # Start of the next section (or the fake terminator): the
            # requested section, if we were in it, ends here.
            if within_section:
                if state == 'present':
                    # insert missing option line at the end of the section
                    for i in range(index, 0, -1):
                        # search backwards for previous non-blank or non-comment line
                        if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
                            ini_lines.insert(i, assignment_format % (option, value))
                            msg = 'option added'
                            changed = True
                            break
                elif state == 'absent' and not option:
                    # remove the entire section
                    del ini_lines[section_start:index]
                    msg = 'section removed'
                    changed = True
            break
        else:
            if within_section and option:
                if state == 'present':
                    # change the existing option line
                    if match_opt(option, line):
                        newline = assignment_format % (option, value)
                        option_changed = ini_lines[index] != newline
                        changed = changed or option_changed
                        if option_changed:
                            msg = 'option changed'
                        ini_lines[index] = newline
                        if option_changed:
                            # remove all possible option occurrences from the rest of the section
                            index = index + 1
                            while index < len(ini_lines):
                                line = ini_lines[index]
                                if line.startswith('['):
                                    break
                                if match_active_opt(option, line):
                                    # deleting shifts the list, so the index
                                    # is only advanced when nothing is removed
                                    del ini_lines[index]
                                else:
                                    index = index + 1
                        break
                elif state == 'absent':
                    # delete the existing line
                    if match_active_opt(option, line):
                        del ini_lines[index]
                        changed = True
                        msg = 'option changed'
                        break

    # remove the fake section line
    del ini_lines[-1:]

    if not within_section and option and state == 'present':
        # the requested section did not exist at all: create it at the end
        ini_lines.append('[%s]\n' % section)
        ini_lines.append(assignment_format % (option, value))
        changed = True
        msg = 'section and option added'

    if module._diff:
        diff['after'] = ''.join(ini_lines)

    backup_file = None
    if changed and not module.check_mode:
        if backup:
            backup_file = module.backup_local(filename)
        ini_file = open(filename, 'w')
        try:
            ini_file.writelines(ini_lines)
        finally:
            ini_file.close()

    return (changed, backup_file, diff, msg)
def main():
    """Module entry point: parse arguments, apply the INI change, exit."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True, aliases=['dest']),
            section=dict(type='str', required=True),
            option=dict(type='str'),
            value=dict(type='str'),
            backup=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            no_extra_spaces=dict(type='bool', default=False),
            create=dict(type='bool', default=True)
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    params = module.params

    (changed, backup_file, diff, msg) = do_ini(
        module, params['path'], params['section'], params['option'],
        params['value'], params['state'], params['backup'],
        params['no_extra_spaces'], params['create'])

    if not module.check_mode and os.path.exists(params['path']):
        # Apply the common file attributes (owner, mode, ...) as well.
        file_args = module.load_file_common_arguments(module.params)
        changed = module.set_fs_attributes_if_different(file_args, changed)

    results = dict(
        changed=changed,
        diff=diff,
        msg=msg,
        path=params['path'],
    )
    if backup_file is not None:
        results['backup_file'] = backup_file

    # Mission complete
    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
Havner/smack-namespace | tools/perf/scripts/python/syscall-counts-by-pid.py | 1996 | 2105 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional filter: the single argument is interpreted as a pid when it
# parses as an integer, otherwise as a command name.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# Nested counters: syscalls[comm][pid][syscall_id] -> count
# (autodict creates the intermediate dicts on demand).
syscalls = autodict()
def trace_begin():
    # Called by the perf script harness before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by the perf script harness after the last event; print totals.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    common_callchain, id, args):
    # Per-event handler: count one syscall entry for (comm, pid, id).
    # Skip events that don't match the optional comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    # autodict creates the intermediate dicts on demand, but the leaf counter
    # starts out missing, so the first increment raises TypeError and is used
    # to initialise the count to 1.
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # Compatibility entry point for perf builds that emit per-syscall events
    # without a callchain field; forwards everything to the raw handler.
    # NOTE(review): raw_syscalls__sys_enter() requires a common_callchain
    # argument that is absent from locals() here, so this forwarding call
    # looks like it would raise TypeError if this entry point were ever
    # invoked -- confirm against the perf script harness.
    raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
    # Print the collected per-comm/per-pid syscall counts, one section per
    # (comm, pid) pair, syscalls sorted by descending count.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Python 2 tuple-parameter lambda: sort by count, descending.
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print "  %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
alfanugraha/LUMENS-repo | processing/algs/PointsFromLines.py | 4 | 6553 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PointsFromLines.py
---------------------
Date : August 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'August 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from osgeo import gdal
from qgis.core import *
from processing.tools import vector, raster, dataobjects
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterVector import ParameterVector
from processing.outputs.OutputVector import OutputVector
class PointsFromLines(GeoAlgorithm):
    """Create one point per raster cell crossed by each input line feature.

    Each segment of every line is walked in raster pixel space with a
    Bresenham-style traversal; a point feature tagged with sequential
    id/line_id/point_id attributes is written for every visited cell.
    """
    # Parameter/output keys used by the processing framework.
    INPUT_RASTER = 'INPUT_RASTER'
    RASTER_BAND = 'RASTER_BAND'
    INPUT_VECTOR = 'INPUT_VECTOR'
    OUTPUT_LAYER = 'OUTPUT_LAYER'
    def defineCharacteristics(self):
        """Declare the algorithm's name, group, inputs and output."""
        self.name = 'Get raster values at line nodes'
        self.group = 'Vector analysis tools'
        self.addParameter(ParameterRaster(self.INPUT_RASTER, 'Raster layer'))
        self.addParameter(ParameterVector(self.INPUT_VECTOR, 'Vector layer',
                          [ParameterVector.VECTOR_TYPE_LINE]))
        self.addOutput(OutputVector(self.OUTPUT_LAYER, 'Output layer'))
    def processAlgorithm(self, progress):
        """Iterate the line layer and emit points for every crossed cell."""
        layer = dataobjects.getObjectFromUri(
                self.getParameterValue(self.INPUT_VECTOR))
        rasterPath = unicode(self.getParameterValue(self.INPUT_RASTER))
        # Only the geotransform is needed; close the dataset immediately.
        rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
        geoTransform = rasterDS.GetGeoTransform()
        rasterDS = None
        fields = QgsFields()
        fields.append(QgsField('id', QVariant.Int, '', 10, 0))
        fields.append(QgsField('line_id', QVariant.Int, '', 10, 0))
        fields.append(QgsField('point_id', QVariant.Int, '', 10, 0))
        writer = self.getOutputFromName(
            self.OUTPUT_LAYER).getVectorWriter(fields.toList(),
                                               QGis.WKBPoint,
                                               layer.crs())
        outFeature = QgsFeature()
        outFeature.setFields(fields)
        # Running counters used by createPoint(): global feature id,
        # current line index, point index within the current line.
        self.fid = 0
        self.lineId = 0
        self.pointId = 0
        current = 0
        features = vector.features(layer)
        total = 100.0 / len(features)
        for f in features:
            geom = f.geometry()
            if geom.isMultipart():
                lines = geom.asMultiPolyline()
                for line in lines:
                    # walk each consecutive vertex pair as a segment
                    for i in xrange(len(line) - 1):
                        p1 = line[i]
                        p2 = line[i + 1]
                        (x1, y1) = raster.mapToPixel(p1.x(), p1.y(),
                                                     geoTransform)
                        (x2, y2) = raster.mapToPixel(p2.x(), p2.y(),
                                                     geoTransform)
                        self.buildLine(x1, y1, x2, y2, geoTransform,
                                       writer, outFeature)
            else:
                points = geom.asPolyline()
                for i in xrange(len(points) - 1):
                    p1 = points[i]
                    p2 = points[i + 1]
                    (x1, y1) = raster.mapToPixel(p1.x(), p1.y(), geoTransform)
                    (x2, y2) = raster.mapToPixel(p2.x(), p2.y(), geoTransform)
                    self.buildLine(x1, y1, x2, y2, geoTransform, writer,
                                   outFeature)
            # point numbering restarts for each line feature
            self.pointId = 0
            self.lineId += 1
            current += 1
            progress.setPercentage(int(current * total))
        del writer
    def buildLine(self, startX, startY, endX, endY, geoTransform, writer,
                  feature):
        """Emit a point for every cell on the pixel-space segment
        (startX, startY) -> (endX, endY) via a Bresenham-style walk."""
        # NOTE(review): `point` appears unused -- candidate for removal.
        point = QgsPoint()
        if startX == endX:
            # Vertical segment: fixed column, step through rows.
            if startY > endY:
                (startY, endY) = (endY, startY)
            row = startX
            for col in xrange(startY, endY + 1):
                self.createPoint(row, col, geoTransform, writer, feature)
        elif startY == endY:
            # Horizontal segment: fixed row, step through columns.
            if startX > endX:
                (startX, endX) = (endX, startX)
            col = startY
            for row in xrange(startX, endX + 1):
                self.createPoint(row, col, geoTransform, writer, feature)
        else:
            # General case: classic integer Bresenham covering all octants.
            width = endX - startX
            height = endY - startY
            if width < 0:
                dx1 = -1
                dx2 = -1
            else:
                dx1 = 1
                dx2 = 1
            if height < 0:
                dy1 = -1
            else:
                dy1 = 1
            dy2 = 0
            longest = abs(width)
            shortest = abs(height)
            if not longest > shortest:
                # steeper than 45 degrees: y becomes the driving axis
                (longest, shortest) = (shortest, longest)
                if height < 0:
                    dy2 = -1
                else:
                    dy2 = 1
                dx2 = 0
            # Python 2 integer division; coordinates are assumed integral
            # here -- TODO confirm mapToPixel() returns ints.
            err = longest / 2
            for i in xrange(longest + 1):
                self.createPoint(startX, startY, geoTransform, writer, feature)
                err += shortest
                if not err < longest:
                    err = err - longest
                    startX += dx1
                    startY += dy1
                else:
                    startX += dx2
                    startY += dy2
    def createPoint(self, pX, pY, geoTransform, writer, feature):
        """Write one point feature at the map position of pixel (pX, pY)."""
        (x, y) = raster.pixelToMap(pX, pY, geoTransform)
        feature.setGeometry(QgsGeometry.fromPoint(QgsPoint(x, y)))
        feature['id'] = self.fid
        feature['line_id'] = self.lineId
        feature['point_id'] = self.pointId
        self.fid += 1
        self.pointId += 1
        writer.addFeature(feature)
| gpl-2.0 |
b1nary0mega/python | arp_disc.py | 1 | 1130 | #!/usr/bin/python
"""ARP scan a given network"""
import logging
import subprocess
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
__author__ = "James R. Aylesworth"
__copyright__ = "Copyright 2018"
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "James R. Aylesworth"
__email__ = "james.aylesworth@gmail.com"
__status__ = "Production"
if len(sys.argv) != 2:
print "Usage - ./arp_disc.py [interface]"
print "Example - ./arp_disc.py eth0"
print "Example will perform an ARP scan on the local subnet to which eth0 is assigned"
sys.exit()
interface = str(sys.argv[1])
ip = subprocess.check_output("ip -o addr show | grep inet\\ | grep " +
interface + " | cut -d ' ' -f 7", shell=True).strip()
print("\nNetwork found ==> " + ip)
prefix = ip.split('.')[0] + '.' + ip.split('.')[1] + \
'.' + ip.split('.')[2] + '.'
print("\nScanning ==> " + prefix + "0/24\n")
for addr in range(0, 254):
answer = sr1(ARP(pdst=prefix + str(addr)), timeout=1, verbose=0)
if answer == None:
pass
else:
print prefix + str(addr)
| gpl-2.0 |
ingresso-group/pyticketswitch | pyticketswitch/ticket_type.py | 1 | 2229 | from pyticketswitch.price_band import PriceBand
from pyticketswitch.mixins import JSONMixin
class TicketType(JSONMixin, object):
    """Describes a collection of tickets.

    Generally this represents a part of house in a venue, but may have other
    meanings in contexts outside theater and music.

    Attributes:
        code (str): identifier for the ticket type.
        description (str): human readable description of the ticket type.
        price_bands (list): list of
            :class:`PriceBands <pyticketswitch.price_band.PriceBand>` objects
            which further subdivide available tickets/seats by price.

    """

    def __init__(self, code=None, description=None, price_bands=None):
        self.code = code
        self.description = description
        self.price_bands = price_bands

    @classmethod
    def from_api_data(cls, data):
        """Creates a new TicketType object from API data from ticketswitch.

        Args:
            data (dict): the part of the response from a ticketswitch API call
                that concerns a ticket type.

        Returns:
            :class:`TicketType <pyticketswitch.ticket_type.TicketType>`: a new
            :class:`TicketType <pyticketswitch.ticket_type.TicketType>` object
            populated with the data from the api.

        """
        price_bands = [
            PriceBand.from_api_data(single_band)
            for single_band in data.get('price_band', [])
        ]

        kwargs = {
            'code': data.get('ticket_type_code', None),
            'description': data.get('ticket_type_desc', None),
            'price_bands': price_bands,
        }

        return cls(**kwargs)

    def get_seats(self):
        """Get seats in the ticket type.

        Returns:
            list: list of :class:`Seats <pyticketswitch.seat.Seat>` objects.

        """
        if not self.price_bands:
            return []

        return [
            seat
            for price_band in self.price_bands
            for seat in price_band.get_seats()
        ]

    def __repr__(self):
        # BUG FIX: previously called .encode() directly on self.description,
        # which raised AttributeError when the description was None.
        description = self.description or u''
        return u'<TicketType {}: {}>'.format(
            self.code, description.encode('ascii', 'ignore'))
| mit |
lunafeng/django | tests/admin_widgets/widgetadmin.py | 368 | 1345 | from django.contrib import admin
from . import models
class WidgetAdmin(admin.AdminSite):
    # Separate admin site so these test registrations stay isolated from
    # the default admin site.
    pass
class CarAdmin(admin.ModelAdmin):
    # `owner` is both displayed and editable directly in the changelist.
    list_display = ['make', 'model', 'owner']
    list_editable = ['owner']
class CarTireAdmin(admin.ModelAdmin):
    """Admin that restricts the selectable cars to those owned by the
    requesting user."""

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Only the "car" foreign key is restricted; every other field is
        # delegated to the default implementation.
        if db_field.name != "car":
            return super(CarTireAdmin, self).formfield_for_foreignkey(
                db_field, request, **kwargs)
        kwargs["queryset"] = models.Car.objects.filter(owner=request.user)
        return db_field.formfield(**kwargs)
class EventAdmin(admin.ModelAdmin):
    # Raw-id widgets for one FK and one M2M relation.
    raw_id_fields = ['main_band', 'supporting_bands']
class AlbumAdmin(admin.ModelAdmin):
    # cover_art appears on the form but is rendered read-only.
    fields = ('name', 'cover_art',)
    readonly_fields = ('cover_art',)
class SchoolAdmin(admin.ModelAdmin):
    # Both orientations of the filtered select-multiple widget.
    filter_vertical = ('students',)
    filter_horizontal = ('alumni',)
# Register all test models on the dedicated widget-admin site.
site = WidgetAdmin(name='widget-admin')
site.register(models.User)
site.register(models.Car, CarAdmin)
site.register(models.CarTire, CarTireAdmin)
site.register(models.Member)
site.register(models.Band)
site.register(models.Event, EventAdmin)
site.register(models.Album, AlbumAdmin)
site.register(models.Inventory)
site.register(models.Bee)
site.register(models.Advisor)
site.register(models.School, SchoolAdmin)
site.register(models.Profile)
| bsd-3-clause |
hyuh/kernel-k2 | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
	# Human-readable "comm:pid" label for a task, e.g. "bash:1234".
	return "{0}:{1:d}".format(threads[pid], pid)
class RunqueueEventUnknown:
	"""Placeholder event when nothing notable happened on the runqueue."""
	@staticmethod
	def color():
		# no GUI highlight
		return None
	def __repr__(self):
		return "unknown"
class RunqueueEventSleep:
	"""A task left the runqueue by going to sleep."""
	@staticmethod
	def color():
		return (0, 0, 0xff)  # blue
	def __init__(self, sleeper):
		self.sleeper = sleeper  # pid of the task that went to sleep
	def __repr__(self):
		return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
	"""A task was woken up onto this runqueue."""
	@staticmethod
	def color():
		return (0xff, 0xff, 0)  # yellow
	def __init__(self, wakee):
		self.wakee = wakee  # pid of the woken task
	def __repr__(self):
		return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
	"""A newly forked task appeared on this runqueue."""
	@staticmethod
	def color():
		return (0, 0xff, 0)  # green
	def __init__(self, child):
		self.child = child  # pid of the new child task
	def __repr__(self):
		return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
	"""A task migrated onto this runqueue from another cpu."""
	@staticmethod
	def color():
		return (0, 0xf0, 0xff)  # cyan
	def __init__(self, new):
		self.new = new  # pid of the migrated-in task
	def __repr__(self):
		return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
	"""A task migrated off this runqueue to another cpu."""
	@staticmethod
	def color():
		return (0xff, 0, 0xff)  # magenta
	def __init__(self, old):
		self.old = old  # pid of the migrated-out task
	def __repr__(self):
		return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
	"""Immutable view of one CPU runqueue: the runnable pids plus the
	event that produced this state.  State transitions return a new
	snapshot (or self when nothing changed)."""
	def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
		# pid 0 (idle) is always present; stored as a tuple so a snapshot
		# is never mutated after construction.
		self.tasks = tuple(tasks)
		self.event = event
	def sched_switch(self, prev, prev_state, next):
		"""Return the snapshot after switching from `prev` to `next`."""
		event = RunqueueEventUnknown()
		# nothing changed: prev stays runnable and both tasks are known
		if taskState(prev_state) == "R" and next in self.tasks \
			and prev in self.tasks:
			return self
		if taskState(prev_state) != "R":
			event = RunqueueEventSleep(prev)
		next_tasks = list(self.tasks[:])
		if prev in self.tasks:
			if taskState(prev_state) != "R":
				next_tasks.remove(prev)
		elif taskState(prev_state) == "R":
			next_tasks.append(prev)
		if next not in next_tasks:
			next_tasks.append(next)
		return RunqueueSnapshot(next_tasks, event)
	def migrate_out(self, old):
		"""Return the snapshot after task `old` migrated away."""
		if old not in self.tasks:
			return self
		next_tasks = [task for task in self.tasks if task != old]
		return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
	def __migrate_in(self, new, event):
		# If the task is already here only the event annotation changes.
		if new in self.tasks:
			self.event = event
			return self
		next_tasks = self.tasks[:] + tuple([new])
		return RunqueueSnapshot(next_tasks, event)
	def migrate_in(self, new):
		"""Return the snapshot after task `new` migrated here."""
		return self.__migrate_in(new, RunqueueMigrateIn(new))
	def wake_up(self, new):
		"""Return the snapshot after task `new` was woken up here."""
		return self.__migrate_in(new, RunqueueEventWakeup(new))
	def wake_up_new(self, new):
		"""Return the snapshot after freshly forked task `new` arrived."""
		return self.__migrate_in(new, RunqueueEventFork(new))
	def load(self):
		""" Provide the number of tasks on the runqueue.
		    Don't count idle"""
		return len(self.tasks) - 1
	def __repr__(self):
		# BUG FIX: this used to call self.origin_tostring(), a method that
		# does not exist anywhere, so repr() raised AttributeError.  Show
		# the task set together with the originating event instead.
		return "%s %s" % (repr(self.tasks), repr(self.event))
class TimeSlice:
	"""State of all runqueues between two consecutive scheduler events.
	Each slice starts from the previous slice's state and records which
	cpus changed."""
	def __init__(self, start, prev):
		self.start = start
		self.prev = prev
		self.end = start
		# cpus that triggered the event
		self.event_cpus = []
		if prev is not None:
			# inherit the previous slice's runqueues and aggregate load
			self.total_load = prev.total_load
			self.rqs = prev.rqs.copy()
		else:
			self.rqs = defaultdict(RunqueueSnapshot)
			self.total_load = 0
	def __update_total_load(self, old_rq, new_rq):
		# keep the aggregate runnable-task count across all cpus in sync
		diff = new_rq.load() - old_rq.load()
		self.total_load += diff
	def sched_switch(self, ts_list, prev, prev_state, next, cpu):
		"""Apply a context switch on `cpu`; append self to ts_list only
		if the runqueue actually changed."""
		old_rq = self.prev.rqs[cpu]
		new_rq = old_rq.sched_switch(prev, prev_state, next)
		if old_rq is new_rq:
			return
		self.rqs[cpu] = new_rq
		self.__update_total_load(old_rq, new_rq)
		ts_list.append(self)
		self.event_cpus = [cpu]
	def migrate(self, ts_list, new, old_cpu, new_cpu):
		"""Move task `new` from old_cpu's runqueue to new_cpu's."""
		if old_cpu == new_cpu:
			return
		old_rq = self.prev.rqs[old_cpu]
		out_rq = old_rq.migrate_out(new)
		self.rqs[old_cpu] = out_rq
		self.__update_total_load(old_rq, out_rq)
		new_rq = self.prev.rqs[new_cpu]
		in_rq = new_rq.migrate_in(new)
		self.rqs[new_cpu] = in_rq
		self.__update_total_load(new_rq, in_rq)
		ts_list.append(self)
		# only flag the source cpu if the task was really removed there
		if old_rq is not out_rq:
			self.event_cpus.append(old_cpu)
		self.event_cpus.append(new_cpu)
	def wake_up(self, ts_list, pid, cpu, fork):
		"""Wake task `pid` onto `cpu` (fork != 0 means a new task)."""
		old_rq = self.prev.rqs[cpu]
		if fork:
			new_rq = old_rq.wake_up_new(pid)
		else:
			new_rq = old_rq.wake_up(pid)
		if new_rq is old_rq:
			return
		self.rqs[cpu] = new_rq
		self.__update_total_load(old_rq, new_rq)
		ts_list.append(self)
		self.event_cpus = [cpu]
	def next(self, t):
		# close this slice at time t and start its successor
		self.end = t
		return TimeSlice(t, self)
class TimeSliceList(UserList):
	"""Time-ordered list of TimeSlice objects plus the callbacks SchedGui
	uses to paint and inspect them."""
	def __init__(self, arg = []):
		self.data = arg
	def get_time_slice(self, ts):
		"""Return the slice for timestamp ts, creating the initial slice
		or chaining a new one after the last as needed."""
		if len(self.data) == 0:
			slice = TimeSlice(ts, TimeSlice(-1, None))
		else:
			slice = self.data[-1].next(ts)
		return slice
	def find_time_slice(self, ts):
		"""Binary-search the index of the slice containing ts; -1 if no
		slice covers it."""
		start = 0
		end = len(self.data)
		found = -1
		searching = True
		while searching:
			if start == end or start == end - 1:
				searching = False
			i = (end + start) / 2  # Python 2 integer division
			if self.data[i].start <= ts and self.data[i].end >= ts:
				found = i
				end = i
				continue
			if self.data[i].end < ts:
				start = i
			elif self.data[i].start > ts:
				end = i
		return found
	def set_root_win(self, win):
		# GUI root frame used for painting and summaries
		self.root_win = win
	def mouse_down(self, cpu, t):
		"""Show a textual summary of cpu's runqueue at time t."""
		idx = self.find_time_slice(t)
		if idx == -1:
			return
		ts = self[idx]
		rq = ts.rqs[cpu]
		raw = "CPU: %d\n" % cpu
		raw += "Last event : %s\n" % rq.event.__repr__()
		raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
		raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
		raw += "Load = %d\n" % rq.load()
		for t in rq.tasks:
			raw += "%s \n" % thread_name(t)
		self.root_win.update_summary(raw)
	def update_rectangle_cpu(self, slice, cpu):
		"""Paint one cpu's rectangle for a slice; redder means a larger
		share of the total load, top stripe colored by the event type."""
		rq = slice.rqs[cpu]
		if slice.total_load != 0:
			load_rate = rq.load() / float(slice.total_load)
		else:
			load_rate = 0
		red_power = int(0xff - (0xff * load_rate))
		color = (0xff, red_power, red_power)
		top_color = None
		if cpu in slice.event_cpus:
			top_color = rq.event.color()
		self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
	def fill_zone(self, start, end):
		"""Repaint every cpu rectangle for slices overlapping [start, end]."""
		i = self.find_time_slice(start)
		if i == -1:
			return
		for i in xrange(i, len(self.data)):
			timeslice = self.data[i]
			if timeslice.start > end:
				return
			for cpu in timeslice.rqs:
				self.update_rectangle_cpu(timeslice, cpu)
	def interval(self):
		"""Return the (first start, last end) covered timestamps."""
		if len(self.data) == 0:
			return (0, 0)
		return (self.data[0].start, self.data[-1].end)
	def nr_rectangles(self):
		# highest cpu number seen in the final slice
		last_ts = self.data[-1]
		max_cpu = 0
		for cpu in last_ts.rqs:
			if cpu > max_cpu:
				max_cpu = cpu
		return max_cpu
class SchedEventProxy:
	"""Receives decoded trace events and feeds them into the
	TimeSliceList, tracking the current task per cpu for sanity checks."""
	def __init__(self):
		# -1 means "no task observed yet on this cpu"
		self.current_tsk = defaultdict(lambda : -1)
		self.timeslices = TimeSliceList()
	def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
		next_comm, next_pid, next_prio):
		""" Ensure the task we sched out this cpu is really the one
		we logged. Otherwise we may have missed traces """
		on_cpu_task = self.current_tsk[headers.cpu]
		if on_cpu_task != -1 and on_cpu_task != prev_pid:
			print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
				(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
		# remember comm names for pretty-printing via thread_name()
		threads[prev_pid] = prev_comm
		threads[next_pid] = next_comm
		self.current_tsk[headers.cpu] = next_pid
		ts = self.timeslices.get_time_slice(headers.ts())
		ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
	def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
		"""Record a task migration between cpus."""
		ts = self.timeslices.get_time_slice(headers.ts())
		ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
	def wake_up(self, headers, comm, pid, success, target_cpu, fork):
		"""Record a (successful) wakeup; fork != 0 marks a new task."""
		if success == 0:
			# failed wakeups change no runqueue
			return
		ts = self.timeslices.get_time_slice(headers.ts())
		ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
	# perf entry point: create the global event aggregator.
	global parser
	parser = SchedEventProxy()
def trace_end():
	# perf exit point: hand the collected timeslices to the wx GUI.
	app = wx.App(False)
	timeslices = parser.timeslices
	frame = RootFrame(timeslices, "Migration")
	app.MainLoop()
# The following tracepoint handlers must exist for perf's script engine to
# accept the events, but they carry no information needed by the migration
# view, so they intentionally do nothing.
def sched__sched_stat_runtime(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, runtime, vruntime):
	pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, delay):
	pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, delay):
	pass
def sched__sched_stat_wait(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, delay):
	pass
def sched__sched_process_fork(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	parent_comm, parent_pid, child_comm, child_pid):
	pass
def sched__sched_process_wait(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio):
	pass
def sched__sched_process_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio):
	pass
def sched__sched_process_free(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio):
	pass
def sched__sched_migrate_task(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio, orig_cpu,
	dest_cpu):
	# A task moved between cpus: record it against the current timeslice.
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	prev_comm, prev_pid, prev_prio, prev_state,
	next_comm, next_pid, next_prio):
	# Context switch: forward to the proxy for runqueue bookkeeping.
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
		next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio, success,
	target_cpu):
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	# final argument 1 = this wakeup is for a freshly forked task
	parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio, success,
	target_cpu):
	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
		common_pid, common_comm)
	# final argument 0 = ordinary wakeup of an existing task
	parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# More handlers required by perf's script engine but unused by this view.
def sched__sched_wait_task(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio):
	pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	ret):
	pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid):
	pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
	common_pid, common_comm):
	pass
| gpl-2.0 |
jrief/djangocms-cascade | cmsplugin_cascade/bootstrap4/tabs.py | 1 | 2880 | from django.forms import widgets
from django.forms.fields import BooleanField, CharField
from django.utils.translation import ngettext_lazy, gettext_lazy as _
from django.utils.text import Truncator
from django.utils.safestring import mark_safe
from django.forms.fields import IntegerField
from entangled.forms import EntangledModelFormMixin
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from cmsplugin_cascade.plugin_base import TransparentWrapper, TransparentContainer
from cmsplugin_cascade.widgets import NumberInputWidget
from .plugin_base import BootstrapPluginBase
class TabSetFormMixin(ManageChildrenFormMixin, EntangledModelFormMixin):
    """Admin form for the tab set plugin: number of panes plus the
    Bootstrap 'justified' flag."""
    num_children = IntegerField(
        min_value=1,
        initial=1,
        widget=NumberInputWidget(attrs={'size': '3', 'style': 'width: 5em !important;'}),
        label=_("Number of Tabs"),
        help_text=_("Number can be adjusted at any time."),
    )
    justified = BooleanField(
        label=_("Justified tabs"),
        required=False,
    )
    class Meta:
        # num_children drives child creation (ManageChildrenFormMixin) and
        # is not stored; justified is persisted in the plugin glossary.
        untangled_fields = ['num_children']
        entangled_fields = {'glossary': ['justified']}
class BootstrapTabSetPlugin(TransparentWrapper, BootstrapPluginBase):
    """Container plugin rendering a Bootstrap 4 tab set inside a column;
    its direct children are BootstrapTabPanePlugin instances."""
    name = _("Tab Set")
    parent_classes = ['BootstrapColumnPlugin']
    direct_child_classes = ['BootstrapTabPanePlugin']
    require_parent = True
    allow_children = True
    form = TabSetFormMixin
    render_template = 'cascade/bootstrap4/{}tabset.html'
    default_css_class = 'nav-tabs'
    @classmethod
    def get_identifier(cls, instance):
        """Structure-tree label: the number of tabs, pluralized."""
        num_cols = instance.get_num_children()
        content = ngettext_lazy('with {} tab', 'with {} tabs', num_cols).format(num_cols)
        return mark_safe(content)
    def save_model(self, request, obj, form, change):
        # After saving, add/remove tab panes so the child count matches
        # the requested number.
        wanted_children = int(form.cleaned_data.get('num_children'))
        super().save_model(request, obj, form, change)
        self.extend_children(obj, wanted_children, BootstrapTabPanePlugin)
plugin_pool.register_plugin(BootstrapTabSetPlugin)
class TabPaneFormMixin(EntangledModelFormMixin):
    """Admin form for a single tab pane: only its title."""
    tab_title = CharField(
        label=_("Tab Title"),
        widget=widgets.TextInput(attrs={'size': 80}),
    )
    class Meta:
        # the title is persisted in the plugin glossary
        entangled_fields = {'glossary': ['tab_title']}
class BootstrapTabPanePlugin(TransparentContainer, BootstrapPluginBase):
    """One pane inside a BootstrapTabSetPlugin; may hold arbitrary child
    plugins (alien_child_classes)."""
    name = _("Tab Pane")
    direct_parent_classes = parent_classes = ['BootstrapTabSetPlugin']
    require_parent = True
    allow_children = True
    alien_child_classes = True
    form = TabPaneFormMixin
    @classmethod
    def get_identifier(cls, obj):
        """Structure-tree label: the tab title, truncated to three words."""
        content = obj.glossary.get('tab_title', '')
        if content:
            content = Truncator(content).words(3, truncate=' ...')
        return mark_safe(content)
plugin_pool.register_plugin(BootstrapTabPanePlugin)
| mit |
geekboxzone/lollipop_external_chromium_org | tools/telemetry/telemetry/core/platform/posix_platform_backend.py | 26 | 4514 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import distutils.spawn
import logging
import os
import re
import stat
import subprocess
from telemetry.core.platform import desktop_platform_backend
from telemetry.core.platform import ps_util
class PosixPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
  # Shared POSIX implementation of the desktop backend, built on the
  # ps/top command-line tools.
  # This is an abstract class. It is OK to have abstract methods.
  # pylint: disable=W0223
  def RunCommand(self, args):
    """Run a command and return its stdout as a string."""
    return subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
  def GetFileContents(self, path):
    """Return the whole contents of a text file."""
    with open(path, 'r') as f:
      return f.read()
  def GetPsOutput(self, columns, pid=None):
    """Returns output of the 'ps' command as a list of lines.
    Subclass should override this function.
    Args:
      columns: A list of require columns, e.g., ['pid', 'pss'].
      pid: If not None, returns only the information of the process
         with the pid.
    """
    args = ['ps']
    # -p <pid> limits to one process, -e lists all processes
    args.extend(['-p', str(pid)] if pid != None else ['-e'])
    for c in columns:
      # trailing '=' suppresses the header line for each column
      args.extend(['-o', c + '='])
    return self.RunCommand(args).splitlines()
  def _GetTopOutput(self, pid, columns):
    """Returns output of the 'top' command as a list of lines.
    Args:
      pid: pid of process to examine.
      columns: A list of require columns, e.g., ['idlew', 'vsize'].
    """
    args = ['top']
    # one sample (-l 1), no delay (-s 0), only the requested stats
    args.extend(['-pid', str(pid), '-l', '1', '-s', '0', '-stats',
        ','.join(columns)])
    return self.RunCommand(args).splitlines()
  def GetChildPids(self, pid):
    """Returns a list of child pids of |pid|."""
    ps_output = self.GetPsOutput(['pid', 'ppid', 'state'])
    # NOTE(review): non-raw pattern string; works, but r'...' is preferred.
    ps_line_re = re.compile(
        '\s*(?P<pid>\d+)\s*(?P<ppid>\d+)\s*(?P<state>\S*)\s*')
    processes = []
    for pid_ppid_state in ps_output:
      m = ps_line_re.match(pid_ppid_state)
      assert m, 'Did not understand ps output: %s' % pid_ppid_state
      processes.append((m.group('pid'), m.group('ppid'), m.group('state')))
    return ps_util.GetChildPids(processes, pid)
  def GetCommandLine(self, pid):
    """Return the command line of |pid|, or None if it is not running."""
    command = self.GetPsOutput(['command'], pid)
    return command[0] if command else None
  def CanLaunchApplication(self, application):
    """True if |application| can be found on PATH."""
    return bool(distutils.spawn.find_executable(application))
  def IsApplicationRunning(self, application):
    """True if some running process's command matches |application|."""
    ps_output = self.GetPsOutput(['command'])
    # match |application| as a bare command or as the basename of a path
    application_re = re.compile(
        '(.*%s|^)%s(\s|$)' % (os.path.sep, application))
    return any(application_re.match(cmd) for cmd in ps_output)
  def LaunchApplication(
      self, application, parameters=None, elevate_privilege=False):
    """Launch |application| (optionally under sudo) and return the Popen.

    When elevate_privilege is set and the binary is not setuid, the user
    is interactively authenticated via sudo and offered a chmod +s.
    """
    assert application, 'Must specify application to launch'
    if os.path.sep not in application:
      application = distutils.spawn.find_executable(application)
      assert application, 'Failed to find application in path'
    args = [application]
    if parameters:
      assert isinstance(parameters, list), 'parameters must be a list'
      args += parameters
    def IsSetUID(path):
      # True if the setuid bit is set on |path|.
      return (os.stat(path).st_mode & stat.S_ISUID) == stat.S_ISUID
    def IsElevated():
      # Probe sudo non-interactively to see if we are already authenticated.
      p = subprocess.Popen(
          ['sudo', '-nv'], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
          stderr=subprocess.STDOUT)
      stdout = p.communicate()[0]
      # Some versions of sudo set the returncode based on whether sudo requires
      # a password currently. Other versions return output when password is
      # required and no output when the user is already authenticated.
      return not p.returncode and not stdout
    if elevate_privilege and not IsSetUID(application):
      args = ['sudo'] + args
      if not IsElevated():
        print ('Telemetry needs to run %s under sudo. Please authenticate.' %
               application)
        subprocess.check_call(['sudo', '-v']) # Synchronously authenticate.
        prompt = ('Would you like to always allow %s to be run as the current '
                  'user without sudo? If so, Telemetry will '
                  '`sudo chmod +s %s`. (y/N)' % (application, application))
        if raw_input(prompt).lower() == 'y':
          subprocess.check_call(['sudo', 'chmod', '+s', application])
    stderror_destination = subprocess.PIPE
    if logging.getLogger().isEnabledFor(logging.DEBUG):
      # in debug mode let stderr flow to the console
      stderror_destination = None
    return subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=stderror_destination)
| bsd-3-clause |
arichar6/veusz | veusz/dialogs/custom.py | 3 | 11425 | # Copyright (C) 2009 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
from __future__ import division
import ast
import copy
from ..compat import cstrerror
from .. import qtall as qt4
from .. import document
from .veuszdialog import VeuszDialog
def _(text, disambiguation=None, context="CustomDialog"):
    """Translate text via Qt's translation system in the given context."""
    return qt4.QCoreApplication.translate(context, text, disambiguation)
class CustomItemModel(qt4.QAbstractTableModel):
    """A model for editing custom items.

    One instance manages one of the four custom-item lists kept on the
    document ('definition', 'import', 'color' or 'colormap'); all edits are
    applied through document operations so they are undoable.
    """

    # headers for type of widget
    headers = {
        'definition': (_('Name'), _('Definition')),
        'import': (_('Module'), _('Symbol list')),
        'color': (_('Name'), _('Definition')),
        'colormap': (_('Name'), _('Definition')),
    }

    # tooltips for columns
    tooltips = {
        'definition': (
            _('Name for constant, or function name and arguments, e.g. "f(x,y)"'),
            _('Python expression defining constant or function, e.g. "x+y"')),
        'import': (
            _('Module to import symbols from, e.g. "scipy.special"'),
            _('Comma-separated list of symbols to import or "*" to import everything')),
        'color': (
            _('Name of color'),
            _('Definition of color ("#RRGGBB", "#RRGGBBAA" or "red")')),
        'colormap': (
            _('Name of colormap'),
            _('Definition of colormap, defined as lists of RGB tuples, e.g. "((0,0,0),(255,255,255))"')),
    }

    def __init__(self, parent, doc, ctype):
        """
        ctype is 'definition', 'import', 'color' or 'colormap'
        """
        qt4.QAbstractTableModel.__init__(self, parent)
        self.doc = doc
        self.ctype = ctype
        # name of the attribute on doc.evaluate holding the entries
        self.attr = document.OperationSetCustom.type_to_attr[ctype]

        # connect notification of document change
        doc.signalModified.connect(self.doUpdate)

        # do not inform qt model has changed on document change
        self.moddocupignore = False

    def _getCustoms(self):
        """Return the live list of custom entries from the document."""
        return getattr(self.doc.evaluate, self.attr)

    def _getCustomsCopy(self):
        """Deep copy of the entries, safe to mutate before applying an op."""
        return copy.deepcopy(self._getCustoms())

    def rowCount(self, parent):
        # one extra blank row at the end for entering new items
        return 0 if parent.isValid() else len(self._getCustoms())+1

    def columnCount(self, parent):
        return 0 if parent.isValid() else 2

    def data(self, index, role):
        """Lookup data in document.evaluate.customs list."""
        if role in (qt4.Qt.DisplayRole, qt4.Qt.EditRole):
            try:
                defn = self._getCustoms()[index.row()]
            except IndexError:
                # empty row beyond end
                return ''
            col = index.column()
            if self.ctype=='colormap' and col==1:
                # colormap definitions are stored as tuples: show the literal
                return repr(defn[col])
            else:
                return defn[col]
        elif role == qt4.Qt.ToolTipRole:
            # tooltip on row for new entries on last row
            if index.row() == len(self._getCustoms()):
                return self.tooltips[self.ctype][index.column()]
        return None

    def flags(self, index):
        """Items are editable"""
        return (
            qt4.Qt.ItemIsSelectable | qt4.Qt.ItemIsEditable |
            qt4.Qt.ItemIsEnabled )

    def headerData(self, section, orientation, role):
        """Return the headers at the top of the view."""
        if role == qt4.Qt.DisplayRole:
            if orientation == qt4.Qt.Horizontal:
                # columns defined in headers
                return self.headers[self.ctype][section]
            else:
                # number rows
                return str(section+1)
        return None

    def doUpdate(self):
        """Document changed: refresh views unless we caused the change."""
        if not self.moddocupignore:
            self.layoutChanged.emit()

    def validateName(self, val):
        """Check the name column for this item type."""
        if self.ctype == 'import':
            return document.module_re.match(val)
        elif self.ctype == 'definition':
            return (
                document.identifier_re.match(val) or
                document.function_re.match(val))
        else:
            # color or colormap: any non-blank name
            return val.strip() != ''

    def validateDefn(self, value):
        """Check the definition column; colormaps must be a Python literal."""
        if self.ctype == 'colormap':
            try:
                ast.literal_eval(value)
            except (ValueError, SyntaxError):
                return False
        return value.strip() != ''

    def setData(self, index, value, role):
        """Edit an item."""
        if index.isValid() and role == qt4.Qt.EditRole:
            col = index.column()
            row = index.row()
            # column 0 is the name, column 1 the definition (there are
            # only two columns; see columnCount)
            if col == 0:
                ok = self.validateName(value)
            else:
                ok = self.validateDefn(value)
            if not ok:
                return False

            # extend if necessary
            newcustom = self._getCustomsCopy()
            while len(newcustom) < row+1:
                if self.ctype == 'colormap':
                    newcustom.append(['', ((0,0,0),(255,255,255))])
                else:
                    newcustom.append(['', ''])

            if self.ctype=='colormap' and col==1:
                # SECURITY FIX: parse the user-entered text with
                # ast.literal_eval() instead of eval().  validateDefn()
                # above guarantees the value is a plain Python literal, so
                # the result is identical for every accepted input, but
                # arbitrary code execution is no longer possible.
                newcustom[row][col] = ast.literal_eval(value)
            else:
                newcustom[row][col] = value

            self.doc.applyOperation(
                document.OperationSetCustom(self.ctype, newcustom) )

            self.dataChanged.emit(index, index)
            return True
        return False

    def deleteEntry(self, num):
        """Delete row num. True if success."""
        newcustoms = self._getCustomsCopy()
        if num >= len(newcustoms):
            return False
        self.beginRemoveRows(qt4.QModelIndex(), num, num)
        del newcustoms[num]
        # suppress the layoutChanged from our own document modification
        self.moddocupignore = True
        self.doc.applyOperation(
            document.OperationSetCustom(self.ctype, newcustoms))
        self.moddocupignore = False
        self.endRemoveRows()
        return True

    def moveUpEntry(self, num):
        """Move entry num one row up. True if success."""
        newcustoms = self._getCustomsCopy()
        if num == 0 or num >= len(newcustoms):
            return False
        row = newcustoms[num]
        del newcustoms[num]
        newcustoms.insert(num-1, row)
        self.doc.applyOperation(
            document.OperationSetCustom(self.ctype, newcustoms))
        return True

    def moveDownEntry(self, num):
        """Move entry num one row down. True if success."""
        newcustoms = self._getCustomsCopy()
        if num >= len(newcustoms)-1:
            return False
        row = newcustoms[num]
        del newcustoms[num]
        newcustoms.insert(num+1, row)
        self.doc.applyOperation(
            document.OperationSetCustom(self.ctype, newcustoms))
        return True
class CustomDialog(VeuszDialog):
    """A dialog to create or edit custom constant and function
    definitions."""

    def __init__(self, parent, document):
        """Set up the dialog from custom.ui and attach one item model
        per kind of custom definition.

        parent: parent widget (used for file dialogs in slotSave/Load).
        document: the document whose custom definitions are edited.
        """
        VeuszDialog.__init__(self, parent, 'custom.ui')
        self.document = document

        # model/view
        # NOTE(review): the *View widgets are attributes created by
        # loading custom.ui above -- confirm the names match the .ui file
        self.defnModel = CustomItemModel(self, document, 'definition')
        self.defnView.setModel(self.defnModel)

        self.importModel = CustomItemModel(self, document, 'import')
        self.importView.setModel(self.importModel)

        self.colorModel = CustomItemModel(self, document, 'color')
        self.colorView.setModel(self.colorModel)

        self.colormapModel = CustomItemModel(self, document, 'colormap')
        self.colormapView.setModel(self.colormapModel)

        # buttons
        self.removeButton.clicked.connect(self.slotRemove)
        self.upButton.clicked.connect(self.slotUp)
        self.downButton.clicked.connect(self.slotDown)
        self.saveButton.clicked.connect(self.slotSave)
        self.loadButton.clicked.connect(self.slotLoad)

        # recent button shows list of recently used files for loading
        self.recentButton.filechosen.connect(self.loadFile)
        self.recentButton.setSetting('customdialog_recent')

    def loadFile(self, filename):
        """Load the given file."""
        # applied as a document operation so the load can be undone
        self.document.applyOperation(
            document.OperationLoadCustom(filename) )

    def getTabViewAndModel(self):
        """Get view and model for currently selected tab."""
        # tab indices follow the page order set up in __init__:
        # 0=definitions, 1=imports, 2=colors, 3=colormaps
        return {
            0: (self.defnView, self.defnModel),
            1: (self.importView, self.importModel),
            2: (self.colorView, self.colorModel),
            3: (self.colormapView, self.colormapModel)
        }[self.viewsTab.currentIndex()]

    def slotRemove(self):
        """Remove an entry."""
        view, model = self.getTabViewAndModel()
        selected = view.selectedIndexes()
        if selected:
            # only the first selected cell's row is removed
            model.deleteEntry(selected[0].row())

    def slotUp(self):
        """Move item up list."""
        view, model = self.getTabViewAndModel()
        selected = view.selectedIndexes()
        if selected:
            row = selected[0].row()
            if model.moveUpEntry(row):
                # keep the moved item selected at its new position
                idx = model.index(row-1, selected[0].column())
                view.setCurrentIndex(idx)

    def slotDown(self):
        """Move item down list."""
        view, model = self.getTabViewAndModel()
        selected = view.selectedIndexes()
        if selected:
            row = selected[0].row()
            if model.moveDownEntry(row):
                # keep the moved item selected at its new position
                idx = model.index(row+1, selected[0].column())
                view.setCurrentIndex(idx)

    def slotSave(self):
        """Save entries."""
        filename = self.parent().fileSaveDialog(
            [_('Veusz document (*.vsz)')], _('Save custom definitions'))
        if filename:
            try:
                with open(filename, 'w') as f:
                    self.document.evaluate.saveCustomFile(f)
                # remember the file for the recent-files button
                self.recentButton.addFile(filename)
            except EnvironmentError as e:
                qt4.QMessageBox.critical(
                    self, _("Error - Veusz"),
                    _("Unable to save '%s'\n\n%s") % (
                        filename, cstrerror(e)))

    def slotLoad(self):
        """Load entries."""
        filename = self.parent().fileOpenDialog(
            [_('Veusz document (*.vsz)')], _('Load custom definitions'))
        if filename:
            try:
                self.loadFile(filename)
            except EnvironmentError as e:
                qt4.QMessageBox.critical(
                    self, _("Error - Veusz"),
                    _("Unable to load '%s'\n\n%s") % (
                        filename, cstrerror(e)))
            else:
                # add to recent file list
                self.recentButton.addFile(filename)
| gpl-2.0 |
kramwens/order_bot | venv/lib/python2.7/site-packages/pip/vcs/__init__.py | 344 | 12374 | """Handles all VCS (version control) support"""
from __future__ import absolute_import
import errno
import logging
import os
import shutil
import sys
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip.exceptions import BadCommand
from pip.utils import (display_path, backup_dir, call_subprocess,
rmtree, ask_path_exists)
__all__ = ['vcs', 'get_src_requirement']
logger = logging.getLogger(__name__)
class VcsSupport(object):
    """Registry of the available version-control backends."""

    # shared across instances: maps backend name -> backend class
    _registry = {}
    schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']

    def __init__(self):
        # Teach urlparse about the VCS schemes so URLs such as
        # git+ssh://... are split correctly.
        urllib_parse.uses_netloc.extend(self.schemes)
        # Python >= 2.7.4, 3.3 doesn't have uses_fragment
        if getattr(urllib_parse, 'uses_fragment', None):
            urllib_parse.uses_fragment.extend(self.schemes)
        super(VcsSupport, self).__init__()

    def __iter__(self):
        return iter(self._registry)

    @property
    def backends(self):
        return list(self._registry.values())

    @property
    def dirnames(self):
        return [backend.dirname for backend in self.backends]

    @property
    def all_schemes(self):
        return [scheme
                for backend in self.backends
                for scheme in backend.schemes]

    def register(self, cls):
        # a backend without a name cannot be looked up later
        if not hasattr(cls, 'name'):
            logger.warning('Cannot register VCS %s', cls.__name__)
            return
        if cls.name not in self._registry:
            self._registry[cls.name] = cls
            logger.debug('Registered VCS backend: %s', cls.name)

    def unregister(self, cls=None, name=None):
        if name in self._registry:
            del self._registry[name]
        elif cls in self._registry.values():
            del self._registry[cls.name]
        else:
            logger.warning('Cannot unregister because no class or name given')

    def get_backend_name(self, location):
        """
        Return the name of the version control backend if found at given
        location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
        """
        for vc_type in self._registry.values():
            if vc_type.controls_location(location):
                logger.debug('Determine that %s uses VCS: %s',
                             location, vc_type.name)
                return vc_type.name
        return None

    def get_backend(self, name):
        # lookup is case-insensitive; returns None when unknown
        return self._registry.get(name.lower())

    def get_backend_from_location(self, location):
        backend_name = self.get_backend_name(location)
        if backend_name:
            return self.get_backend(backend_name)
        return None
# Module-level singleton registry; backends register themselves on it.
vcs = VcsSupport()
class VersionControl(object):
    """Abstract base class for a version-control backend.

    Concrete backends override the NotImplementedError methods; this
    base class provides URL parsing/comparison helpers, the interactive
    handling of pre-existing checkout directories, and the subprocess
    plumbing used to invoke the VCS executable.
    """

    name = ''     # backend name, e.g. 'git'; also the executable invoked
    dirname = ''  # control directory marking a checkout, e.g. '.git'
    # List of supported schemes for this Version Control
    schemes = ()

    def __init__(self, url=None, *args, **kwargs):
        self.url = url
        super(VersionControl, self).__init__(*args, **kwargs)

    def _is_local_repository(self, repo):
        """
        posix absolute paths start with os.path.sep,
        win32 ones start with drive (like c:\\folder)
        """
        # NOTE(review): returns the drive string (truthy) rather than a
        # bool for win32-style paths; callers only test truthiness.
        drive, tail = os.path.splitdrive(repo)
        return repo.startswith(os.path.sep) or drive

    # See issue #1083 for why this method was introduced:
    # https://github.com/pypa/pip/issues/1083
    def translate_egg_surname(self, surname):
        # For example, Django has branches of the form "stable/1.7.x".
        return surname.replace('/', '_')

    def export(self, location):
        """
        Export the repository at the url to the destination location
        i.e. only download the files, without vcs informations
        """
        raise NotImplementedError

    def get_url_rev(self):
        """
        Returns the correct repository URL and revision by parsing the given
        repository URL
        """
        error_message = (
            "Sorry, '%s' is a malformed VCS url. "
            "The format is <vcs>+<protocol>://<url>, "
            "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
        )
        assert '+' in self.url, error_message % self.url
        # drop the leading '<vcs>+' prefix before splitting the URL
        url = self.url.split('+', 1)[1]
        scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
        rev = None
        if '@' in path:
            # the revision is given after the last '@' in the path
            path, rev = path.rsplit('@', 1)
        url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
        return url, rev

    def get_info(self, location):
        """
        Returns (url, revision), where both are strings
        """
        assert not location.rstrip('/').endswith(self.dirname), \
            'Bad directory: %s' % location
        return self.get_url(location), self.get_revision(location)

    def normalize_url(self, url):
        """
        Normalize a URL for comparison by unquoting it and removing any
        trailing slash.
        """
        return urllib_parse.unquote(url).rstrip('/')

    def compare_urls(self, url1, url2):
        """
        Compare two repo URLs for identity, ignoring incidental differences.
        """
        return (self.normalize_url(url1) == self.normalize_url(url2))

    def obtain(self, dest):
        """
        Called when installing or updating an editable package, takes the
        source path of the checkout.
        """
        raise NotImplementedError

    def switch(self, dest, url, rev_options):
        """
        Switch the repo at ``dest`` to point to ``URL``.
        """
        raise NotImplementedError

    def update(self, dest, rev_options):
        """
        Update an already-existing repo to the given ``rev_options``.
        """
        raise NotImplementedError

    def check_version(self, dest, rev_options):
        """
        Return True if the version is identical to what exists and
        doesn't need to be updated.
        """
        raise NotImplementedError

    def check_destination(self, dest, url, rev_options, rev_display):
        """
        Prepare a location to receive a checkout/clone.

        Return True if the location is ready for (and requires) a
        checkout/clone, False otherwise.
        """
        checkout = True
        prompt = False
        if os.path.exists(dest):
            checkout = False
            if os.path.exists(os.path.join(dest, self.dirname)):
                # dest is already a checkout of this VCS type
                existing_url = self.get_url(dest)
                if self.compare_urls(existing_url, url):
                    # same repository: update in place if needed
                    logger.debug(
                        '%s in %s exists, and has correct URL (%s)',
                        self.repo_name.title(),
                        display_path(dest),
                        url,
                    )
                    if not self.check_version(dest, rev_options):
                        logger.info(
                            'Updating %s %s%s',
                            display_path(dest),
                            self.repo_name,
                            rev_display,
                        )
                        self.update(dest, rev_options)
                    else:
                        logger.info(
                            'Skipping because already up-to-date.')
                else:
                    # same VCS type but a different repository URL
                    logger.warning(
                        '%s %s in %s exists with URL %s',
                        self.name,
                        self.repo_name,
                        display_path(dest),
                        existing_url,
                    )
                    prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
                              ('s', 'i', 'w', 'b'))
            else:
                # dest exists but is not a checkout of this VCS type
                logger.warning(
                    'Directory %s already exists, and is not a %s %s.',
                    dest,
                    self.name,
                    self.repo_name,
                )
                prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
        if prompt:
            logger.warning(
                'The plan is to install the %s repository %s',
                self.name,
                url,
            )
            response = ask_path_exists('What to do?  %s' % prompt[0],
                                       prompt[1])
            if response == 's':
                logger.info(
                    'Switching %s %s to %s%s',
                    self.repo_name,
                    display_path(dest),
                    url,
                    rev_display,
                )
                self.switch(dest, url, rev_options)
            elif response == 'i':
                # do nothing
                pass
            elif response == 'w':
                logger.warning('Deleting %s', display_path(dest))
                rmtree(dest)
                checkout = True
            elif response == 'b':
                dest_dir = backup_dir(dest)
                logger.warning(
                    'Backing up %s to %s', display_path(dest), dest_dir,
                )
                shutil.move(dest, dest_dir)
                checkout = True
            elif response == 'a':
                # NOTE(review): 'a' (abort) is handled here although the
                # prompt strings above do not offer it -- presumably
                # ask_path_exists accepts an abort answer; confirm.
                sys.exit(-1)
        return checkout

    def unpack(self, location):
        """
        Clean up current location and download the url repository
        (and vcs infos) into location
        """
        if os.path.exists(location):
            rmtree(location)
        self.obtain(location)

    def get_src_requirement(self, dist, location):
        """
        Return a string representing the requirement needed to
        redownload the files currently present in location, something
        like:
          {repository_url}@{revision}#egg={project_name}-{version_identifier}
        """
        raise NotImplementedError

    def get_url(self, location):
        """
        Return the url used at location

        Used in get_info or check_destination
        """
        raise NotImplementedError

    def get_revision(self, location):
        """
        Return the current revision of the files at location

        Used in get_info
        """
        raise NotImplementedError

    def run_command(self, cmd, show_stdout=True, cwd=None,
                    on_returncode='raise',
                    command_desc=None,
                    extra_environ=None, spinner=None):
        """
        Run a VCS subcommand

        This is simply a wrapper around call_subprocess that adds the VCS
        command name, and checks that the VCS is available
        """
        cmd = [self.name] + cmd
        try:
            return call_subprocess(cmd, show_stdout, cwd,
                                   on_returncode,
                                   command_desc, extra_environ,
                                   spinner)
        except OSError as e:
            # errno.ENOENT = no such file or directory
            # In other words, the VCS executable isn't available
            if e.errno == errno.ENOENT:
                raise BadCommand('Cannot find command %r' % self.name)
            else:
                raise  # re-raise exception if a different error occurred

    @classmethod
    def controls_location(cls, location):
        """
        Check if a location is controlled by the vcs.
        It is meant to be overridden to implement smarter detection
        mechanisms for specific vcs.
        """
        logger.debug('Checking in %s for %s (%s)...',
                     location, cls.dirname, cls.name)
        path = os.path.join(location, cls.dirname)
        return os.path.exists(path)
def get_src_requirement(dist, location):
    """Return a pip requirement string for the checkout at location,
    falling back to dist.as_requirement() when the VCS cannot be
    determined or its command is unavailable."""
    version_control = vcs.get_backend_from_location(location)
    if version_control is None:
        logger.warning(
            'cannot determine version of editable source in %s (is not SVN '
            'checkout, Git clone, Mercurial clone or Bazaar branch)',
            location,
        )
        return dist.as_requirement()
    try:
        return version_control().get_src_requirement(dist, location)
    except BadCommand:
        logger.warning(
            'cannot determine version of editable source in %s '
            '(%s command not found in path)',
            location,
            version_control.name,
        )
        return dist.as_requirement()
| mit |
guorendong/iridium-browser-ubuntu | tools/gn/last_commit_position.py | 79 | 2736 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Writes the most recent "Cr-Commit-Position" value on the master branch
to a C header file.
Usage: last_commit_position.py <dir> <outfile> <headerguard>
<dir>
Some directory inside the repo to check. This will be used as the current
directory when running git. It's best to pass the repo toplevel directory.
<outfile>
C header file to write.
<headerguard>
String to use as the header guard for the written file.
"""
import os
import re
import subprocess
import sys
def RunGitCommand(directory, command):
    """
    Launches git subcommand.

    Errors are swallowed.

    Returns:
      A process object or None.
    """
    full_cmd = ['git'] + command
    # Force shell usage under cygwin. This is a workaround for
    # mysterious loss of cwd while invoking cygwin's git.
    # We can't just pass shell=True to Popen, as under win32 this will
    # cause CMD to be used, while we explicitly want a cygwin shell.
    if sys.platform == 'cygwin':
        full_cmd = ['sh', '-c', ' '.join(full_cmd)]
    use_shell = (sys.platform == 'win32')
    try:
        return subprocess.Popen(full_cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                cwd=directory,
                                shell=use_shell)
    except OSError:
        # git missing or directory invalid: caller handles None
        return None
def FetchCommitPosition(directory):
    """Return the most recent Cr-Commit-Position number from the git log
    of |directory|, as a string, or None if it cannot be found."""
    regex = re.compile(r'\s*Cr-Commit-Position: refs/heads/master@\{#(\d+)\}\s*')

    # Search this far backward in the git log. The commit position should be
    # close to the top. We allow some slop for long commit messages, and maybe
    # there were some local commits after the last "official" one. Having this
    # max prevents us from searching all history in the case of an error.
    max_lines = 2048

    proc = RunGitCommand(directory, ['log'])
    if proc is None:
        # git could not be launched (not installed, bad directory):
        # previously this fell through to proc.stdout and raised
        # AttributeError; report "not found" instead.
        return None
    for _ in range(max_lines):
        line = proc.stdout.readline()
        if not line:
            return None
        match = regex.match(line)
        if match:
            return match.group(1)
    return None
def WriteHeader(header_file, header_guard, value):
    """Write a C header to header_file defining LAST_COMMIT_POSITION,
    wrapped in the given include guard."""
    contents = '''/* Generated by last_commit_position.py. */
#ifndef %(guard)s
#define %(guard)s
#define LAST_COMMIT_POSITION "%(value)s"
#endif
''' % {'guard': header_guard, 'value': value}
    with open(header_file, 'w') as f:
        f.write(contents)
# Command line: last_commit_position.py <dir> <outfile> <headerguard>
if len(sys.argv) != 4:
    # Single-argument print(...) is valid in both Python 2 and 3,
    # unlike the Python-2-only print statement used previously.
    print("Wrong number of arguments")
    sys.exit(1)

git_directory = sys.argv[1]
output_file = sys.argv[2]
header_guard = sys.argv[3]

value = FetchCommitPosition(git_directory)
if not value:
    print("Could not get last commit position.")
    sys.exit(1)

WriteHeader(output_file, header_guard, value)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.