repo_name stringlengths 5 100 | path stringlengths 4 299 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1.03M | license stringclasses 15 values | hash int64 -9,223,351,895,964,839,000 9,223,297,778B | line_mean float64 3.17 100 | line_max int64 7 1k | alpha_frac float64 0.25 0.98 | autogenerated bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|
pchaigno/grreat | gui/api_object_renderers.py | 2 | 11656 | #!/usr/bin/env python
"""This module contains RESTful API renderers for AFF4 objects and RDFValues."""
import itertools
import numbers
import re
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.rdfvalues import structs
class ApiObjectRenderer(object):
"""Baseclass for restful API objects rendering classes."""
__metaclass__ = registry.MetaclassRegistry
# API renderers can render RDFValues and AFF4Objects. Each renderer has
# to be bound either to a single AFF4 type or to a rdfvalue type.
aff4_type = None
rdfvalue_type = None
def __init__(self):
if self.aff4_type and self.rdfvalue_type:
raise ValueError("Can't have both aff4_type and rdfvalue_type set.")
if not self.aff4_type and not self.rdfvalue_type:
raise ValueError("Have to set either aff4_type or rdfvalue_type.")
_type_list_cache = {}
def GetTypeList(self, value):
try:
return ApiObjectRenderer._type_list_cache[value.__class__.__name__]
except KeyError:
type_list = [klass.__name__ for klass in value.__class__.__mro__]
ApiObjectRenderer._type_list_cache[value.__class__.__name__] = type_list
return type_list
def _ToPrimitive(self, value, request):
"""Function used to convert values to JSON-friendly data structure.
Args:
value: An value to convert. May be either a plain Python value,
an RDFValue or an Enum.
request: Request parameters dictionary.
Returns:
JSON-friendly data: a string, a number, a dict or an array.
"""
limit_lists = int(request.get("limit_lists", 0))
no_lists = request.get("no_lists", False)
# We use RenderObject (main function of this module) to render
# all AFF4 and RDF values.
if isinstance(value, rdfvalue.RDFValue):
return RenderObject(value, request)
# Repeated fields get converted to lists with each value
# being recursively converted with _ToPrimitive().
if isinstance(value, structs.RepeatedFieldHelper):
if no_lists:
return []
return list(self._ToPrimitive(v, request) for v in value)
# Plain dictionaries are converted to dictionaries with each value
# being recursively converted with _ToPrimitive().
elif isinstance(value, dict):
result = {}
for k, v in value.items():
if isinstance(v, structs.RepeatedFieldHelper):
if no_lists:
continue
if limit_lists and len(v) > 10:
result[k] = self._ToPrimitive(v[:limit_lists], request)
result[k + "_fetch_more_url"] = "to_be_implemented"
else:
result[k] = self._ToPrimitive(v, request)
else:
result[k] = self._ToPrimitive(v, request)
return result
# Enums are converted to strings representing the name of the value.
elif isinstance(value, structs.Enum):
return value.name
# Make sure string values are properly encoded, otherwise we may have
# problems with JSON-encoding them.
elif isinstance(value, basestring):
return utils.SmartUnicode(value)
# Numbers are returned as-is.
elif isinstance(value, numbers.Number):
return value
# Everything else is returned in as string.
else:
return utils.SmartUnicode(value)
def RenderObject(self, obj, request):
"""Renders given object as plain JSON-friendly data structure."""
raise NotImplementedError()
class RDFValueApiObjectRenderer(ApiObjectRenderer):
"""Renderer for a generic rdfvalue."""
rdfvalue_type = "RDFValue"
def RenderObject(self, value, request):
with_type_info = request.get("with_type_info", False)
result = value.SerializeToDataStore()
if isinstance(result, basestring):
result = utils.SmartUnicode(result)
if with_type_info:
result = dict(type=value.__class__.__name__,
mro=self.GetTypeList(value),
value=result,
age=value.age.AsSecondsFromEpoch())
return result
class RDFValueArrayApiObjectRenderer(RDFValueApiObjectRenderer):
"""Renderer for RDFValueArray."""
rdfvalue_type = "RDFValueArray"
def RenderObject(self, value, request):
return list(self._ToPrimitive(v, request) for v in value)
class FlowStateApiObjectRenderer(RDFValueApiObjectRenderer):
"""Renderer for FlowState."""
rdfvalue_type = "FlowState"
def RenderObject(self, value, request):
return self._ToPrimitive(value.data, request)
class DataBlobApiObjectRenderer(RDFValueApiObjectRenderer):
"""Renderer for DataBlob."""
rdfvalue_type = "DataBlob"
def RenderObject(self, value, request):
return self._ToPrimitive(value.GetValue(), request)
class EmbeddedRDFValueApiObjectRenderer(RDFValueApiObjectRenderer):
"""Renderer for EmbeddedRDFValue."""
rdfvalue_type = "EmbeddedRDFValue"
def RenderObject(self, value, request):
return self._ToPrimitive(value.payload, request)
class RDFProtoStructApiObjectRenderer(ApiObjectRenderer):
"""Renderer for RDFProtoStructs."""
rdfvalue_type = "RDFProtoStruct"
translator = {}
descriptors_cache = {}
def RenderObject(self, value, request):
with_type_info = request.get("with_type_info", False)
with_descriptors = request.get("with_descriptors", False)
result = self._ToPrimitive(value.AsDict(), request)
for key in result.keys():
if key in self.translator:
result[key] = self.translator[key](self, value, request)
# If type information is needed, converted value is placed in the
# resulting dictionary under the 'value' key.
if with_type_info:
result = dict(type=value.__class__.__name__,
mro=self.GetTypeList(value),
value=result,
age=value.age.AsSecondsFromEpoch())
if with_descriptors:
try:
descriptors, order = self.descriptors_cache[value.__class__.__name__]
except KeyError:
descriptors = {}
order = []
for descriptor, _ in value.ListFields():
order.append(descriptor.name)
descriptors[descriptor.name] = {
"friendly_name": descriptor.friendly_name,
"description": descriptor.description
}
self.descriptors_cache[value.__class__.__name__] = (descriptors,
order)
result["descriptors"] = descriptors
result["fields_order"] = order
return result
class GrrMessageApiObjectRenderer(RDFProtoStructApiObjectRenderer):
"""Renderer for GrrMessage objects."""
rdfvalue_type = "GrrMessage"
def RenderPayload(self, value, request):
return self._ToPrimitive(value.payload, request)
translator = dict(args=RenderPayload)
class AFF4ObjectApiObjectRenderer(ApiObjectRenderer):
"""Renderer for a generic AFF4 object."""
aff4_type = "AFF4Object"
def RenderObject(self, aff4_object, request):
"""Render given aff4 object into JSON-serializable data structure."""
with_type_info = request.get("with_type_info", False)
with_descriptors = request.get("with_descriptors", False)
attributes = {}
for attribute, values in aff4_object.synced_attributes.items():
attributes[attribute.predicate] = []
for value in values:
# This value is really a LazyDecoder() instance. We need to get at the
# real data here.
value = value.ToRDFValue()
if aff4_object.age_policy != aff4.NEWEST_TIME:
attributes[attribute.predicate].append(self._ToPrimitive(value,
request))
else:
attributes[attribute.predicate] = self._ToPrimitive(value, request)
result = dict(aff4_class=aff4_object.__class__.__name__,
urn=utils.SmartUnicode(aff4_object.urn),
attributes=attributes,
age_policy=aff4_object.age_policy)
if with_type_info and with_descriptors:
descriptors = {}
for attribute, _ in aff4_object.synced_attributes.items():
descriptors[attribute.predicate] = {
"description": attribute.description
}
result["descriptors"] = descriptors
return result
class RDFValueCollectionApiObjectRenderer(AFF4ObjectApiObjectRenderer):
"""Renderer for RDFValueCollections."""
aff4_type = "RDFValueCollection"
def RenderObject(self, aff4_object, request):
offset = int(request.get("offset", 0))
count = int(request.get("count", 10000))
with_total_count = request.get("with_total_count", False)
filter_value = request.get("filter", "")
if filter_value:
index = 0
items = []
for item in aff4_object.GenerateItems():
serialized_item = item.SerializeToString()
if re.search(re.escape(filter_value), serialized_item, re.I):
if index >= offset:
items.append(item)
index += 1
if len(items) >= count:
break
else:
items = itertools.islice(aff4_object.GenerateItems(),
offset, offset + count)
rendered_object = super(RDFValueCollectionApiObjectRenderer,
self).RenderObject(aff4_object, request)
rendered_object["offset"] = offset
rendered_object["items"] = [self._ToPrimitive(item, request)
for item in items]
if with_total_count:
if hasattr(aff4_object, "CalculateLength"):
total_count = aff4_object.CalculateLength()
else:
total_count = len(aff4_object)
rendered_object["total_count"] = total_count
return rendered_object
class VFSGRRClientApiObjectRenderer(AFF4ObjectApiObjectRenderer):
"""Renderer for VFSGRRClient objects."""
aff4_type = "VFSGRRClient"
def RenderObject(self, aff4_object, request):
rendered_object = super(VFSGRRClientApiObjectRenderer, self).RenderObject(
aff4_object, request)
rendered_object["summary"] = self._ToPrimitive(aff4_object.GetSummary(),
request)
return rendered_object
RENDERERS_CACHE = {}
def RenderObject(obj, request=None):
"""Handler for the /api/aff4 requests."""
if request is None:
request = {}
if isinstance(obj, aff4.AFF4Object):
is_aff4 = True
key = "aff4." + obj.__class__.__name__
elif isinstance(obj, rdfvalue.RDFValue):
is_aff4 = False
key = "rdfvalue." + obj.__class__.__name__
else:
raise ValueError("Can't render object that's neither AFF4Object nor "
"RDFValue: %s." % utils.SmartStr(obj))
try:
renderer_cls = RENDERERS_CACHE[key]
except KeyError:
candidates = []
for candidate in ApiObjectRenderer.classes.values():
if is_aff4 and candidate.aff4_type:
candidate_class = aff4.AFF4Object.classes[candidate.aff4_type]
elif candidate.rdfvalue_type:
candidate_class = rdfvalue.RDFValue.classes[candidate.rdfvalue_type]
else:
continue
if aff4.issubclass(obj.__class__, candidate_class):
candidates.append((candidate, candidate_class))
if not candidates:
raise RuntimeError("No renderer found for object %s." %
obj.__class__.__name__)
candidates = sorted(candidates,
key=lambda candidate: len(candidate[1].mro()))
renderer_cls = candidates[-1][0]
RENDERERS_CACHE[key] = renderer_cls
api_renderer = renderer_cls()
rendered_data = api_renderer.RenderObject(obj, request)
return rendered_data
| apache-2.0 | -7,277,226,933,335,509,000 | 30.934247 | 80 | 0.650137 | false |
buntyke/Flask | microblog/flask/lib/python2.7/site-packages/sqlalchemy/testing/suite/test_insert.py | 115 | 8132 | from .. import fixtures, config
from ..config import requirements
from .. import exclusions
from ..assertions import eq_
from .. import engines
from sqlalchemy import Integer, String, select, literal_column, literal
from ..schema import Table, Column
class LastrowidTest(fixtures.TablesTest):
run_deletes = 'each'
__backend__ = True
__requires__ = 'implements_get_lastrowid', 'autoincrement_insert'
__engine_options__ = {"implicit_returning": False}
@classmethod
def define_tables(cls, metadata):
Table('autoinc_pk', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50))
)
Table('manual_pk', metadata,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('data', String(50))
)
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(
row,
(config.db.dialect.default_sequence_base, "some data")
)
def test_autoincrement_on_insert(self):
config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
self._assert_round_trip(self.tables.autoinc_pk, config.db)
def test_last_inserted_id(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(
r.inserted_primary_key,
[pk]
)
# failed on pypy1.9 but seems to be OK on pypy 2.1
# @exclusions.fails_if(lambda: util.pypy,
# "lastrowid not maintained after "
# "connection close")
@requirements.dbapi_lastrowid
def test_native_lastrowid_autoinc(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
lastrowid = r.lastrowid
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(
lastrowid, pk
)
class InsertBehaviorTest(fixtures.TablesTest):
run_deletes = 'each'
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('autoinc_pk', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50))
)
Table('manual_pk', metadata,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('data', String(50))
)
Table('includes_defaults', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50)),
Column('x', Integer, default=5),
Column('y', Integer,
default=literal_column("2", type_=Integer) + literal(2)))
def test_autoclose_on_insert(self):
if requirements.returning.enabled:
engine = engines.testing_engine(
options={'implicit_returning': False})
else:
engine = config.db
r = engine.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
assert r._soft_closed
assert not r.closed
assert r.is_insert
assert not r.returns_rows
@requirements.returning
def test_autoclose_on_insert_implicit_returning(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
assert r._soft_closed
assert not r.closed
assert r.is_insert
assert not r.returns_rows
@requirements.empty_inserts
def test_empty_insert(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
)
assert r._soft_closed
assert not r.closed
r = config.db.execute(
self.tables.autoinc_pk.select().
where(self.tables.autoinc_pk.c.id != None)
)
assert len(r.fetchall())
@requirements.insert_from_select
def test_insert_from_select(self):
table = self.tables.manual_pk
config.db.execute(
table.insert(),
[
dict(id=1, data="data1"),
dict(id=2, data="data2"),
dict(id=3, data="data3"),
]
)
config.db.execute(
table.insert(inline=True).
from_select(("id", "data",),
select([table.c.id + 5, table.c.data]).
where(table.c.data.in_(["data2", "data3"]))
),
)
eq_(
config.db.execute(
select([table.c.data]).order_by(table.c.data)
).fetchall(),
[("data1", ), ("data2", ), ("data2", ),
("data3", ), ("data3", )]
)
@requirements.insert_from_select
def test_insert_from_select_with_defaults(self):
table = self.tables.includes_defaults
config.db.execute(
table.insert(),
[
dict(id=1, data="data1"),
dict(id=2, data="data2"),
dict(id=3, data="data3"),
]
)
config.db.execute(
table.insert(inline=True).
from_select(("id", "data",),
select([table.c.id + 5, table.c.data]).
where(table.c.data.in_(["data2", "data3"]))
),
)
eq_(
config.db.execute(
select([table]).order_by(table.c.data, table.c.id)
).fetchall(),
[(1, 'data1', 5, 4), (2, 'data2', 5, 4),
(7, 'data2', 5, 4), (3, 'data3', 5, 4), (8, 'data3', 5, 4)]
)
class ReturningTest(fixtures.TablesTest):
run_create_tables = 'each'
__requires__ = 'returning', 'autoincrement_insert'
__backend__ = True
__engine_options__ = {"implicit_returning": True}
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(
row,
(config.db.dialect.default_sequence_base, "some data")
)
@classmethod
def define_tables(cls, metadata):
Table('autoinc_pk', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(50))
)
@requirements.fetch_rows_post_commit
def test_explicit_returning_pk_autocommit(self):
engine = config.db
table = self.tables.autoinc_pk
r = engine.execute(
table.insert().returning(
table.c.id),
data="some data"
)
pk = r.first()[0]
fetched_pk = config.db.scalar(select([table.c.id]))
eq_(fetched_pk, pk)
def test_explicit_returning_pk_no_autocommit(self):
engine = config.db
table = self.tables.autoinc_pk
with engine.begin() as conn:
r = conn.execute(
table.insert().returning(
table.c.id),
data="some data"
)
pk = r.first()[0]
fetched_pk = config.db.scalar(select([table.c.id]))
eq_(fetched_pk, pk)
def test_autoincrement_on_insert_implcit_returning(self):
config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
self._assert_round_trip(self.tables.autoinc_pk, config.db)
def test_last_inserted_id_implicit_returning(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(),
data="some data"
)
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(
r.inserted_primary_key,
[pk]
)
__all__ = ('LastrowidTest', 'InsertBehaviorTest', 'ReturningTest')
| mit | 6,111,832,288,551,379,000 | 29.230483 | 78 | 0.520413 | false |
svn2github/google-protobuf | python/google/protobuf/internal/reflection_test.py | 23 | 120400 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for reflection.py, which also indirectly tests the output of the
pure-Python protocol compiler.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import copy
import gc
import operator
import struct
from google.apputils import basetest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import text_format
from google.protobuf.internal import api_implementation
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf.internal import wire_format
from google.protobuf.internal import test_util
from google.protobuf.internal import decoder
class _MiniDecoder(object):
"""Decodes a stream of values from a string.
Once upon a time we actually had a class called decoder.Decoder. Then we
got rid of it during a redesign that made decoding much, much faster overall.
But a couple tests in this file used it to check that the serialized form of
a message was correct. So, this class implements just the methods that were
used by said tests, so that we don't have to rewrite the tests.
"""
def __init__(self, bytes):
self._bytes = bytes
self._pos = 0
def ReadVarint(self):
result, self._pos = decoder._DecodeVarint(self._bytes, self._pos)
return result
ReadInt32 = ReadVarint
ReadInt64 = ReadVarint
ReadUInt32 = ReadVarint
ReadUInt64 = ReadVarint
def ReadSInt64(self):
return wire_format.ZigZagDecode(self.ReadVarint())
ReadSInt32 = ReadSInt64
def ReadFieldNumberAndWireType(self):
return wire_format.UnpackTag(self.ReadVarint())
def ReadFloat(self):
result = struct.unpack("<f", self._bytes[self._pos:self._pos+4])[0]
self._pos += 4
return result
def ReadDouble(self):
result = struct.unpack("<d", self._bytes[self._pos:self._pos+8])[0]
self._pos += 8
return result
def EndOfStream(self):
return self._pos == len(self._bytes)
class ReflectionTest(basetest.TestCase):
def assertListsEqual(self, values, others):
self.assertEqual(len(values), len(others))
for i in range(len(values)):
self.assertEqual(values[i], others[i])
def testScalarConstructor(self):
# Constructor with only scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_double=54.321,
optional_string='optional_string')
self.assertEqual(24, proto.optional_int32)
self.assertEqual(54.321, proto.optional_double)
self.assertEqual('optional_string', proto.optional_string)
def testRepeatedScalarConstructor(self):
# Constructor with only repeated scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_int32=[1, 2, 3, 4],
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_string=["optional_string"])
self.assertEquals([1, 2, 3, 4], list(proto.repeated_int32))
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(["optional_string"], list(proto.repeated_string))
def testRepeatedCompositeConstructor(self):
# Constructor with only repeated composite types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
repeatedgroup=[
unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)])
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
self.assertEquals(
[unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)],
list(proto.repeatedgroup))
def testMixedConstructor(self):
# Constructor with only mixed types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_string='optional_string',
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)])
self.assertEqual(24, proto.optional_int32)
self.assertEqual('optional_string', proto.optional_string)
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
def testConstructorTypeError(self):
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_int32="foo")
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=["foo"])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=[1234])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=[1234])
def testConstructorInvalidatesCachedByteSize(self):
message = unittest_pb2.TestAllTypes(optional_int32 = 12)
self.assertEquals(2, message.ByteSize())
message = unittest_pb2.TestAllTypes(
optional_nested_message = unittest_pb2.TestAllTypes.NestedMessage())
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(repeated_int32 = [12])
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(
repeated_nested_message = [unittest_pb2.TestAllTypes.NestedMessage()])
self.assertEquals(3, message.ByteSize())
def testSimpleHasBits(self):
# Test a scalar.
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_int32'))
self.assertEqual(0, proto.optional_int32)
# HasField() shouldn't be true if all we've done is
# read the default value.
self.assertTrue(not proto.HasField('optional_int32'))
proto.optional_int32 = 1
# Setting a value however *should* set the "has" bit.
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
# And clearing that value should unset the "has" bit.
self.assertTrue(not proto.HasField('optional_int32'))
def testHasBitsWithSinglyNestedScalar(self):
# Helper used to test foreign messages and groups.
#
# composite_field_name should be the name of a non-repeated
# composite (i.e., foreign or group) field in TestAllTypes,
# and scalar_field_name should be the name of an integer-valued
# scalar field within that composite.
#
# I never thought I'd miss C++ macros and templates so much. :(
# This helper is semantically just:
#
# assert proto.composite_field.scalar_field == 0
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
#
# proto.composite_field.scalar_field = 10
# old_composite_field = proto.composite_field
#
# assert proto.composite_field.scalar_field == 10
# assert proto.composite_field.HasField('scalar_field')
# assert proto.HasField('composite_field')
#
# proto.ClearField('composite_field')
#
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
# assert proto.composite_field.scalar_field == 0
#
# # Now ensure that ClearField('composite_field') disconnected
# # the old field object from the object tree...
# assert old_composite_field is not proto.composite_field
# old_composite_field.scalar_field = 20
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
def TestCompositeHasBits(composite_field_name, scalar_field_name):
proto = unittest_pb2.TestAllTypes()
# First, check that we can get the scalar value, and see that it's the
# default (0), but that proto.HasField('omposite') and
# proto.composite.HasField('scalar') will still return False.
composite_field = getattr(proto, composite_field_name)
original_scalar_value = getattr(composite_field, scalar_field_name)
self.assertEqual(0, original_scalar_value)
# Assert that the composite object does not "have" the scalar.
self.assertTrue(not composite_field.HasField(scalar_field_name))
# Assert that proto does not "have" the composite field.
self.assertTrue(not proto.HasField(composite_field_name))
# Now set the scalar within the composite field. Ensure that the setting
# is reflected, and that proto.HasField('composite') and
# proto.composite.HasField('scalar') now both return True.
new_val = 20
setattr(composite_field, scalar_field_name, new_val)
self.assertEqual(new_val, getattr(composite_field, scalar_field_name))
# Hold on to a reference to the current composite_field object.
old_composite_field = composite_field
# Assert that the has methods now return true.
self.assertTrue(composite_field.HasField(scalar_field_name))
self.assertTrue(proto.HasField(composite_field_name))
# Now call the clear method...
proto.ClearField(composite_field_name)
# ...and ensure that the "has" bits are all back to False...
composite_field = getattr(proto, composite_field_name)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
# ...and ensure that the scalar field has returned to its default.
self.assertEqual(0, getattr(composite_field, scalar_field_name))
self.assertTrue(old_composite_field is not composite_field)
setattr(old_composite_field, scalar_field_name, new_val)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
self.assertEqual(0, getattr(composite_field, scalar_field_name))
# Test simple, single-level nesting when we set a scalar.
TestCompositeHasBits('optionalgroup', 'a')
TestCompositeHasBits('optional_nested_message', 'bb')
TestCompositeHasBits('optional_foreign_message', 'c')
TestCompositeHasBits('optional_import_message', 'd')
def testReferencesToNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
del proto
# A previous version had a bug where this would raise an exception when
# hitting a now-dead weak reference.
nested.bb = 23
def testDisconnectingNestedMessageBeforeSettingField(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testGetDefaultMessageAfterDisconnectingDefaultMessage(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message')
del proto
del nested
# Force a garbage collect so that the underlying CMessages are freed along
# with the Messages they point to. This is to make sure we're not deleting
# default message instances.
gc.collect()
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
def testDisconnectingNestedMessageAfterSettingField(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
nested.bb = 5
self.assertTrue(proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertEqual(5, nested.bb)
self.assertEqual(0, proto.optional_nested_message.bb)
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testDisconnectingNestedMessageBeforeGettingField(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message')
self.assertTrue(not proto.HasField('optional_nested_message'))
def testDisconnectingNestedMessageAfterMerge(self):
# This test exercises the code path that does not use ReleaseMessage().
# The underlying fear is that if we use ReleaseMessage() incorrectly,
# we will have memory leaks. It's hard to check that that doesn't happen,
# but at least we can exercise that code path to make sure it works.
proto1 = unittest_pb2.TestAllTypes()
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_nested_message.bb = 5
proto1.MergeFrom(proto2)
self.assertTrue(proto1.HasField('optional_nested_message'))
proto1.ClearField('optional_nested_message')
self.assertTrue(not proto1.HasField('optional_nested_message'))
def testDisconnectingLazyNestedMessage(self):
  """Releasing a lazy nested message must not corrupt memory.

  Python's pure implementation does not support lazy parsing, so the
  interesting code lives only in the C++ implementation -- but that
  implementation currently crashes with memory corruption here, hence
  the guard that runs this only under the pure-Python implementation.
  """
  # This test exercises releasing a nested message that is lazy. This test
  # only exercises real code in the C++ implementation as Python does not
  # support lazy parsing, but the current C++ implementation results in
  # memory corruption and a crash.
  if api_implementation.Type() != 'python':
    return
  proto = unittest_pb2.TestAllTypes()
  proto.optional_lazy_message.bb = 5
  proto.ClearField('optional_lazy_message')
  del proto
  gc.collect()
def testHasBitsWhenModifyingRepeatedFields(self):
  """Mutating a repeated field inside a submessage sets the parent's has-bit."""
  msg = unittest_pb2.TestNestedMessageHasBits()
  # Appending to a repeated scalar inside the submessage...
  msg.optional_nested_message.nestedmessage_repeated_int32.append(5)
  self.assertEqual(
      [5], msg.optional_nested_message.nestedmessage_repeated_int32)
  # ...marks the submessage itself as present.
  self.assertTrue(msg.HasField('optional_nested_message'))
  # The same holds for a repeated composite field within the submessage.
  msg.ClearField('optional_nested_message')
  self.assertFalse(msg.HasField('optional_nested_message'))
  msg.optional_nested_message.nestedmessage_repeated_foreignmessage.add()
  self.assertTrue(msg.HasField('optional_nested_message'))
def testHasBitsForManyLevelsOfNesting(self):
  """Deep reads leave has-bits unset; a deep write sets them all the way up."""
  node = unittest_pb2.TestMutualRecursionA()
  self.assertFalse(node.HasField('bb'))
  # A deep read returns defaults without setting any has-bits.
  self.assertEqual(0, node.bb.a.bb.a.bb.optional_int32)
  self.assertFalse(node.HasField('bb'))
  # A deep write sets the has-bit at every level above the written field.
  node.bb.a.bb.a.bb.optional_int32 = 5
  self.assertEqual(5, node.bb.a.bb.a.bb.optional_int32)
  self.assertTrue(node.HasField('bb'))
  self.assertTrue(node.bb.HasField('a'))
  self.assertTrue(node.bb.a.HasField('bb'))
  self.assertTrue(node.bb.a.bb.HasField('a'))
  self.assertTrue(node.bb.a.bb.a.HasField('bb'))
  # Nothing below the written leaf is marked present...
  self.assertFalse(node.bb.a.bb.a.bb.HasField('a'))
  # ...but the written scalar itself is.
  self.assertTrue(node.bb.a.bb.a.bb.HasField('optional_int32'))
def testSingularListFields(self):
  """ListFields() reports only set singular fields, ordered by field number."""
  msg = unittest_pb2.TestAllTypes()
  by_name = msg.DESCRIPTOR.fields_by_name
  msg.optional_fixed32 = 1
  msg.optional_int32 = 5
  msg.optional_string = 'foo'
  # Access the submessage but don't set it yet; it must not be listed.
  nested_message = msg.optional_nested_message
  self.assertEqual(
      [(by_name['optional_int32'], 5),
       (by_name['optional_fixed32'], 1),
       (by_name['optional_string'], 'foo')],
      msg.ListFields())
  # Once actually set, the submessage shows up too (still number-ordered,
  # not ordered by when the fields were assigned).
  msg.optional_nested_message.bb = 123
  self.assertEqual(
      [(by_name['optional_int32'], 5),
       (by_name['optional_fixed32'], 1),
       (by_name['optional_string'], 'foo'),
       (by_name['optional_nested_message'], nested_message)],
      msg.ListFields())
def testRepeatedListFields(self):
  """ListFields() reports non-empty repeated fields, ordered by field number."""
  msg = unittest_pb2.TestAllTypes()
  by_name = msg.DESCRIPTOR.fields_by_name
  msg.repeated_fixed32.append(1)
  msg.repeated_int32.append(5)
  msg.repeated_int32.append(11)
  msg.repeated_string.extend(['foo', 'bar'])
  msg.repeated_string.extend([])
  msg.repeated_string.append('baz')
  msg.repeated_string.extend(str(x) for x in xrange(2))
  msg.optional_int32 = 21
  # Merely reading a repeated field leaves it empty and unlisted.
  msg.repeated_bool
  self.assertEqual(
      [(by_name['optional_int32'], 21),
       (by_name['repeated_int32'], [5, 11]),
       (by_name['repeated_fixed32'], [1]),
       (by_name['repeated_string'], ['foo', 'bar', 'baz', '0', '1'])],
      msg.ListFields())
def testSingularListExtensions(self):
  """ListFields() reports set singular extensions, ordered by field number."""
  msg = unittest_pb2.TestAllExtensions()
  msg.Extensions[unittest_pb2.optional_fixed32_extension] = 1
  msg.Extensions[unittest_pb2.optional_int32_extension] = 5
  msg.Extensions[unittest_pb2.optional_string_extension] = 'foo'
  # Listed order follows field number, not assignment order.
  self.assertEqual(
      [(unittest_pb2.optional_int32_extension, 5),
       (unittest_pb2.optional_fixed32_extension, 1),
       (unittest_pb2.optional_string_extension, 'foo')],
      msg.ListFields())
def testRepeatedListExtensions(self):
  """ListFields() reports non-empty repeated extensions by field number."""
  msg = unittest_pb2.TestAllExtensions()
  msg.Extensions[unittest_pb2.repeated_fixed32_extension].append(1)
  msg.Extensions[unittest_pb2.repeated_int32_extension].append(5)
  msg.Extensions[unittest_pb2.repeated_int32_extension].append(11)
  for text in ('foo', 'bar', 'baz'):
    msg.Extensions[unittest_pb2.repeated_string_extension].append(text)
  msg.Extensions[unittest_pb2.optional_int32_extension] = 21
  self.assertEqual(
      [(unittest_pb2.optional_int32_extension, 21),
       (unittest_pb2.repeated_int32_extension, [5, 11]),
       (unittest_pb2.repeated_fixed32_extension, [1]),
       (unittest_pb2.repeated_string_extension, ['foo', 'bar', 'baz'])],
      msg.ListFields())
def testListFieldsAndExtensions(self):
  """ListFields() interleaves regular fields and extensions by number."""
  msg = unittest_pb2.TestFieldOrderings()
  test_util.SetAllFieldsAndExtensions(msg)
  by_name = msg.DESCRIPTOR.fields_by_name
  # Touch the extension handle (has no effect on the message itself).
  unittest_pb2.my_extension_int
  self.assertEqual(
      [(by_name['my_int'], 1),
       (unittest_pb2.my_extension_int, 23),
       (by_name['my_string'], 'foo'),
       (unittest_pb2.my_extension_string, 'bar'),
       (by_name['my_float'], 1.0)],
      msg.ListFields())
def testDefaultValues(self):
  """Unset fields read back their declared default values."""
  proto = unittest_pb2.TestAllTypes()
  # Fields without an explicit default use the type's zero value...
  zero_defaults = [
      ('optional_int32', 0),
      ('optional_int64', 0),
      ('optional_uint32', 0),
      ('optional_uint64', 0),
      ('optional_sint32', 0),
      ('optional_sint64', 0),
      ('optional_fixed32', 0),
      ('optional_fixed64', 0),
      ('optional_sfixed32', 0),
      ('optional_sfixed64', 0),
      ('optional_float', 0.0),
      ('optional_double', 0.0),
      ('optional_bool', False),
      ('optional_string', ''),
      ('optional_bytes', b''),
  ]
  # ...while 'default_*' fields carry explicit defaults from the .proto.
  explicit_defaults = [
      ('default_int32', 41),
      ('default_int64', 42),
      ('default_uint32', 43),
      ('default_uint64', 44),
      ('default_sint32', -45),
      ('default_sint64', 46),
      ('default_fixed32', 47),
      ('default_fixed64', 48),
      ('default_sfixed32', 49),
      ('default_sfixed64', -50),
      ('default_float', 51.5),
      ('default_double', 52e3),
      ('default_bool', True),
      ('default_string', 'hello'),
      ('default_bytes', b'world'),
      ('default_nested_enum', unittest_pb2.TestAllTypes.BAR),
      ('default_foreign_enum', unittest_pb2.FOREIGN_BAR),
      ('default_import_enum', unittest_import_pb2.IMPORT_BAR),
  ]
  for field_name, expected in zero_defaults + explicit_defaults:
    self.assertEqual(expected, getattr(proto, field_name))
  # Non-ASCII string defaults survive as unicode.
  proto = unittest_pb2.TestExtremeDefaultValues()
  self.assertEqual(u'\u1234', proto.utf8_string)
def testHasFieldWithUnknownFieldName(self):
  """HasField() raises ValueError for a field name the message lacks."""
  msg = unittest_pb2.TestAllTypes()
  self.assertRaises(ValueError, msg.HasField, 'nonexistent_field')
def testClearFieldWithUnknownFieldName(self):
  """ClearField() raises ValueError for a field name the message lacks."""
  msg = unittest_pb2.TestAllTypes()
  self.assertRaises(ValueError, msg.ClearField, 'nonexistent_field')
def testClearRemovesChildren(self):
  """CopyFrom an empty message must drop all previously added children.

  Guards against implementations (e.g. the C++ one, which keeps parallel
  message lists) that only partially clear the message.
  """
  proto = unittest_pb2.TestRequiredForeign()
  for _ in range(10):
    proto.repeated_message.add()
  empty = unittest_pb2.TestRequiredForeign()
  proto.CopyFrom(empty)
  # All ten children must be gone, so indexing raises.
  self.assertRaises(IndexError, proto.repeated_message.__getitem__, 5)
def testDisallowedAssignments(self):
  """Direct assignment to repeated or composite fields must fail."""
  # It's illegal to assign values directly to repeated fields or to
  # nonrepeated composite fields; every case must raise AttributeError.
  proto = unittest_pb2.TestAllTypes()
  bad_assignments = [
      # A scalar into a repeated field.
      (proto, 'repeated_int32', 10),
      # A list doesn't work either.
      (proto, 'repeated_int32', [10]),
      # Direct assignment to a singular composite field.
      (proto, 'optional_nested_message', 23),
      # Assignment to a repeated composite container without an index.
      (proto.repeated_nested_message, 'bb', 34),
      # Assignment to an attribute of a repeated scalar container.
      (proto.repeated_float, 'some_attribute', 34),
      # Unknown field names are rejected outright.
      (proto, 'nonexistent_field', 23),
  ]
  for target, attr_name, value in bad_assignments:
    self.assertRaises(AttributeError, setattr, target, attr_name, value)
def testSingleScalarTypeSafety(self):
  """Assigning a wrongly typed value to a scalar field raises TypeError."""
  proto = unittest_pb2.TestAllTypes()
  for field_name, bad_value in (('optional_int32', 1.1),
                                ('optional_int32', 'foo'),
                                ('optional_string', 10),
                                ('optional_bytes', 10)):
    self.assertRaises(TypeError, setattr, proto, field_name, bad_value)
def testIntegerTypes(self):
  """Integer fields come back as int or long depending on value and platform.

  Python 2 distinction: values that fit a platform int stay `int`; wider
  values are `long`.  Checked both after direct assignment and after a
  serialize/parse round trip.
  """
  def TestGetAndDeserialize(field_name, value, expected_type):
    # Assign, then verify the Python type directly and after a round trip.
    proto = unittest_pb2.TestAllTypes()
    setattr(proto, field_name, value)
    self.assertTrue(isinstance(getattr(proto, field_name), expected_type))
    proto2 = unittest_pb2.TestAllTypes()
    proto2.ParseFromString(proto.SerializeToString())
    self.assertTrue(isinstance(getattr(proto2, field_name), expected_type))
  TestGetAndDeserialize('optional_int32', 1, int)
  TestGetAndDeserialize('optional_int32', 1 << 30, int)
  TestGetAndDeserialize('optional_uint32', 1 << 30, int)
  # struct.calcsize('L') distinguishes 32-bit from 64-bit platforms.
  if struct.calcsize('L') == 4:
    # Python only has signed ints, so 32-bit python can't fit an uint32
    # in an int.
    TestGetAndDeserialize('optional_uint32', 1 << 31, long)
  else:
    # 64-bit python can fit uint32 inside an int
    TestGetAndDeserialize('optional_uint32', 1 << 31, int)
  TestGetAndDeserialize('optional_int64', 1 << 30, long)
  TestGetAndDeserialize('optional_int64', 1 << 60, long)
  TestGetAndDeserialize('optional_uint64', 1 << 30, long)
  TestGetAndDeserialize('optional_uint64', 1 << 60, long)
def testSingleScalarBoundsChecking(self):
  """Scalar fields accept their extreme values and reject out-of-range ones."""
  def CheckBounds(field_name, low, high):
    # Both extremes must be storable; one past either extreme must raise.
    pb = unittest_pb2.TestAllTypes()
    setattr(pb, field_name, low)
    self.assertEqual(low, getattr(pb, field_name))
    setattr(pb, field_name, high)
    self.assertEqual(high, getattr(pb, field_name))
    self.assertRaises(ValueError, setattr, pb, field_name, low - 1)
    self.assertRaises(ValueError, setattr, pb, field_name, high + 1)
  CheckBounds('optional_int32', -(1 << 31), (1 << 31) - 1)
  CheckBounds('optional_uint32', 0, 0xffffffff)
  CheckBounds('optional_int64', -(1 << 63), (1 << 63) - 1)
  CheckBounds('optional_uint64', 0, 0xffffffffffffffff)
  # Enum fields accept assignment of a valid numeric value.
  pb = unittest_pb2.TestAllTypes()
  pb.optional_nested_enum = 1
  self.assertEqual(1, pb.optional_nested_enum)
def testRepeatedScalarTypeSafety(self):
  """Repeated scalar containers reject values of the wrong type.

  Fixes two vacuous assertions: the original passed the repeated
  containers themselves as the callable (``proto.repeated_string, 10``),
  which raised TypeError only because containers are not callable, so the
  append() type check was never actually exercised.
  """
  proto = unittest_pb2.TestAllTypes()
  self.assertRaises(TypeError, proto.repeated_int32.append, 1.1)
  self.assertRaises(TypeError, proto.repeated_int32.append, 'foo')
  # Previously these called the container itself, passing for the wrong
  # reason; now they genuinely test append()'s type checking.
  self.assertRaises(TypeError, proto.repeated_string.append, 10)
  self.assertRaises(TypeError, proto.repeated_bytes.append, 10)
  proto.repeated_int32.append(10)
  proto.repeated_int32[0] = 23
  # Item assignment checks both the index and the value's type.
  self.assertRaises(IndexError, proto.repeated_int32.__setitem__, 500, 23)
  self.assertRaises(TypeError, proto.repeated_int32.__setitem__, 0, 'abc')
  # TODO: add repeated enum type-safety coverage (repeated_nested_enum).
def testSingleScalarGettersAndSetters(self):
  """Scalar fields round-trip assigned values, including 64-bit extremes."""
  msg = unittest_pb2.TestAllTypes()
  self.assertEqual(0, msg.optional_int32)
  msg.optional_int32 = 1
  self.assertEqual(1, msg.optional_int32)
  # uint64 must hold values past 32 bits, up to the full 64-bit maximum.
  for big_value in (0xffffffffffff, 0xffffffffffffffff):
    msg.optional_uint64 = big_value
    self.assertEqual(big_value, msg.optional_uint64)
  # TODO(robinson): Test all other scalar field types.
def testSingleScalarClearField(self):
  """ClearField resets a scalar to its default and clears its has-bit."""
  msg = unittest_pb2.TestAllTypes()
  # Clearing something that isn't set is a permitted no-op.
  msg.ClearField('optional_int32')
  msg.optional_int32 = 1
  self.assertTrue(msg.HasField('optional_int32'))
  msg.ClearField('optional_int32')
  self.assertEqual(0, msg.optional_int32)
  self.assertFalse(msg.HasField('optional_int32'))
  # TODO(robinson): Test all other scalar field types.
def testEnums(self):
  """Enum values are exposed on both instances and the message class."""
  proto = unittest_pb2.TestAllTypes()
  for name, number in (('FOO', 1), ('BAR', 2), ('BAZ', 3)):
    self.assertEqual(number, getattr(proto, name))
    self.assertEqual(number, getattr(unittest_pb2.TestAllTypes, name))
def testEnum_Name(self):
  """Name() maps enum numbers to declared names; unknown numbers raise."""
  for value, name in ((unittest_pb2.FOREIGN_FOO, 'FOREIGN_FOO'),
                      (unittest_pb2.FOREIGN_BAR, 'FOREIGN_BAR'),
                      (unittest_pb2.FOREIGN_BAZ, 'FOREIGN_BAZ')):
    self.assertEqual(name, unittest_pb2.ForeignEnum.Name(value))
  self.assertRaises(ValueError, unittest_pb2.ForeignEnum.Name, 11312)
  # Nested enums are reachable both via an instance and via the class.
  proto = unittest_pb2.TestAllTypes()
  for name in ('FOO', 'BAR', 'BAZ'):
    value = getattr(proto, name)
    self.assertEqual(name, proto.NestedEnum.Name(value))
    self.assertEqual(name, unittest_pb2.TestAllTypes.NestedEnum.Name(value))
  self.assertRaises(ValueError, proto.NestedEnum.Name, 11312)
  self.assertRaises(ValueError,
                    unittest_pb2.TestAllTypes.NestedEnum.Name, 11312)
def testEnum_Value(self):
  """Value() maps enum names to numbers; unknown names raise ValueError."""
  for name, value in (('FOREIGN_FOO', unittest_pb2.FOREIGN_FOO),
                      ('FOREIGN_BAR', unittest_pb2.FOREIGN_BAR),
                      ('FOREIGN_BAZ', unittest_pb2.FOREIGN_BAZ)):
    self.assertEqual(value, unittest_pb2.ForeignEnum.Value(name))
  self.assertRaises(ValueError, unittest_pb2.ForeignEnum.Value, 'FO')
  # Nested enums are reachable both via an instance and via the class.
  proto = unittest_pb2.TestAllTypes()
  for name in ('FOO', 'BAR', 'BAZ'):
    expected = getattr(proto, name)
    self.assertEqual(expected, proto.NestedEnum.Value(name))
    self.assertEqual(expected,
                     unittest_pb2.TestAllTypes.NestedEnum.Value(name))
  self.assertRaises(ValueError, proto.NestedEnum.Value, 'Foo')
  self.assertRaises(ValueError,
                    unittest_pb2.TestAllTypes.NestedEnum.Value, 'Foo')
def testEnum_KeysAndValues(self):
  """keys()/values()/items() reflect the enum's declaration order."""
  foreign = unittest_pb2.ForeignEnum
  self.assertEqual(['FOREIGN_FOO', 'FOREIGN_BAR', 'FOREIGN_BAZ'],
                   foreign.keys())
  self.assertEqual([4, 5, 6], foreign.values())
  self.assertEqual([('FOREIGN_FOO', 4), ('FOREIGN_BAR', 5),
                    ('FOREIGN_BAZ', 6)],
                   foreign.items())
  # Same API on a nested enum accessed through an instance.
  nested = unittest_pb2.TestAllTypes().NestedEnum
  self.assertEqual(['FOO', 'BAR', 'BAZ', 'NEG'], nested.keys())
  self.assertEqual([1, 2, 3, -1], nested.values())
  self.assertEqual([('FOO', 1), ('BAR', 2), ('BAZ', 3), ('NEG', -1)],
                   nested.items())
def testRepeatedScalars(self):
  """Exercises the full list-like API of a repeated scalar field.

  Covers truthiness, append/insert/extend, indexing (including negative
  and out-of-range), slice read/write/delete, iteration, and ClearField.
  The assertions are order-dependent: each step mutates the state the
  next step inspects.
  """
  proto = unittest_pb2.TestAllTypes()
  self.assertTrue(not proto.repeated_int32)
  self.assertEqual(0, len(proto.repeated_int32))
  proto.repeated_int32.append(5)
  proto.repeated_int32.append(10)
  proto.repeated_int32.append(15)
  self.assertTrue(proto.repeated_int32)
  self.assertEqual(3, len(proto.repeated_int32))
  self.assertEqual([5, 10, 15], proto.repeated_int32)
  # Test single retrieval.
  self.assertEqual(5, proto.repeated_int32[0])
  self.assertEqual(15, proto.repeated_int32[-1])
  # Test out-of-bounds indices.
  self.assertRaises(IndexError, proto.repeated_int32.__getitem__, 1234)
  self.assertRaises(IndexError, proto.repeated_int32.__getitem__, -1234)
  # Test incorrect types passed to __getitem__.
  self.assertRaises(TypeError, proto.repeated_int32.__getitem__, 'foo')
  self.assertRaises(TypeError, proto.repeated_int32.__getitem__, None)
  # Test single assignment.
  proto.repeated_int32[1] = 20
  self.assertEqual([5, 20, 15], proto.repeated_int32)
  # Test insertion.
  proto.repeated_int32.insert(1, 25)
  self.assertEqual([5, 25, 20, 15], proto.repeated_int32)
  # Test slice retrieval.
  proto.repeated_int32.append(30)
  self.assertEqual([25, 20, 15], proto.repeated_int32[1:4])
  self.assertEqual([5, 25, 20, 15, 30], proto.repeated_int32[:])
  # Test slice assignment with an iterator
  proto.repeated_int32[1:4] = (i for i in xrange(3))
  self.assertEqual([5, 0, 1, 2, 30], proto.repeated_int32)
  # Test slice assignment.
  proto.repeated_int32[1:4] = [35, 40, 45]
  self.assertEqual([5, 35, 40, 45, 30], proto.repeated_int32)
  # Test that we can use the field as an iterator.
  result = []
  for i in proto.repeated_int32:
    result.append(i)
  self.assertEqual([5, 35, 40, 45, 30], result)
  # Test single deletion.
  del proto.repeated_int32[2]
  self.assertEqual([5, 35, 45, 30], proto.repeated_int32)
  # Test slice deletion.
  del proto.repeated_int32[2:]
  self.assertEqual([5, 35], proto.repeated_int32)
  # Test extending.
  proto.repeated_int32.extend([3, 13])
  self.assertEqual([5, 35, 3, 13], proto.repeated_int32)
  # Test clearing.
  proto.ClearField('repeated_int32')
  self.assertTrue(not proto.repeated_int32)
  self.assertEqual(0, len(proto.repeated_int32))
  proto.repeated_int32.append(1)
  self.assertEqual(1, proto.repeated_int32[-1])
  # Test assignment to a negative index.
  proto.repeated_int32[-1] = 2
  self.assertEqual(2, proto.repeated_int32[-1])
  # Test deletion at negative indices.
  proto.repeated_int32[:] = [0, 1, 2, 3]
  del proto.repeated_int32[-1]
  self.assertEqual([0, 1, 2], proto.repeated_int32)
  del proto.repeated_int32[-2]
  self.assertEqual([0, 2], proto.repeated_int32)
  self.assertRaises(IndexError, proto.repeated_int32.__delitem__, -3)
  self.assertRaises(IndexError, proto.repeated_int32.__delitem__, 300)
  del proto.repeated_int32[-2:-1]
  self.assertEqual([2], proto.repeated_int32)
  # Deleting an empty out-of-range slice is a no-op (list semantics).
  del proto.repeated_int32[100:10000]
  self.assertEqual([2], proto.repeated_int32)
def testRepeatedScalarsRemove(self):
  """remove() deletes the first matching element; missing values raise."""
  msg = unittest_pb2.TestAllTypes()
  self.assertFalse(msg.repeated_int32)
  self.assertEqual(0, len(msg.repeated_int32))
  for value in (5, 10, 5, 5):
    msg.repeated_int32.append(value)
  self.assertEqual(4, len(msg.repeated_int32))
  # Only the first 5 is removed.
  msg.repeated_int32.remove(5)
  self.assertEqual(3, len(msg.repeated_int32))
  self.assertEqual(10, msg.repeated_int32[0])
  self.assertEqual(5, msg.repeated_int32[1])
  self.assertEqual(5, msg.repeated_int32[2])
  msg.repeated_int32.remove(5)
  self.assertEqual(2, len(msg.repeated_int32))
  self.assertEqual(10, msg.repeated_int32[0])
  self.assertEqual(5, msg.repeated_int32[1])
  msg.repeated_int32.remove(10)
  self.assertEqual(1, len(msg.repeated_int32))
  self.assertEqual(5, msg.repeated_int32[0])
  # Removing a value that isn't present raises ValueError.
  self.assertRaises(ValueError, msg.repeated_int32.remove, 123)
def testRepeatedComposites(self):
  """Exercises the list-like API of a repeated message field.

  Covers add(), truthiness, indexing and slicing, iteration, deletion,
  extend() with pre-built messages, ClearField, and add(**kwargs).
  Order-dependent: each step mutates the state the next step inspects,
  and m0..m4 track the identity of added elements.
  """
  proto = unittest_pb2.TestAllTypes()
  self.assertTrue(not proto.repeated_nested_message)
  self.assertEqual(0, len(proto.repeated_nested_message))
  m0 = proto.repeated_nested_message.add()
  m1 = proto.repeated_nested_message.add()
  self.assertTrue(proto.repeated_nested_message)
  self.assertEqual(2, len(proto.repeated_nested_message))
  self.assertListsEqual([m0, m1], proto.repeated_nested_message)
  self.assertTrue(isinstance(m0, unittest_pb2.TestAllTypes.NestedMessage))
  # Test out-of-bounds indices.
  self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
                    1234)
  self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
                    -1234)
  # Test incorrect types passed to __getitem__.
  self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
                    'foo')
  self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
                    None)
  # Test slice retrieval.
  m2 = proto.repeated_nested_message.add()
  m3 = proto.repeated_nested_message.add()
  m4 = proto.repeated_nested_message.add()
  self.assertListsEqual(
      [m1, m2, m3], proto.repeated_nested_message[1:4])
  self.assertListsEqual(
      [m0, m1, m2, m3, m4], proto.repeated_nested_message[:])
  self.assertListsEqual(
      [m0, m1], proto.repeated_nested_message[:2])
  self.assertListsEqual(
      [m2, m3, m4], proto.repeated_nested_message[2:])
  self.assertEqual(
      m0, proto.repeated_nested_message[0])
  self.assertListsEqual(
      [m0], proto.repeated_nested_message[:1])
  # Test that we can use the field as an iterator.
  result = []
  for i in proto.repeated_nested_message:
    result.append(i)
  self.assertListsEqual([m0, m1, m2, m3, m4], result)
  # Test single deletion.
  del proto.repeated_nested_message[2]
  self.assertListsEqual([m0, m1, m3, m4], proto.repeated_nested_message)
  # Test slice deletion.
  del proto.repeated_nested_message[2:]
  self.assertListsEqual([m0, m1], proto.repeated_nested_message)
  # Test extending.
  n1 = unittest_pb2.TestAllTypes.NestedMessage(bb=1)
  n2 = unittest_pb2.TestAllTypes.NestedMessage(bb=2)
  proto.repeated_nested_message.extend([n1,n2])
  self.assertEqual(4, len(proto.repeated_nested_message))
  self.assertEqual(n1, proto.repeated_nested_message[2])
  self.assertEqual(n2, proto.repeated_nested_message[3])
  # Test clearing.
  proto.ClearField('repeated_nested_message')
  self.assertTrue(not proto.repeated_nested_message)
  self.assertEqual(0, len(proto.repeated_nested_message))
  # Test constructing an element while adding it.
  proto.repeated_nested_message.add(bb=23)
  self.assertEqual(1, len(proto.repeated_nested_message))
  self.assertEqual(23, proto.repeated_nested_message[0].bb)
def testRepeatedCompositeRemove(self):
  """remove() on repeated composites matches by equality; absent raises."""
  proto = unittest_pb2.TestAllTypes()
  self.assertEqual(0, len(proto.repeated_nested_message))
  # Give each element a distinct bb so no two messages compare equal.
  first = proto.repeated_nested_message.add()
  first.bb = len(proto.repeated_nested_message)
  second = proto.repeated_nested_message.add()
  second.bb = len(proto.repeated_nested_message)
  self.assertTrue(first != second)
  third = proto.repeated_nested_message.add()
  third.bb = len(proto.repeated_nested_message)
  self.assertListsEqual([first, second, third],
                        proto.repeated_nested_message)
  self.assertEqual(3, len(proto.repeated_nested_message))
  proto.repeated_nested_message.remove(first)
  self.assertEqual(2, len(proto.repeated_nested_message))
  self.assertEqual(second, proto.repeated_nested_message[0])
  self.assertEqual(third, proto.repeated_nested_message[1])
  # Removing an already removed element (or None) raises ValueError.
  self.assertRaises(ValueError, proto.repeated_nested_message.remove, first)
  self.assertRaises(ValueError, proto.repeated_nested_message.remove, None)
  self.assertEqual(2, len(proto.repeated_nested_message))
  proto.repeated_nested_message.remove(third)
  self.assertEqual(1, len(proto.repeated_nested_message))
  self.assertEqual(second, proto.repeated_nested_message[0])
def testHandWrittenReflection(self):
  """A message class built from a hand-written Descriptor is functional.

  Hand written descriptors are only supported by the pure-Python
  implementation of the API, so this test is skipped elsewhere.
  """
  # Hand written extensions are only supported by the pure-Python
  # implementation of the API.
  if api_implementation.Type() != 'python':
    return
  FieldDescriptor = descriptor.FieldDescriptor
  # A single optional int64 field named 'foo_field', tag number 1.
  foo_field_descriptor = FieldDescriptor(
      name='foo_field', full_name='MyProto.foo_field',
      index=0, number=1, type=FieldDescriptor.TYPE_INT64,
      cpp_type=FieldDescriptor.CPPTYPE_INT64,
      label=FieldDescriptor.LABEL_OPTIONAL, default_value=0,
      containing_type=None, message_type=None, enum_type=None,
      is_extension=False, extension_scope=None,
      options=descriptor_pb2.FieldOptions())
  mydescriptor = descriptor.Descriptor(
      name='MyProto', full_name='MyProto', filename='ignored',
      containing_type=None, nested_types=[], enum_types=[],
      fields=[foo_field_descriptor], extensions=[],
      options=descriptor_pb2.MessageOptions())
  class MyProtoClass(message.Message):
    DESCRIPTOR = mydescriptor
    __metaclass__ = reflection.GeneratedProtocolMessageType
  # The generated class supports defaults, has-bits, and assignment.
  myproto_instance = MyProtoClass()
  self.assertEqual(0, myproto_instance.foo_field)
  self.assertTrue(not myproto_instance.HasField('foo_field'))
  myproto_instance.foo_field = 23
  self.assertEqual(23, myproto_instance.foo_field)
  self.assertTrue(myproto_instance.HasField('foo_field'))
def testDescriptorProtoSupport(self):
  """Descriptors built from a DescriptorProto yield working message classes.

  Hand-written descriptors/reflection are only supported by the pure-Python
  implementation of the API, so this is skipped elsewhere.

  Improvements over the original: ``dict.has_key()`` (deprecated, removed
  in Python 3) replaced with ``in``, and the duplicated repeated-field
  setup folded into the helper via a backward-compatible ``label``
  parameter.
  """
  if api_implementation.Type() != 'python':
    return
  fdp = descriptor_pb2.FieldDescriptorProto
  def AddDescriptorField(proto, field_name, field_type, label=None):
    """Append one auto-numbered field to proto; label defaults to optional."""
    AddDescriptorField.field_index += 1
    new_field = proto.field.add()
    new_field.name = field_name
    new_field.type = field_type
    new_field.number = AddDescriptorField.field_index
    new_field.label = label if label is not None else fdp.LABEL_OPTIONAL
  AddDescriptorField.field_index = 0
  desc_proto = descriptor_pb2.DescriptorProto()
  desc_proto.name = 'Car'
  AddDescriptorField(desc_proto, 'name', fdp.TYPE_STRING)
  AddDescriptorField(desc_proto, 'year', fdp.TYPE_INT64)
  AddDescriptorField(desc_proto, 'automatic', fdp.TYPE_BOOL)
  AddDescriptorField(desc_proto, 'price', fdp.TYPE_DOUBLE)
  # Add a repeated field
  AddDescriptorField(desc_proto, 'owners', fdp.TYPE_STRING,
                     label=fdp.LABEL_REPEATED)
  desc = descriptor.MakeDescriptor(desc_proto)
  for expected_field in ('name', 'year', 'automatic', 'price', 'owners'):
    self.assertTrue(expected_field in desc.fields_by_name)
  class CarMessage(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = desc
  prius = CarMessage()
  prius.name = 'prius'
  prius.year = 2010
  prius.automatic = True
  prius.price = 25134.75
  prius.owners.extend(['bob', 'susan'])
  # Round-trip through serialization and verify equality.
  serialized_prius = prius.SerializeToString()
  new_prius = reflection.ParseMessage(desc, serialized_prius)
  self.assertTrue(new_prius is not prius)
  self.assertEqual(prius, new_prius)
  # these are unnecessary assuming message equality works as advertised but
  # explicitly check to be safe since we're mucking about in metaclass foo
  self.assertEqual(prius.name, new_prius.name)
  self.assertEqual(prius.year, new_prius.year)
  self.assertEqual(prius.automatic, new_prius.automatic)
  self.assertEqual(prius.price, new_prius.price)
  self.assertEqual(prius.owners, new_prius.owners)
def testTopLevelExtensionsForOptionalScalar(self):
  """Optional scalar extensions: default read, set, has-bit, and clear."""
  msg = unittest_pb2.TestAllExtensions()
  extension = unittest_pb2.optional_int32_extension
  self.assertFalse(msg.HasExtension(extension))
  self.assertEqual(0, msg.Extensions[extension])
  # As with regular scalar fields, a plain read doesn't set the "has" bit.
  self.assertFalse(msg.HasExtension(extension))
  msg.Extensions[extension] = 23
  self.assertEqual(23, msg.Extensions[extension])
  self.assertTrue(msg.HasExtension(extension))
  # Clearing restores the default and drops the "has" bit.
  msg.ClearExtension(extension)
  self.assertEqual(0, msg.Extensions[extension])
  self.assertFalse(msg.HasExtension(extension))
def testTopLevelExtensionsForRepeatedScalar(self):
  """Repeated scalar extensions: append, clear-detaches, no assignment."""
  msg = unittest_pb2.TestAllExtensions()
  extension = unittest_pb2.repeated_string_extension
  self.assertEqual(0, len(msg.Extensions[extension]))
  msg.Extensions[extension].append('foo')
  self.assertEqual(['foo'], msg.Extensions[extension])
  # Clearing replaces the container; the old one is detached.
  old_container = msg.Extensions[extension]
  msg.ClearExtension(extension)
  self.assertEqual(0, len(msg.Extensions[extension]))
  self.assertTrue(old_container is not msg.Extensions[extension])
  # Direct assignment (Extensions[extension] = 'a') is disallowed.
  self.assertRaises(TypeError, operator.setitem, msg.Extensions,
                    extension, 'a')
def testTopLevelExtensionsForOptionalMessage(self):
  """Message extensions: lazy has-bit, disconnection on clear, no assignment."""
  msg = unittest_pb2.TestAllExtensions()
  extension = unittest_pb2.optional_foreign_message_extension
  self.assertFalse(msg.HasExtension(extension))
  self.assertEqual(0, msg.Extensions[extension].c)
  # As with normal (non-extension) fields, merely reading the submessage
  # shouldn't set the "has" bit.
  self.assertFalse(msg.HasExtension(extension))
  msg.Extensions[extension].c = 23
  self.assertEqual(23, msg.Extensions[extension].c)
  self.assertTrue(msg.HasExtension(extension))
  # After clearing, the previously returned submessage is disconnected...
  foreign_message = msg.Extensions[extension]
  msg.ClearExtension(extension)
  self.assertTrue(foreign_message is not msg.Extensions[extension])
  # ...so mutating it must not set any "has" bits on the extendee.
  foreign_message.c = 42
  self.assertEqual(42, foreign_message.c)
  self.assertTrue(foreign_message.HasField('c'))
  self.assertFalse(msg.HasExtension(extension))
  # Direct assignment (Extensions[extension] = 'a') is disallowed.
  self.assertRaises(TypeError, operator.setitem, msg.Extensions,
                    extension, 'a')
def testTopLevelExtensionsForRepeatedMessage(self):
  """Repeated group extensions: add/mutate elements, clear, no assignment."""
  msg = unittest_pb2.TestAllExtensions()
  extension = unittest_pb2.repeatedgroup_extension
  self.assertEqual(0, len(msg.Extensions[extension]))
  group = msg.Extensions[extension].add()
  # Mutations through the returned element are visible via the container.
  group.a = 23
  self.assertEqual(23, msg.Extensions[extension][0].a)
  group.a = 42
  self.assertEqual(42, msg.Extensions[extension][0].a)
  # Clearing replaces the container; the old one is detached.
  old_container = msg.Extensions[extension]
  msg.ClearExtension(extension)
  self.assertEqual(0, len(msg.Extensions[extension]))
  self.assertTrue(old_container is not msg.Extensions[extension])
  # Direct assignment (Extensions[extension] = 'a') is disallowed.
  self.assertRaises(TypeError, operator.setitem, msg.Extensions,
                    extension, 'a')
def testNestedExtensions(self):
  """Extensions declared inside another message behave like top-level ones."""
  msg = unittest_pb2.TestAllExtensions()
  extension = unittest_pb2.TestRequired.single
  # We just test the non-repeated case.
  self.assertFalse(msg.HasExtension(extension))
  required = msg.Extensions[extension]
  self.assertEqual(0, required.a)
  # A plain read leaves the "has" bit unset...
  self.assertFalse(msg.HasExtension(extension))
  # ...while a write through the returned submessage sets it.
  required.a = 23
  self.assertEqual(23, msg.Extensions[extension].a)
  self.assertTrue(msg.HasExtension(extension))
  # Clearing disconnects the previously returned submessage.
  msg.ClearExtension(extension)
  self.assertTrue(required is not msg.Extensions[extension])
  self.assertFalse(msg.HasExtension(extension))
def testRegisteredExtensions(self):
  """Extensions register by name and number, and only on their extendee.

  Also replaces the deprecated ``assertEquals`` alias with ``assertEqual``
  for consistency with the rest of this file.
  """
  self.assertTrue('protobuf_unittest.optional_int32_extension' in
                  unittest_pb2.TestAllExtensions._extensions_by_name)
  self.assertTrue(1 in unittest_pb2.TestAllExtensions._extensions_by_number)
  # Make sure extensions haven't been registered into types that shouldn't
  # have any.
  self.assertEqual(0, len(unittest_pb2.TestAllTypes._extensions_by_name))
# If message A directly contains message B, and
# a.HasField('b') is currently False, then mutating any
# extension in B should change a.HasField('b') to True
# (and so on up the object tree).
def testHasBitsForAncestorsOfExtendedMessage(self):
  """Mutating an extension inside a submessage sets ancestor has-bits.

  Checks all four extension kinds (optional/repeated scalar,
  optional/repeated message): a plain read leaves the parent's
  'submessage' has-bit unset, while any write sets it.
  """
  # Optional scalar extension.
  toplevel = more_extensions_pb2.TopLevelMessage()
  self.assertTrue(not toplevel.HasField('submessage'))
  self.assertEqual(0, toplevel.submessage.Extensions[
      more_extensions_pb2.optional_int_extension])
  self.assertTrue(not toplevel.HasField('submessage'))
  toplevel.submessage.Extensions[
      more_extensions_pb2.optional_int_extension] = 23
  self.assertEqual(23, toplevel.submessage.Extensions[
      more_extensions_pb2.optional_int_extension])
  self.assertTrue(toplevel.HasField('submessage'))
  # Repeated scalar extension.
  toplevel = more_extensions_pb2.TopLevelMessage()
  self.assertTrue(not toplevel.HasField('submessage'))
  self.assertEqual([], toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_int_extension])
  self.assertTrue(not toplevel.HasField('submessage'))
  toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_int_extension].append(23)
  self.assertEqual([23], toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_int_extension])
  self.assertTrue(toplevel.HasField('submessage'))
  # Optional message extension.
  toplevel = more_extensions_pb2.TopLevelMessage()
  self.assertTrue(not toplevel.HasField('submessage'))
  self.assertEqual(0, toplevel.submessage.Extensions[
      more_extensions_pb2.optional_message_extension].foreign_message_int)
  self.assertTrue(not toplevel.HasField('submessage'))
  toplevel.submessage.Extensions[
      more_extensions_pb2.optional_message_extension].foreign_message_int = 23
  self.assertEqual(23, toplevel.submessage.Extensions[
      more_extensions_pb2.optional_message_extension].foreign_message_int)
  self.assertTrue(toplevel.HasField('submessage'))
  # Repeated message extension.
  toplevel = more_extensions_pb2.TopLevelMessage()
  self.assertTrue(not toplevel.HasField('submessage'))
  self.assertEqual(0, len(toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_message_extension]))
  self.assertTrue(not toplevel.HasField('submessage'))
  foreign = toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_message_extension].add()
  self.assertEqual(foreign, toplevel.submessage.Extensions[
      more_extensions_pb2.repeated_message_extension][0])
  self.assertTrue(toplevel.HasField('submessage'))
def testDisconnectionAfterClearingEmptyMessage(self):
toplevel = more_extensions_pb2.TopLevelMessage()
extendee_proto = toplevel.submessage
extension = more_extensions_pb2.optional_message_extension
extension_proto = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
extension_proto.foreign_message_int = 23
self.assertTrue(extension_proto is not extendee_proto.Extensions[extension])
  def testExtensionFailureModes(self):
    """Non-handle arguments and foreign extension handles must raise KeyError."""
    extendee_proto = unittest_pb2.TestAllExtensions()
    # Try non-extension-handle arguments to HasExtension,
    # ClearExtension(), and Extensions[]...
    self.assertRaises(KeyError, extendee_proto.HasExtension, 1234)
    self.assertRaises(KeyError, extendee_proto.ClearExtension, 1234)
    self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__, 1234)
    self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__, 1234, 5)
    # Try something that *is* an extension handle, just not for
    # this message...
    unknown_handle = more_extensions_pb2.optional_int_extension
    self.assertRaises(KeyError, extendee_proto.HasExtension,
                      unknown_handle)
    self.assertRaises(KeyError, extendee_proto.ClearExtension,
                      unknown_handle)
    self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__,
                      unknown_handle)
    self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__,
                      unknown_handle, 5)
    # Try call HasExtension() with a valid handle, but for a
    # *repeated* field. (Just as with non-extension repeated
    # fields, Has*() isn't supported for extension repeated fields).
    self.assertRaises(KeyError, extendee_proto.HasExtension,
                      unittest_pb2.repeated_string_extension)
def testStaticParseFrom(self):
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
string1 = proto1.SerializeToString()
proto2 = unittest_pb2.TestAllTypes.FromString(string1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
def testMergeFromSingularField(self):
# Test merge with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto2 = unittest_pb2.TestAllTypes()
# This shouldn't get overwritten.
proto2.optional_string = 'value'
proto2.MergeFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('value', proto2.optional_string)
def testMergeFromRepeatedField(self):
# Test merge with just a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.MergeFrom(proto1)
self.assertEqual(0, proto2.repeated_int32[0])
self.assertEqual(1, proto2.repeated_int32[1])
self.assertEqual(2, proto2.repeated_int32[2])
def testMergeFromOptionalGroup(self):
# Test merge with an optional group.
proto1 = unittest_pb2.TestAllTypes()
proto1.optionalgroup.a = 12
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
self.assertEqual(12, proto2.optionalgroup.a)
  def testMergeFromRepeatedNestedMessage(self):
    """MergeFrom() appends repeated nested messages after existing entries."""
    # Test merge with a repeated nested message.
    proto1 = unittest_pb2.TestAllTypes()
    m = proto1.repeated_nested_message.add()
    m.bb = 123
    m = proto1.repeated_nested_message.add()
    m.bb = 321
    proto2 = unittest_pb2.TestAllTypes()
    m = proto2.repeated_nested_message.add()
    m.bb = 999
    proto2.MergeFrom(proto1)
    self.assertEqual(999, proto2.repeated_nested_message[0].bb)
    self.assertEqual(123, proto2.repeated_nested_message[1].bb)
    self.assertEqual(321, proto2.repeated_nested_message[2].bb)
    # Merging on the repeated composite container directly also appends.
    proto3 = unittest_pb2.TestAllTypes()
    proto3.repeated_nested_message.MergeFrom(proto2.repeated_nested_message)
    self.assertEqual(999, proto3.repeated_nested_message[0].bb)
    self.assertEqual(123, proto3.repeated_nested_message[1].bb)
    self.assertEqual(321, proto3.repeated_nested_message[2].bb)
def testMergeFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testMergeFromExtensionsSingular(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.optional_int32_extension] = 1
proto2 = unittest_pb2.TestAllExtensions()
proto2.MergeFrom(proto1)
self.assertEqual(
1, proto2.Extensions[unittest_pb2.optional_int32_extension])
  def testMergeFromExtensionsRepeated(self):
    """MergeFrom() appends repeated extension values after existing ones."""
    proto1 = unittest_pb2.TestAllExtensions()
    proto1.Extensions[unittest_pb2.repeated_int32_extension].append(1)
    proto1.Extensions[unittest_pb2.repeated_int32_extension].append(2)
    proto2 = unittest_pb2.TestAllExtensions()
    proto2.Extensions[unittest_pb2.repeated_int32_extension].append(0)
    proto2.MergeFrom(proto1)
    self.assertEqual(
        3, len(proto2.Extensions[unittest_pb2.repeated_int32_extension]))
    self.assertEqual(
        0, proto2.Extensions[unittest_pb2.repeated_int32_extension][0])
    self.assertEqual(
        1, proto2.Extensions[unittest_pb2.repeated_int32_extension][1])
    self.assertEqual(
        2, proto2.Extensions[unittest_pb2.repeated_int32_extension][2])
  def testMergeFromExtensionsNestedMessage(self):
    """MergeFrom() appends repeated nested-message extension entries."""
    proto1 = unittest_pb2.TestAllExtensions()
    ext1 = proto1.Extensions[
        unittest_pb2.repeated_nested_message_extension]
    m = ext1.add()
    m.bb = 222
    m = ext1.add()
    m.bb = 333
    proto2 = unittest_pb2.TestAllExtensions()
    ext2 = proto2.Extensions[
        unittest_pb2.repeated_nested_message_extension]
    m = ext2.add()
    m.bb = 111
    proto2.MergeFrom(proto1)
    # Re-fetch the container; merged entries follow the pre-existing one.
    ext2 = proto2.Extensions[
        unittest_pb2.repeated_nested_message_extension]
    self.assertEqual(3, len(ext2))
    self.assertEqual(111, ext2[0].bb)
    self.assertEqual(222, ext2[1].bb)
    self.assertEqual(333, ext2[2].bb)
  def testMergeFromBug(self):
    """Merging must not mark an unset (but instantiated) submessage present."""
    message1 = unittest_pb2.TestAllTypes()
    message2 = unittest_pb2.TestAllTypes()
    # Cause optional_nested_message to be instantiated within message1, even
    # though it is not considered to be "present".
    message1.optional_nested_message
    self.assertFalse(message1.HasField('optional_nested_message'))
    # Merge into message2. This should not instantiate the field in message2.
    message2.MergeFrom(message1)
    self.assertFalse(message2.HasField('optional_nested_message'))
def testCopyFromSingularField(self):
# Test copy with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto1.optional_string = 'important-text'
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_string = 'value'
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('important-text', proto2.optional_string)
def testCopyFromRepeatedField(self):
# Test copy with a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.repeated_int32[0])
self.assertEqual(2, proto2.repeated_int32[1])
def testCopyFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.CopyFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testCopyFromSelf(self):
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.optional_int32 = 2
proto1.optional_string = 'important-text'
proto1.CopyFrom(proto1)
self.assertEqual(1, proto1.repeated_int32[0])
self.assertEqual(2, proto1.optional_int32)
self.assertEqual('important-text', proto1.optional_string)
  def testCopyFromBadType(self):
    """CopyFrom() across message types should raise TypeError (C++ only)."""
    # The python implementation doesn't raise an exception in this
    # case. In theory it should.
    if api_implementation.Type() == 'python':
      return
    proto1 = unittest_pb2.TestAllTypes()
    proto2 = unittest_pb2.TestAllExtensions()
    self.assertRaises(TypeError, proto1.CopyFrom, proto2)
  def testDeepCopy(self):
    """copy.deepcopy() works on messages and repeated scalar containers."""
    proto1 = unittest_pb2.TestAllTypes()
    proto1.optional_int32 = 1
    proto2 = copy.deepcopy(proto1)
    self.assertEqual(1, proto2.optional_int32)
    proto1.repeated_int32.append(2)
    proto1.repeated_int32.append(3)
    # The deep copy of a repeated scalar container compares equal to a list.
    container = copy.deepcopy(proto1.repeated_int32)
    self.assertEqual([2, 3], container)
    # TODO(anuraag): Implement deepcopy for repeated composite / extension dict
def testClear(self):
proto = unittest_pb2.TestAllTypes()
# C++ implementation does not support lazy fields right now so leave it
# out for now.
if api_implementation.Type() == 'python':
test_util.SetAllFields(proto)
else:
test_util.SetAllNonLazyFields(proto)
# Clear the message.
proto.Clear()
self.assertEquals(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllTypes()
self.assertEquals(proto, empty_proto)
# Test if extensions which were set are cleared.
proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(proto)
# Clear the message.
proto.Clear()
self.assertEquals(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllExtensions()
self.assertEquals(proto, empty_proto)
  def testDisconnectingBeforeClear(self):
    """Clear() disconnects previously obtained submessage objects."""
    proto = unittest_pb2.TestAllTypes()
    nested = proto.optional_nested_message
    proto.Clear()
    self.assertTrue(nested is not proto.optional_nested_message)
    # Mutating the detached object must not re-attach it to the parent.
    nested.bb = 23
    self.assertTrue(not proto.HasField('optional_nested_message'))
    self.assertEqual(0, proto.optional_nested_message.bb)
    proto = unittest_pb2.TestAllTypes()
    nested = proto.optional_nested_message
    nested.bb = 5
    foreign = proto.optional_foreign_message
    foreign.c = 6
    proto.Clear()
    self.assertTrue(nested is not proto.optional_nested_message)
    self.assertTrue(foreign is not proto.optional_foreign_message)
    # Detached copies keep the values they had at Clear() time.
    self.assertEqual(5, nested.bb)
    self.assertEqual(6, foreign.c)
    nested.bb = 15
    foreign.c = 16
    self.assertFalse(proto.HasField('optional_nested_message'))
    self.assertEqual(0, proto.optional_nested_message.bb)
    self.assertFalse(proto.HasField('optional_foreign_message'))
    self.assertEqual(0, proto.optional_foreign_message.c)
  def testOneOf(self):
    """Setting one member of a oneof clears the previously set member."""
    proto = unittest_pb2.TestAllTypes()
    proto.oneof_uint32 = 10
    proto.oneof_nested_message.bb = 11
    self.assertEqual(11, proto.oneof_nested_message.bb)
    # Setting the nested message displaced the uint32 member.
    self.assertFalse(proto.HasField('oneof_uint32'))
    nested = proto.oneof_nested_message
    proto.oneof_string = 'abc'
    self.assertEqual('abc', proto.oneof_string)
    # The detached nested message keeps its value after being displaced.
    self.assertEqual(11, nested.bb)
    self.assertFalse(proto.HasField('oneof_nested_message'))
  def assertInitialized(self, proto):
    """Helper: asserts that `proto` is initialized and fully serializable."""
    self.assertTrue(proto.IsInitialized())
    # Neither method should raise an exception.
    proto.SerializeToString()
    proto.SerializePartialToString()
  def assertNotInitialized(self, proto):
    """Helper: asserts `proto` is uninitialized and full serialization fails."""
    self.assertFalse(proto.IsInitialized())
    self.assertRaises(message.EncodeError, proto.SerializeToString)
    # "Partial" serialization doesn't care if message is uninitialized.
    proto.SerializePartialToString()
  def testIsInitialized(self):
    """IsInitialized() tracks required fields in submessages and extensions."""
    # Trivial cases - all optional fields and extensions.
    proto = unittest_pb2.TestAllTypes()
    self.assertInitialized(proto)
    proto = unittest_pb2.TestAllExtensions()
    self.assertInitialized(proto)
    # The case of uninitialized required fields.
    proto = unittest_pb2.TestRequired()
    self.assertNotInitialized(proto)
    proto.a = proto.b = proto.c = 2
    self.assertInitialized(proto)
    # The case of uninitialized submessage.
    proto = unittest_pb2.TestRequiredForeign()
    self.assertInitialized(proto)
    proto.optional_message.a = 1
    self.assertNotInitialized(proto)
    proto.optional_message.b = 0
    proto.optional_message.c = 0
    self.assertInitialized(proto)
    # Uninitialized repeated submessage.
    message1 = proto.repeated_message.add()
    self.assertNotInitialized(proto)
    message1.a = message1.b = message1.c = 0
    self.assertInitialized(proto)
    # Uninitialized repeated group in an extension.
    proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.TestRequired.multi
    message1 = proto.Extensions[extension].add()
    message2 = proto.Extensions[extension].add()
    self.assertNotInitialized(proto)
    message1.a = 1
    message1.b = 1
    message1.c = 1
    self.assertNotInitialized(proto)
    message2.a = 2
    message2.b = 2
    message2.c = 2
    self.assertInitialized(proto)
    # Uninitialized nonrepeated message in an extension.
    proto = unittest_pb2.TestAllExtensions()
    extension = unittest_pb2.TestRequired.single
    proto.Extensions[extension].a = 1
    self.assertNotInitialized(proto)
    proto.Extensions[extension].b = 2
    proto.Extensions[extension].c = 3
    self.assertInitialized(proto)
    # Try passing an errors list.
    errors = []
    proto = unittest_pb2.TestRequired()
    self.assertFalse(proto.IsInitialized(errors))
    self.assertEqual(errors, ['a', 'b', 'c'])
  @basetest.unittest.skipIf(
      api_implementation.Type() != 'cpp' or api_implementation.Version() != 2,
      'Errors are only available from the most recent C++ implementation.')
  def testFileDescriptorErrors(self):
    """Registering duplicate symbols in the C++ descriptor pool must fail."""
    file_name = 'test_file_descriptor_errors.proto'
    package_name = 'test_file_descriptor_errors.proto'
    file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
    file_descriptor_proto.name = file_name
    file_descriptor_proto.package = package_name
    m1 = file_descriptor_proto.message_type.add()
    m1.name = 'msg1'
    # Compiles the proto into the C++ descriptor pool
    descriptor.FileDescriptor(
        file_name,
        package_name,
        serialized_pb=file_descriptor_proto.SerializeToString())
    # Add a FileDescriptorProto that has duplicate symbols
    another_file_name = 'another_test_file_descriptor_errors.proto'
    file_descriptor_proto.name = another_file_name
    m2 = file_descriptor_proto.message_type.add()
    m2.name = 'msg2'
    with self.assertRaises(TypeError) as cm:
      descriptor.FileDescriptor(
          another_file_name,
          package_name,
          serialized_pb=file_descriptor_proto.SerializeToString())
    self.assertTrue(hasattr(cm, 'exception'), '%s not raised' %
                    getattr(cm.expected, '__name__', cm.expected))
    self.assertIn('test_file_descriptor_errors.proto', str(cm.exception))
    # Error message will say something about this definition being a
    # duplicate, though we don't check the message exactly to avoid a
    # dependency on the C++ logging code.
    self.assertIn('test_file_descriptor_errors.msg1', str(cm.exception))
  def testStringUTF8Encoding(self):
    """String fields accept/return unicode; bytes fields reject unicode."""
    proto = unittest_pb2.TestAllTypes()
    # Assignment of a unicode object to a field of type 'bytes' is not allowed.
    self.assertRaises(TypeError,
                      setattr, proto, 'optional_bytes', u'unicode object')
    # Check that the default value is of python's 'unicode' type.
    self.assertEqual(type(proto.optional_string), unicode)
    proto.optional_string = unicode('Testing')
    self.assertEqual(proto.optional_string, str('Testing'))
    # Assign a value of type 'str' which can be encoded in UTF-8.
    proto.optional_string = str('Testing')
    self.assertEqual(proto.optional_string, unicode('Testing'))
    # Try to assign a 'str' value which contains bytes that aren't 7-bit ASCII.
    self.assertRaises(ValueError,
                      setattr, proto, 'optional_string', b'a\x80a')
    if str is bytes:  # PY2
      # Assign a 'str' object which contains a UTF-8 encoded string.
      self.assertRaises(ValueError,
                        setattr, proto, 'optional_string', 'Тест')
    else:
      proto.optional_string = 'Тест'
    # No exception thrown.
    proto.optional_string = 'abc'
  def testStringUTF8Serialization(self):
    """A UTF-8 string round-trips through the MessageSet wire format."""
    proto = unittest_mset_pb2.TestMessageSet()
    extension_message = unittest_mset_pb2.TestMessageSetExtension2
    extension = extension_message.message_set_extension
    test_utf8 = u'Тест'
    test_utf8_bytes = test_utf8.encode('utf-8')
    # 'Test' in another language, using UTF-8 charset.
    proto.Extensions[extension].str = test_utf8
    # Serialize using the MessageSet wire format (this is specified in the
    # .proto file).
    serialized = proto.SerializeToString()
    # Check byte size.
    self.assertEqual(proto.ByteSize(), len(serialized))
    raw = unittest_mset_pb2.RawMessageSet()
    bytes_read = raw.MergeFromString(serialized)
    self.assertEqual(len(serialized), bytes_read)
    message2 = unittest_mset_pb2.TestMessageSetExtension2()
    self.assertEqual(1, len(raw.item))
    # Check that the type_id is the same as the tag ID in the .proto file.
    self.assertEqual(raw.item[0].type_id, 1547769)
    # Check the actual bytes on the wire.
    self.assertTrue(
        raw.item[0].message.endswith(test_utf8_bytes))
    bytes_read = message2.MergeFromString(raw.item[0].message)
    self.assertEqual(len(raw.item[0].message), bytes_read)
    self.assertEqual(type(message2.str), unicode)
    self.assertEqual(message2.str, test_utf8)
    # The pure Python API throws an exception on MergeFromString(),
    # if any of the string fields of the message can't be UTF-8 decoded.
    # The C++ implementation of the API has no way to check that on
    # MergeFromString and thus has no way to throw the exception.
    #
    # The pure Python API always returns objects of type 'unicode' (UTF-8
    # encoded), or 'bytes' (in 7 bit ASCII).
    badbytes = raw.item[0].message.replace(
        test_utf8_bytes, len(test_utf8_bytes) * b'\xff')
    unicode_decode_failed = False
    try:
      message2.MergeFromString(badbytes)
    except UnicodeDecodeError:
      unicode_decode_failed = True
    string_field = message2.str
    self.assertTrue(unicode_decode_failed or type(string_field) is bytes)
  def testBytesInTextFormat(self):
    """Bytes fields are octal-escaped in text-format output."""
    proto = unittest_pb2.TestAllTypes(optional_bytes=b'\x00\x7f\x80\xff')
    self.assertEqual(u'optional_bytes: "\\000\\177\\200\\377"\n',
                     unicode(proto))
  def testEmptyNestedMessage(self):
    """Merging/parsing an empty payload still marks a submessage present."""
    proto = unittest_pb2.TestAllTypes()
    proto.optional_nested_message.MergeFrom(
        unittest_pb2.TestAllTypes.NestedMessage())
    self.assertTrue(proto.HasField('optional_nested_message'))
    proto = unittest_pb2.TestAllTypes()
    proto.optional_nested_message.CopyFrom(
        unittest_pb2.TestAllTypes.NestedMessage())
    self.assertTrue(proto.HasField('optional_nested_message'))
    proto = unittest_pb2.TestAllTypes()
    bytes_read = proto.optional_nested_message.MergeFromString(b'')
    self.assertEqual(0, bytes_read)
    self.assertTrue(proto.HasField('optional_nested_message'))
    proto = unittest_pb2.TestAllTypes()
    proto.optional_nested_message.ParseFromString(b'')
    self.assertTrue(proto.HasField('optional_nested_message'))
    # The presence bit must survive a serialize/parse round-trip.
    serialized = proto.SerializeToString()
    proto2 = unittest_pb2.TestAllTypes()
    self.assertEqual(
        len(serialized),
        proto2.MergeFromString(serialized))
    self.assertTrue(proto2.HasField('optional_nested_message'))
def testSetInParent(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.HasField('optionalgroup'))
proto.optionalgroup.SetInParent()
self.assertTrue(proto.HasField('optionalgroup'))
# Since we had so many tests for protocol buffer equality, we broke these out
# into separate TestCase classes.
class TestAllTypesEqualityTest(basetest.TestCase):
  """Basic equality checks on two freshly created TestAllTypes messages."""

  def setUp(self):
    self.first_proto = unittest_pb2.TestAllTypes()
    self.second_proto = unittest_pb2.TestAllTypes()

  def testNotHashable(self):
    """Messages are mutable, so hash() must raise TypeError."""
    self.assertRaises(TypeError, hash, self.first_proto)

  def testSelfEquality(self):
    """A message always compares equal to itself."""
    self.assertEqual(self.first_proto, self.first_proto)

  def testEmptyProtosEqual(self):
    """Two default-constructed messages compare equal."""
    self.assertEqual(self.first_proto, self.second_proto)
class FullProtosEqualityTest(basetest.TestCase):
  """Equality tests using completely-full protos as a starting point."""
  def setUp(self):
    # Two identically populated messages per test.
    self.first_proto = unittest_pb2.TestAllTypes()
    self.second_proto = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(self.first_proto)
    test_util.SetAllFields(self.second_proto)
  def testNotHashable(self):
    """Messages are mutable, so hash() must raise TypeError."""
    self.assertRaises(TypeError, hash, self.first_proto)
  def testNoneNotEqual(self):
    """A message never compares equal to None, in either order."""
    self.assertNotEqual(self.first_proto, None)
    self.assertNotEqual(None, self.second_proto)
  def testNotEqualToOtherMessage(self):
    """Messages of different types are never equal."""
    third_proto = unittest_pb2.TestRequired()
    self.assertNotEqual(self.first_proto, third_proto)
    self.assertNotEqual(third_proto, self.second_proto)
  def testAllFieldsFilledEquality(self):
    """Two identically populated messages compare equal."""
    self.assertEqual(self.first_proto, self.second_proto)
  def testNonRepeatedScalar(self):
    """Changing or clearing a singular scalar breaks equality."""
    # Nonrepeated scalar field change should cause inequality.
    self.first_proto.optional_int32 += 1
    self.assertNotEqual(self.first_proto, self.second_proto)
    # ...as should clearing a field.
    self.first_proto.ClearField('optional_int32')
    self.assertNotEqual(self.first_proto, self.second_proto)
  def testNonRepeatedComposite(self):
    """Changing or clearing a nested message affects equality."""
    # Change a nonrepeated composite field.
    self.first_proto.optional_nested_message.bb += 1
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.optional_nested_message.bb -= 1
    self.assertEqual(self.first_proto, self.second_proto)
    # Clear a field in the nested message.
    self.first_proto.optional_nested_message.ClearField('bb')
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.optional_nested_message.bb = (
        self.second_proto.optional_nested_message.bb)
    self.assertEqual(self.first_proto, self.second_proto)
    # Remove the nested message entirely.
    self.first_proto.ClearField('optional_nested_message')
    self.assertNotEqual(self.first_proto, self.second_proto)
  def testRepeatedScalar(self):
    """Appending to or clearing a repeated scalar breaks equality."""
    # Change a repeated scalar field.
    self.first_proto.repeated_int32.append(5)
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.ClearField('repeated_int32')
    self.assertNotEqual(self.first_proto, self.second_proto)
  def testRepeatedComposite(self):
    """Element changes and additions in repeated messages affect equality."""
    # Change value within a repeated composite field.
    self.first_proto.repeated_nested_message[0].bb += 1
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.repeated_nested_message[0].bb -= 1
    self.assertEqual(self.first_proto, self.second_proto)
    # Add a value to a repeated composite field.
    self.first_proto.repeated_nested_message.add()
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.second_proto.repeated_nested_message.add()
    self.assertEqual(self.first_proto, self.second_proto)
  def testNonRepeatedScalarHasBits(self):
    """A cleared field differs from one explicitly set to its default."""
    # Ensure that we test "has" bits as well as value for
    # nonrepeated scalar field.
    self.first_proto.ClearField('optional_int32')
    self.second_proto.optional_int32 = 0
    self.assertNotEqual(self.first_proto, self.second_proto)
  def testNonRepeatedCompositeHasBits(self):
    """'Has' bits of nested messages participate in equality."""
    # Ensure that we test "has" bits as well as value for
    # nonrepeated composite field.
    self.first_proto.ClearField('optional_nested_message')
    self.second_proto.optional_nested_message.ClearField('bb')
    self.assertNotEqual(self.first_proto, self.second_proto)
    self.first_proto.optional_nested_message.bb = 0
    self.first_proto.optional_nested_message.ClearField('bb')
    self.assertEqual(self.first_proto, self.second_proto)
class ExtensionEqualityTest(basetest.TestCase):
  """Equality semantics for messages carrying extensions."""
  def testExtensionEquality(self):
    """Extension values and their 'has' bits participate in equality."""
    first_proto = unittest_pb2.TestAllExtensions()
    second_proto = unittest_pb2.TestAllExtensions()
    self.assertEqual(first_proto, second_proto)
    test_util.SetAllExtensions(first_proto)
    self.assertNotEqual(first_proto, second_proto)
    test_util.SetAllExtensions(second_proto)
    self.assertEqual(first_proto, second_proto)
    # Ensure that we check value equality.
    first_proto.Extensions[unittest_pb2.optional_int32_extension] += 1
    self.assertNotEqual(first_proto, second_proto)
    first_proto.Extensions[unittest_pb2.optional_int32_extension] -= 1
    self.assertEqual(first_proto, second_proto)
    # Ensure that we also look at "has" bits.
    first_proto.ClearExtension(unittest_pb2.optional_int32_extension)
    second_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
    self.assertNotEqual(first_proto, second_proto)
    first_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
    self.assertEqual(first_proto, second_proto)
    # Ensure that differences in cached values
    # don't matter if "has" bits are both false.
    first_proto = unittest_pb2.TestAllExtensions()
    second_proto = unittest_pb2.TestAllExtensions()
    self.assertEqual(
        0, first_proto.Extensions[unittest_pb2.optional_int32_extension])
    self.assertEqual(first_proto, second_proto)
class MutualRecursionEqualityTest(basetest.TestCase):
  """Equality on mutually recursive message types must terminate."""

  def testEqualityWithMutualRecursion(self):
    """Deeply nested field changes are reflected in equality checks."""
    lhs = unittest_pb2.TestMutualRecursionA()
    rhs = unittest_pb2.TestMutualRecursionA()
    self.assertEqual(lhs, rhs)
    lhs.bb.a.bb.optional_int32 = 23
    self.assertNotEqual(lhs, rhs)
    rhs.bb.a.bb.optional_int32 = 23
    self.assertEqual(lhs, rhs)
class ByteSizeTest(basetest.TestCase):
  def setUp(self):
    # Fresh messages for each test; byte sizes are computed from scratch.
    self.proto = unittest_pb2.TestAllTypes()
    self.extended_proto = more_extensions_pb2.ExtendedMessage()
    self.packed_proto = unittest_pb2.TestPackedTypes()
    self.packed_extended_proto = unittest_pb2.TestPackedExtensions()
  def Size(self):
    """Convenience wrapper: byte size of the main test message."""
    return self.proto.ByteSize()
  def testEmptyMessage(self):
    """An empty message serializes to zero bytes."""
    self.assertEqual(0, self.proto.ByteSize())
def testSizedOnKwargs(self):
# Use a separate message to ensure testing right after creation.
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.ByteSize())
proto_kwargs = unittest_pb2.TestAllTypes(optional_int64 = 1)
# One byte for the tag, one to encode varint 1.
self.assertEqual(2, proto_kwargs.ByteSize())
  def testVarints(self):
    """Byte sizes of varint-encoded int64 values across the full range."""
    def Test(i, expected_varint_size):
      # Helper: sets optional_int64 to i and checks the serialized size.
      self.proto.Clear()
      self.proto.optional_int64 = i
      # Add one to the varint size for the tag info
      # for tag 1.
      self.assertEqual(expected_varint_size + 1, self.Size())
    Test(0, 1)
    Test(1, 1)
    # Each 7-bit increase in magnitude costs one more varint byte.
    for i, num_bytes in zip(range(7, 63, 7), range(1, 10000)):
      Test((1 << i) - 1, num_bytes)
    # Negative values always use the maximum 10 varint bytes.
    Test(-1, 10)
    Test(-2, 10)
    Test(-(1 << 63), 10)
  def testStrings(self):
    """String byte size = tag + length varint + payload bytes."""
    self.proto.optional_string = ''
    # Need one byte for tag info (tag #14), and one byte for length.
    self.assertEqual(2, self.Size())
    self.proto.optional_string = 'abc'
    # Need one byte for tag info (tag #14), and one byte for length.
    self.assertEqual(2 + len(self.proto.optional_string), self.Size())
    self.proto.optional_string = 'x' * 128
    # Need one byte for tag info (tag #14), and TWO bytes for length.
    self.assertEqual(3 + len(self.proto.optional_string), self.Size())
def testOtherNumerics(self):
self.proto.optional_fixed32 = 1234
# One byte for tag and 4 bytes for fixed32.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_fixed64 = 1234
# One byte for tag and 8 bytes for fixed64.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_float = 1.234
# One byte for tag and 4 bytes for float.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_double = 1.234
# One byte for tag and 8 bytes for float.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_sint32 = 64
# One byte for tag and 2 bytes for zig-zag-encoded 64.
self.assertEqual(3, self.Size())
self.proto = unittest_pb2.TestAllTypes()
  def testComposites(self):
    """Byte size of a nested message includes its tag and length prefix."""
    # 3 bytes.
    self.proto.optional_nested_message.bb = (1 << 14)
    # Plus one byte for bb tag.
    # Plus 1 byte for optional_nested_message serialized size.
    # Plus two bytes for optional_nested_message tag.
    self.assertEqual(3 + 1 + 1 + 2, self.Size())
  def testGroups(self):
    """Groups are delimited by START_GROUP/END_GROUP tags, not a length."""
    # 4 bytes.
    self.proto.optionalgroup.a = (1 << 21)
    # Plus two bytes for |a| tag.
    # Plus 2 * two bytes for START_GROUP and END_GROUP tags.
    self.assertEqual(4 + 2 + 2*2, self.Size())
def testRepeatedScalars(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
  def testRepeatedScalarsExtend(self):
    """extend() affects byte size exactly like repeated append()s."""
    self.proto.repeated_int32.extend([10, 128])  # 3 bytes.
    # Also need 2 bytes for each entry for tag.
    self.assertEqual(1 + 2 + 2*2, self.Size())
  def testRepeatedScalarsRemove(self):
    """remove() shrinks the cached byte size accordingly."""
    self.proto.repeated_int32.append(10)  # 1 byte.
    self.proto.repeated_int32.append(128)  # 2 bytes.
    # Also need 2 bytes for each entry for tag.
    self.assertEqual(1 + 2 + 2*2, self.Size())
    self.proto.repeated_int32.remove(128)
    self.assertEqual(1 + 2, self.Size())
def testRepeatedComposites(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 7
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
def testRepeatedCompositesDelete(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 9
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[0]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
# Now add a new message.
foreign_message_2 = self.proto.repeated_nested_message.add()
foreign_message_2.bb = 12
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
self.assertEqual(2 + 1 + 1 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[1]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
del self.proto.repeated_nested_message[0]
self.assertEqual(0, self.Size())
def testRepeatedGroups(self):
# 2-byte START_GROUP plus 2-byte END_GROUP.
group_0 = self.proto.repeatedgroup.add()
# 2-byte START_GROUP plus 2-byte |a| tag + 1-byte |a|
# plus 2-byte END_GROUP.
group_1 = self.proto.repeatedgroup.add()
group_1.a = 7
self.assertEqual(2 + 2 + 2 + 2 + 1 + 2, self.Size())
def testExtensions(self):
proto = unittest_pb2.TestAllExtensions()
self.assertEqual(0, proto.ByteSize())
extension = unittest_pb2.optional_int32_extension # Field #1, 1 byte.
proto.Extensions[extension] = 23
# 1 byte for tag, 1 byte for value.
self.assertEqual(2, proto.ByteSize())
  def testCacheInvalidationForNonrepeatedScalar(self):
    """Cached byte size is invalidated when a singular scalar changes."""
    # Test non-extension.
    self.proto.optional_int32 = 1
    self.assertEqual(2, self.proto.ByteSize())
    self.proto.optional_int32 = 128
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.ClearField('optional_int32')
    self.assertEqual(0, self.proto.ByteSize())
    # Test within extension.
    extension = more_extensions_pb2.optional_int_extension
    self.extended_proto.Extensions[extension] = 1
    self.assertEqual(2, self.extended_proto.ByteSize())
    self.extended_proto.Extensions[extension] = 128
    self.assertEqual(3, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())
  def testCacheInvalidationForRepeatedScalar(self):
    """Cached byte size tracks appends, item assignment and clears."""
    # Test non-extension.
    self.proto.repeated_int32.append(1)
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.repeated_int32.append(1)
    self.assertEqual(6, self.proto.ByteSize())
    self.proto.repeated_int32[1] = 128
    self.assertEqual(7, self.proto.ByteSize())
    self.proto.ClearField('repeated_int32')
    self.assertEqual(0, self.proto.ByteSize())
    # Test within extension.
    extension = more_extensions_pb2.repeated_int_extension
    repeated = self.extended_proto.Extensions[extension]
    repeated.append(1)
    self.assertEqual(2, self.extended_proto.ByteSize())
    repeated.append(1)
    self.assertEqual(4, self.extended_proto.ByteSize())
    repeated[1] = 128
    self.assertEqual(5, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())
  def testCacheInvalidationForNonrepeatedMessage(self):
    """Cached byte size tracks changes made through a nested message."""
    # Test non-extension.
    self.proto.optional_foreign_message.c = 1
    self.assertEqual(5, self.proto.ByteSize())
    self.proto.optional_foreign_message.c = 128
    self.assertEqual(6, self.proto.ByteSize())
    self.proto.optional_foreign_message.ClearField('c')
    self.assertEqual(3, self.proto.ByteSize())
    self.proto.ClearField('optional_foreign_message')
    self.assertEqual(0, self.proto.ByteSize())
    if api_implementation.Type() == 'python':
      # This is only possible in pure-Python implementation of the API.
      child = self.proto.optional_foreign_message
      self.proto.ClearField('optional_foreign_message')
      # A detached child's changes must not count toward the parent's size.
      child.c = 128
      self.assertEqual(0, self.proto.ByteSize())
    # Test within extension.
    extension = more_extensions_pb2.optional_message_extension
    child = self.extended_proto.Extensions[extension]
    self.assertEqual(0, self.extended_proto.ByteSize())
    child.foreign_message_int = 1
    self.assertEqual(4, self.extended_proto.ByteSize())
    child.foreign_message_int = 128
    self.assertEqual(5, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())
  def testCacheInvalidationForRepeatedMessage(self):
    """ByteSize() cache must be invalidated by repeated-submessage mutation."""
    # Test non-extension.
    child0 = self.proto.repeated_foreign_message.add()
    self.assertEqual(3, self.proto.ByteSize())  # 3 bytes per empty child.
    self.proto.repeated_foreign_message.add()
    self.assertEqual(6, self.proto.ByteSize())
    child0.c = 1
    # Mutating an already-added child must propagate up to the parent.
    self.assertEqual(8, self.proto.ByteSize())
    self.proto.ClearField('repeated_foreign_message')
    self.assertEqual(0, self.proto.ByteSize())
    # Test within extension.
    extension = more_extensions_pb2.repeated_message_extension
    child_list = self.extended_proto.Extensions[extension]
    child0 = child_list.add()
    self.assertEqual(2, self.extended_proto.ByteSize())
    child_list.add()
    self.assertEqual(4, self.extended_proto.ByteSize())
    child0.foreign_message_int = 1
    self.assertEqual(6, self.extended_proto.ByteSize())
    child0.ClearField('foreign_message_int')
    # Clearing the child's field must shrink the parent's size again.
    self.assertEqual(4, self.extended_proto.ByteSize())
    self.extended_proto.ClearExtension(extension)
    self.assertEqual(0, self.extended_proto.ByteSize())
  def testPackedRepeatedScalars(self):
    """Packed repeated scalars: one tag + one length prefix per field run."""
    self.assertEqual(0, self.packed_proto.ByteSize())
    self.packed_proto.packed_int32.append(10)  # 1 byte.
    self.packed_proto.packed_int32.append(128)  # 2 bytes.
    # The tag is 2 bytes (the field number is 90), and the varint
    # storing the length is 1 byte.
    int_size = 1 + 2 + 3
    self.assertEqual(int_size, self.packed_proto.ByteSize())
    self.packed_proto.packed_double.append(4.2)  # 8 bytes
    self.packed_proto.packed_double.append(3.25)  # 8 bytes
    # 2 more tag bytes, 1 more length byte.
    double_size = 8 + 8 + 3
    self.assertEqual(int_size+double_size, self.packed_proto.ByteSize())
    self.packed_proto.ClearField('packed_int32')
    # Clearing a packed field drops its entire tag + length + payload run.
    self.assertEqual(double_size, self.packed_proto.ByteSize())
def testPackedExtensions(self):
self.assertEqual(0, self.packed_extended_proto.ByteSize())
extension = self.packed_extended_proto.Extensions[
unittest_pb2.packed_fixed32_extension]
extension.extend([1, 2, 3, 4]) # 16 bytes
# Tag is 3 bytes.
self.assertEqual(19, self.packed_extended_proto.ByteSize())
# Issues to be sure to cover include:
# * Handling of unrecognized tags ("uninterpreted_bytes").
# * Handling of MessageSets.
# * Consistent ordering of tags in the wire format,
# including ordering between extensions and non-extension
# fields.
# * Consistent serialization of negative numbers, especially
# negative int32s.
# * Handling of empty submessages (with and without "has"
# bits set).
class SerializationTest(basetest.TestCase):
  def testSerializeEmtpyMessage(self):
    # NOTE(review): method name misspells "Empty"; left unchanged because
    # test frameworks discover tests by method name.
    """A default-constructed message round-trips through serialization."""
    first_proto = unittest_pb2.TestAllTypes()
    second_proto = unittest_pb2.TestAllTypes()
    serialized = first_proto.SerializeToString()
    self.assertEqual(first_proto.ByteSize(), len(serialized))
    # MergeFromString returns the number of bytes it consumed.
    self.assertEqual(
        len(serialized),
        second_proto.MergeFromString(serialized))
    self.assertEqual(first_proto, second_proto)
def testSerializeAllFields(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeAllExtensions(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeWithOptionalGroup(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
first_proto.optionalgroup.a = 242
serialized = first_proto.SerializeToString()
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeNegativeValues(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.optional_int32 = -1
first_proto.optional_int64 = -(2 << 40)
first_proto.optional_sint32 = -3
first_proto.optional_sint64 = -(4 << 40)
first_proto.optional_sfixed32 = -5
first_proto.optional_sfixed64 = -(6 << 40)
second_proto = unittest_pb2.TestAllTypes.FromString(
first_proto.SerializeToString())
self.assertEqual(first_proto, second_proto)
  def testParseTruncated(self):
    """_InternalParse must behave consistently on truncated input.

    For every possible truncation point, parsing into a message with known
    fields and parsing into an all-unknown-fields message must either both
    succeed (consuming exactly the given byte count) or both raise
    DecodeError.
    """
    # This test is only applicable for the Python implementation of the API.
    if api_implementation.Type() != 'python':
      return
    first_proto = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(first_proto)
    serialized = first_proto.SerializeToString()
    for truncation_point in xrange(len(serialized) + 1):
      try:
        second_proto = unittest_pb2.TestAllTypes()
        unknown_fields = unittest_pb2.TestEmptyMessage()
        pos = second_proto._InternalParse(serialized, 0, truncation_point)
        # If we didn't raise an error then we read exactly the amount expected.
        self.assertEqual(truncation_point, pos)
        # Parsing to unknown fields should not throw if parsing to known fields
        # did not.
        try:
          pos2 = unknown_fields._InternalParse(serialized, 0, truncation_point)
          self.assertEqual(truncation_point, pos2)
        except message.DecodeError:
          self.fail('Parsing unknown fields failed when parsing known fields '
                    'did not.')
      except message.DecodeError:
        # Parsing unknown fields should also fail.
        self.assertRaises(message.DecodeError, unknown_fields._InternalParse,
                          serialized, 0, truncation_point)
def testCanonicalSerializationOrder(self):
proto = more_messages_pb2.OutOfOrderFields()
# These are also their tag numbers. Even though we're setting these in
# reverse-tag order AND they're listed in reverse tag-order in the .proto
# file, they should nonetheless be serialized in tag order.
proto.optional_sint32 = 5
proto.Extensions[more_messages_pb2.optional_uint64] = 4
proto.optional_uint32 = 3
proto.Extensions[more_messages_pb2.optional_int64] = 2
proto.optional_int32 = 1
serialized = proto.SerializeToString()
self.assertEqual(proto.ByteSize(), len(serialized))
d = _MiniDecoder(serialized)
ReadTag = d.ReadFieldNumberAndWireType
self.assertEqual((1, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(1, d.ReadInt32())
self.assertEqual((2, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(2, d.ReadInt64())
self.assertEqual((3, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(3, d.ReadUInt32())
self.assertEqual((4, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(4, d.ReadUInt64())
self.assertEqual((5, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(5, d.ReadSInt32())
  def testCanonicalSerializationOrderSameAsCpp(self):
    """Serialization order matches the C++ implementation.

    Copy of the same test we use for C++.
    """
    proto = unittest_pb2.TestFieldOrderings()
    test_util.SetAllFieldsAndExtensions(proto)
    serialized = proto.SerializeToString()
    test_util.ExpectAllFieldsAndExtensionsInOrder(serialized)
def testMergeFromStringWhenFieldsAlreadySet(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.repeated_string.append('foobar')
first_proto.optional_int32 = 23
first_proto.optional_nested_message.bb = 42
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestAllTypes()
second_proto.repeated_string.append('baz')
second_proto.optional_int32 = 100
second_proto.optional_nested_message.bb = 999
bytes_parsed = second_proto.MergeFromString(serialized)
self.assertEqual(len(serialized), bytes_parsed)
# Ensure that we append to repeated fields.
self.assertEqual(['baz', 'foobar'], list(second_proto.repeated_string))
# Ensure that we overwrite nonrepeatd scalars.
self.assertEqual(23, second_proto.optional_int32)
# Ensure that we recursively call MergeFromString() on
# submessages.
self.assertEqual(42, second_proto.optional_nested_message.bb)
def testMessageSetWireFormat(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension_message2 = unittest_mset_pb2.TestMessageSetExtension2
extension1 = extension_message1.message_set_extension
extension2 = extension_message2.message_set_extension
proto.Extensions[extension1].i = 123
proto.Extensions[extension2].str = 'foo'
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
raw = unittest_mset_pb2.RawMessageSet()
self.assertEqual(False,
raw.DESCRIPTOR.GetOptions().message_set_wire_format)
self.assertEqual(
len(serialized),
raw.MergeFromString(serialized))
self.assertEqual(2, len(raw.item))
message1 = unittest_mset_pb2.TestMessageSetExtension1()
self.assertEqual(
len(raw.item[0].message),
message1.MergeFromString(raw.item[0].message))
self.assertEqual(123, message1.i)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
self.assertEqual(
len(raw.item[1].message),
message2.MergeFromString(raw.item[1].message))
self.assertEqual('foo', message2.str)
# Deserialize using the MessageSet wire format.
proto2 = unittest_mset_pb2.TestMessageSet()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertEqual(123, proto2.Extensions[extension1].i)
self.assertEqual('foo', proto2.Extensions[extension2].str)
# Check byte size.
self.assertEqual(proto2.ByteSize(), len(serialized))
self.assertEqual(proto.ByteSize(), len(serialized))
def testMessageSetWireFormatUnknownExtension(self):
# Create a message using the message set wire format with an unknown
# message.
raw = unittest_mset_pb2.RawMessageSet()
# Add an item.
item = raw.item.add()
item.type_id = 1545008
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12345
item.message = message1.SerializeToString()
# Add a second, unknown extension.
item = raw.item.add()
item.type_id = 1545009
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12346
item.message = message1.SerializeToString()
# Add another unknown extension.
item = raw.item.add()
item.type_id = 1545010
message1 = unittest_mset_pb2.TestMessageSetExtension2()
message1.str = 'foo'
item.message = message1.SerializeToString()
serialized = raw.SerializeToString()
# Parse message using the message set wire format.
proto = unittest_mset_pb2.TestMessageSet()
self.assertEqual(
len(serialized),
proto.MergeFromString(serialized))
# Check that the message parsed well.
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension1 = extension_message1.message_set_extension
self.assertEquals(12345, proto.Extensions[extension1].i)
def testUnknownFields(self):
proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto)
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
# Now test with a int64 field set.
proto = unittest_pb2.TestAllTypes()
proto.optional_int64 = 0x0fffffffffffffff
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
  def _CheckRaises(self, exc_class, callable_obj, exception):
    """Checks that both the exception type and message are as expected.

    Args:
      exc_class: Expected exception class.
      callable_obj: Zero-argument callable expected to raise exc_class.
      exception: Expected str() of the raised exception.
    """
    try:
      callable_obj()
    except exc_class as ex:
      # Check if the exception message is the right one.
      self.assertEqual(exception, str(ex))
      return
    else:
      raise self.failureException('%s not raised' % str(exc_class))
def testSerializeUninitialized(self):
proto = unittest_pb2.TestRequired()
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequired is missing required fields: '
'a,b,c')
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto2 = unittest_pb2.TestRequired()
self.assertFalse(proto2.HasField('a'))
# proto2 ParseFromString does not check that required fields are set.
proto2.ParseFromString(partial)
self.assertFalse(proto2.HasField('a'))
proto.a = 1
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequired is missing required fields: b,c')
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.b = 2
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequired is missing required fields: c')
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.c = 3
serialized = proto.SerializeToString()
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto2 = unittest_pb2.TestRequired()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
self.assertEqual(
len(partial),
proto2.MergeFromString(partial))
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
def testSerializeUninitializedSubMessage(self):
proto = unittest_pb2.TestRequiredForeign()
# Sub-message doesn't exist yet, so this succeeds.
proto.SerializeToString()
proto.optional_message.a = 1
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequiredForeign '
'is missing required fields: '
'optional_message.b,optional_message.c')
proto.optional_message.b = 2
proto.optional_message.c = 3
proto.SerializeToString()
proto.repeated_message.add().a = 1
proto.repeated_message.add().b = 2
self._CheckRaises(
message.EncodeError,
proto.SerializeToString,
'Message protobuf_unittest.TestRequiredForeign is missing required fields: '
'repeated_message[0].b,repeated_message[0].c,'
'repeated_message[1].a,repeated_message[1].c')
proto.repeated_message[0].b = 2
proto.repeated_message[0].c = 3
proto.repeated_message[1].a = 1
proto.repeated_message[1].c = 3
proto.SerializeToString()
def testSerializeAllPackedFields(self):
first_proto = unittest_pb2.TestPackedTypes()
second_proto = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testSerializeAllPackedExtensions(self):
first_proto = unittest_pb2.TestPackedExtensions()
second_proto = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(first_proto)
serialized = first_proto.SerializeToString()
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testMergePackedFromStringWhenSomeFieldsAlreadySet(self):
first_proto = unittest_pb2.TestPackedTypes()
first_proto.packed_int32.extend([1, 2])
first_proto.packed_double.append(3.0)
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestPackedTypes()
second_proto.packed_int32.append(3)
second_proto.packed_double.extend([1.0, 2.0])
second_proto.packed_sint32.append(4)
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual([3, 1, 2], second_proto.packed_int32)
self.assertEqual([1.0, 2.0, 3.0], second_proto.packed_double)
self.assertEqual([4], second_proto.packed_sint32)
def testPackedFieldsWireFormat(self):
proto = unittest_pb2.TestPackedTypes()
proto.packed_int32.extend([1, 2, 150, 3]) # 1 + 1 + 2 + 1 bytes
proto.packed_double.extend([1.0, 1000.0]) # 8 + 8 bytes
proto.packed_float.append(2.0) # 4 bytes, will be before double
serialized = proto.SerializeToString()
self.assertEqual(proto.ByteSize(), len(serialized))
d = _MiniDecoder(serialized)
ReadTag = d.ReadFieldNumberAndWireType
self.assertEqual((90, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(1+1+1+2, d.ReadInt32())
self.assertEqual(1, d.ReadInt32())
self.assertEqual(2, d.ReadInt32())
self.assertEqual(150, d.ReadInt32())
self.assertEqual(3, d.ReadInt32())
self.assertEqual((100, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(4, d.ReadInt32())
self.assertEqual(2.0, d.ReadFloat())
self.assertEqual((101, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(8+8, d.ReadInt32())
self.assertEqual(1.0, d.ReadDouble())
self.assertEqual(1000.0, d.ReadDouble())
self.assertTrue(d.EndOfStream())
def testParsePackedFromUnpacked(self):
unpacked = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(unpacked)
packed = unittest_pb2.TestPackedTypes()
serialized = unpacked.SerializeToString()
self.assertEqual(
len(serialized),
packed.MergeFromString(serialized))
expected = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(expected)
self.assertEqual(expected, packed)
def testParseUnpackedFromPacked(self):
packed = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(packed)
unpacked = unittest_pb2.TestUnpackedTypes()
serialized = packed.SerializeToString()
self.assertEqual(
len(serialized),
unpacked.MergeFromString(serialized))
expected = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(expected)
self.assertEqual(expected, unpacked)
def testFieldNumbers(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(unittest_pb2.TestAllTypes.NestedMessage.BB_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONAL_INT32_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONALGROUP_FIELD_NUMBER, 16)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_MESSAGE_FIELD_NUMBER, 18)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_ENUM_FIELD_NUMBER, 21)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATED_INT32_FIELD_NUMBER, 31)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATEDGROUP_FIELD_NUMBER, 46)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_MESSAGE_FIELD_NUMBER, 48)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_ENUM_FIELD_NUMBER, 51)
def testExtensionFieldNumbers(self):
self.assertEqual(unittest_pb2.TestRequired.single.number, 1000)
self.assertEqual(unittest_pb2.TestRequired.SINGLE_FIELD_NUMBER, 1000)
self.assertEqual(unittest_pb2.TestRequired.multi.number, 1001)
self.assertEqual(unittest_pb2.TestRequired.MULTI_FIELD_NUMBER, 1001)
self.assertEqual(unittest_pb2.optional_int32_extension.number, 1)
self.assertEqual(unittest_pb2.OPTIONAL_INT32_EXTENSION_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.optionalgroup_extension.number, 16)
self.assertEqual(unittest_pb2.OPTIONALGROUP_EXTENSION_FIELD_NUMBER, 16)
self.assertEqual(unittest_pb2.optional_nested_message_extension.number, 18)
self.assertEqual(
unittest_pb2.OPTIONAL_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 18)
self.assertEqual(unittest_pb2.optional_nested_enum_extension.number, 21)
self.assertEqual(unittest_pb2.OPTIONAL_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
21)
self.assertEqual(unittest_pb2.repeated_int32_extension.number, 31)
self.assertEqual(unittest_pb2.REPEATED_INT32_EXTENSION_FIELD_NUMBER, 31)
self.assertEqual(unittest_pb2.repeatedgroup_extension.number, 46)
self.assertEqual(unittest_pb2.REPEATEDGROUP_EXTENSION_FIELD_NUMBER, 46)
self.assertEqual(unittest_pb2.repeated_nested_message_extension.number, 48)
self.assertEqual(
unittest_pb2.REPEATED_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 48)
self.assertEqual(unittest_pb2.repeated_nested_enum_extension.number, 51)
self.assertEqual(unittest_pb2.REPEATED_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
51)
def testInitKwargs(self):
proto = unittest_pb2.TestAllTypes(
optional_int32=1,
optional_string='foo',
optional_bool=True,
optional_bytes=b'bar',
optional_nested_message=unittest_pb2.TestAllTypes.NestedMessage(bb=1),
optional_foreign_message=unittest_pb2.ForeignMessage(c=1),
optional_nested_enum=unittest_pb2.TestAllTypes.FOO,
optional_foreign_enum=unittest_pb2.FOREIGN_FOO,
repeated_int32=[1, 2, 3])
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('optional_int32'))
self.assertTrue(proto.HasField('optional_string'))
self.assertTrue(proto.HasField('optional_bool'))
self.assertTrue(proto.HasField('optional_bytes'))
self.assertTrue(proto.HasField('optional_nested_message'))
self.assertTrue(proto.HasField('optional_foreign_message'))
self.assertTrue(proto.HasField('optional_nested_enum'))
self.assertTrue(proto.HasField('optional_foreign_enum'))
self.assertEqual(1, proto.optional_int32)
self.assertEqual('foo', proto.optional_string)
self.assertEqual(True, proto.optional_bool)
self.assertEqual(b'bar', proto.optional_bytes)
self.assertEqual(1, proto.optional_nested_message.bb)
self.assertEqual(1, proto.optional_foreign_message.c)
self.assertEqual(unittest_pb2.TestAllTypes.FOO,
proto.optional_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_FOO, proto.optional_foreign_enum)
self.assertEqual([1, 2, 3], proto.repeated_int32)
  def testInitArgsUnknownFieldName(self):
    """Constructor kwargs naming a nonexistent field raise ValueError."""
    # (Local helper name misspells "Initialize"; local-only, harmless.)
    def InitalizeEmptyMessageWithExtraKeywordArg():
      unused_proto = unittest_pb2.TestEmptyMessage(unknown='unknown')
    self._CheckRaises(ValueError,
                      InitalizeEmptyMessageWithExtraKeywordArg,
                      'Protocol message has no "unknown" field.')
def testInitRequiredKwargs(self):
proto = unittest_pb2.TestRequired(a=1, b=1, c=1)
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('a'))
self.assertTrue(proto.HasField('b'))
self.assertTrue(proto.HasField('c'))
self.assertTrue(not proto.HasField('dummy2'))
self.assertEqual(1, proto.a)
self.assertEqual(1, proto.b)
self.assertEqual(1, proto.c)
def testInitRequiredForeignKwargs(self):
proto = unittest_pb2.TestRequiredForeign(
optional_message=unittest_pb2.TestRequired(a=1, b=1, c=1))
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('optional_message'))
self.assertTrue(proto.optional_message.IsInitialized())
self.assertTrue(proto.optional_message.HasField('a'))
self.assertTrue(proto.optional_message.HasField('b'))
self.assertTrue(proto.optional_message.HasField('c'))
self.assertTrue(not proto.optional_message.HasField('dummy2'))
self.assertEqual(unittest_pb2.TestRequired(a=1, b=1, c=1),
proto.optional_message)
self.assertEqual(1, proto.optional_message.a)
self.assertEqual(1, proto.optional_message.b)
self.assertEqual(1, proto.optional_message.c)
  def testInitRepeatedKwargs(self):
    """A list passed as a constructor kwarg populates the repeated field."""
    proto = unittest_pb2.TestAllTypes(repeated_int32=[1, 2, 3])
    self.assertTrue(proto.IsInitialized())
    self.assertEqual(1, proto.repeated_int32[0])
    self.assertEqual(2, proto.repeated_int32[1])
    self.assertEqual(3, proto.repeated_int32[2])
class OptionsTest(basetest.TestCase):
  """Tests that options declared in the .proto file are exposed via
  descriptors at runtime."""
  def testMessageOptions(self):
    """message_set_wire_format option mirrors the .proto declaration."""
    proto = unittest_mset_pb2.TestMessageSet()
    self.assertEqual(True,
                     proto.DESCRIPTOR.GetOptions().message_set_wire_format)
    proto = unittest_pb2.TestAllTypes()
    self.assertEqual(False,
                     proto.DESCRIPTOR.GetOptions().message_set_wire_format)
  def testPackedOptions(self):
    """The packed option is visible on each listed field descriptor."""
    proto = unittest_pb2.TestAllTypes()
    proto.optional_int32 = 1
    proto.optional_double = 3.0
    for field_descriptor, _ in proto.ListFields():
      self.assertEqual(False, field_descriptor.GetOptions().packed)
    proto = unittest_pb2.TestPackedTypes()
    proto.packed_int32.append(1)
    proto.packed_double.append(3.0)
    for field_descriptor, _ in proto.ListFields():
      self.assertEqual(True, field_descriptor.GetOptions().packed)
      self.assertEqual(reflection._FieldDescriptor.LABEL_REPEATED,
                       field_descriptor.label)
class ClassAPITest(basetest.TestCase):
def testMakeClassWithNestedDescriptor(self):
leaf_desc = descriptor.Descriptor('leaf', 'package.parent.child.leaf', '',
containing_type=None, fields=[],
nested_types=[], enum_types=[],
extensions=[])
child_desc = descriptor.Descriptor('child', 'package.parent.child', '',
containing_type=None, fields=[],
nested_types=[leaf_desc], enum_types=[],
extensions=[])
sibling_desc = descriptor.Descriptor('sibling', 'package.parent.sibling',
'', containing_type=None, fields=[],
nested_types=[], enum_types=[],
extensions=[])
parent_desc = descriptor.Descriptor('parent', 'package.parent', '',
containing_type=None, fields=[],
nested_types=[child_desc, sibling_desc],
enum_types=[], extensions=[])
message_class = reflection.MakeClass(parent_desc)
self.assertIn('child', message_class.__dict__)
self.assertIn('sibling', message_class.__dict__)
self.assertIn('leaf', message_class.child.__dict__)
def _GetSerializedFileDescriptor(self, name):
"""Get a serialized representation of a test FileDescriptorProto.
Args:
name: All calls to this must use a unique message name, to avoid
collisions in the cpp descriptor pool.
Returns:
A string containing the serialized form of a test FileDescriptorProto.
"""
file_descriptor_str = (
'message_type {'
' name: "' + name + '"'
' field {'
' name: "flat"'
' number: 1'
' label: LABEL_REPEATED'
' type: TYPE_UINT32'
' }'
' field {'
' name: "bar"'
' number: 2'
' label: LABEL_OPTIONAL'
' type: TYPE_MESSAGE'
' type_name: "Bar"'
' }'
' nested_type {'
' name: "Bar"'
' field {'
' name: "baz"'
' number: 3'
' label: LABEL_OPTIONAL'
' type: TYPE_MESSAGE'
' type_name: "Baz"'
' }'
' nested_type {'
' name: "Baz"'
' enum_type {'
' name: "deep_enum"'
' value {'
' name: "VALUE_A"'
' number: 0'
' }'
' }'
' field {'
' name: "deep"'
' number: 4'
' label: LABEL_OPTIONAL'
' type: TYPE_UINT32'
' }'
' }'
' }'
'}')
file_descriptor = descriptor_pb2.FileDescriptorProto()
text_format.Merge(file_descriptor_str, file_descriptor)
return file_descriptor.SerializeToString()
def testParsingFlatClassWithExplicitClassDeclaration(self):
"""Test that the generated class can parse a flat message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('A'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
class MessageClass(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = msg_descriptor
msg = MessageClass()
msg_str = (
'flat: 0 '
'flat: 1 '
'flat: 2 ')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.flat, [0, 1, 2])
def testParsingFlatClass(self):
"""Test that the generated class can parse a flat message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('B'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
msg_str = (
'flat: 0 '
'flat: 1 '
'flat: 2 ')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.flat, [0, 1, 2])
def testParsingNestedClass(self):
"""Test that the generated class can parse a nested message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('C'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
msg_str = (
'bar {'
' baz {'
' deep: 4'
' }'
'}')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.bar.baz.deep, 4)
if __name__ == '__main__':
basetest.main()
| bsd-3-clause | -840,442,938,026,564,000 | 40.032038 | 84 | 0.70262 | false |
undoware/neutron-drive | google_appengine/google/appengine/_internal/django/core/servers/fastcgi.py | 23 | 6483 | """
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is a adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
from google.appengine._internal.django.utils import importlib
import sys, os
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default fcgi)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default prefork).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads.
minspare=NUMBER min number of spare processes / threads.
maxchildren=NUMBER hard limit number of processes / threads.
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing.
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
"""
FASTCGI_OPTIONS = {
'protocol': 'fcgi',
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
'debug': None,
'outlog': None,
'errlog': None,
'umask': None,
}
def fastcgi_help(message=None):
    """Prints the FastCGI usage text, plus an optional extra message.

    Always returns False so callers can write ``return fastcgi_help(...)``
    to report a configuration error.
    """
    print FASTCGI_HELP
    if message:
        print message
    return False
def runfastcgi(argset=[], **kwargs):
options = FASTCGI_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
if "help" in options:
return fastcgi_help()
try:
import flup
except ImportError, e:
print >> sys.stderr, "ERROR: %s" % e
print >> sys.stderr, " Unable to load the flup package. In order to run django"
print >> sys.stderr, " as a FastCGI application, you will need to get flup from"
print >> sys.stderr, " http://www.saddi.com/software/flup/ If you've already"
print >> sys.stderr, " installed flup, then make sure you have it in your PYTHONPATH."
return False
flup_module = 'server.' + options['protocol']
if options['method'] in ('prefork', 'fork'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxChildren': int(options["maxchildren"]),
'maxRequests': int(options["maxrequests"]),
}
flup_module += '_fork'
elif options['method'] in ('thread', 'threaded'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxThreads': int(options["maxchildren"]),
}
else:
return fastcgi_help("ERROR: Implementation must be one of prefork or thread.")
wsgi_opts['debug'] = options['debug'] is not None
try:
module = importlib.import_module('.%s' % flup_module, 'flup')
WSGIServer = module.WSGIServer
except:
print "Can't import flup." + flup_module
return False
# Prep up and go
from google.appengine._internal.django.core.handlers.wsgi import WSGIHandler
if options["host"] and options["port"] and not options["socket"]:
wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
elif options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = options["socket"]
elif not options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = None
else:
return fastcgi_help("Invalid combination of host, port, socket.")
if options["daemonize"] is None:
# Default to daemonizing if we're running on a socket/named pipe.
daemonize = (wsgi_opts['bindAddress'] is not None)
else:
if options["daemonize"].lower() in ('true', 'yes', 't'):
daemonize = True
elif options["daemonize"].lower() in ('false', 'no', 'f'):
daemonize = False
else:
return fastcgi_help("ERROR: Invalid option for daemonize parameter.")
daemon_kwargs = {}
if options['outlog']:
daemon_kwargs['out_log'] = options['outlog']
if options['errlog']:
daemon_kwargs['err_log'] = options['errlog']
if options['umask']:
daemon_kwargs['umask'] = int(options['umask'], 8)
if daemonize:
from google.appengine._internal.django.utils.daemonize import become_daemon
become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
if options["pidfile"]:
fp = open(options["pidfile"], "w")
fp.write("%d\n" % os.getpid())
fp.close()
WSGIServer(WSGIHandler(), **wsgi_opts).run()
# Allow running this module directly as a FastCGI launcher; every CLI
# argument after the script name is forwarded as a runfastcgi option.
if __name__ == '__main__':
    runfastcgi(sys.argv[1:])
| bsd-3-clause | 5,639,596,580,653,893,000 | 34.42623 | 95 | 0.630881 | false |
eneldoserrata/marcos_openerp | addons/report_geraldo/lib/geraldo/site/newsite/site-geraldo/django/contrib/sessions/models.py | 25 | 2675 | import base64
import cPickle as pickle
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils.hashcompat import md5_constructor
class SessionManager(models.Manager):
    """Manager providing tamper-protected serialization of session data."""

    def encode(self, session_dict):
        """Return *session_dict* pickled, with a keyed MD5 digest appended,
        all base64-encoded. The digest lets get_decoded() detect tampering.
        """
        payload = pickle.dumps(session_dict)
        digest = md5_constructor(payload + settings.SECRET_KEY).hexdigest()
        return base64.encodestring(payload + digest)

    def save(self, session_key, session_dict, expire_date):
        """Persist (or delete, when empty) the session and return the model."""
        session = self.model(session_key, self.encode(session_dict), expire_date)
        if session_dict:
            session.save()
        else:
            # An empty dict means the session carries no data: drop the row.
            session.delete()
        return session
class Session(models.Model):
    """
    Django provides full support for anonymous sessions. The session
    framework lets you store and retrieve arbitrary data on a
    per-site-visitor basis. It stores data on the server side and
    abstracts the sending and receiving of cookies. Cookies contain a
    session ID -- not the data itself.

    The Django sessions framework is entirely cookie-based. It does
    not fall back to putting session IDs in URLs. This is an intentional
    design decision. Not only does that behavior make URLs ugly, it makes
    your site vulnerable to session-ID theft via the "Referer" header.

    For complete documentation on using Sessions in your code, consult
    the sessions documentation that is shipped with Django (also available
    on the Django website).
    """
    session_key = models.CharField(_('session key'), max_length=40,
                                   primary_key=True)
    session_data = models.TextField(_('session data'))
    expire_date = models.DateTimeField(_('expire date'))
    objects = SessionManager()

    class Meta:
        db_table = 'django_session'
        verbose_name = _('session')
        verbose_name_plural = _('sessions')

    def get_decoded(self):
        """Decode and unpickle the stored session payload.

        :raises SuspiciousOperation: when the trailing 32-char MD5 digest does
            not match the payload (cookie tampering).
        :return: the session dict, or {} when the verified payload cannot be
            unpickled.
        """
        encoded_data = base64.decodestring(self.session_data)
        # Layout produced by SessionManager.encode(): pickled data followed by
        # a 32-character hex MD5 digest keyed with SECRET_KEY.
        pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
        if md5_constructor(pickled + settings.SECRET_KEY).hexdigest() != tamper_check:
            from django.core.exceptions import SuspiciousOperation
            # Call-style raise: identical behavior in Python 2, and unlike the
            # old `raise E, "msg"` form it is not a syntax error in Python 3.
            raise SuspiciousOperation("User tampered with session cookie.")
        try:
            return pickle.loads(pickled)
        # Unpickling can cause a variety of exceptions. If something happens,
        # just return an empty dictionary (an empty session). Catch Exception
        # rather than using a bare `except:` so KeyboardInterrupt/SystemExit
        # are not silently swallowed.
        except Exception:
            return {}
| agpl-3.0 | 8,379,739,849,014,758,000 | 38.925373 | 86 | 0.675514 | false |
janocat/odoo | openerp/modules/loading.py | 131 | 21198 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
import itertools
import logging
import os
import sys
import threading
import time
import openerp
import openerp.modules.db
import openerp.modules.graph
import openerp.modules.migration
import openerp.modules.registry
import openerp.osv as osv
import openerp.tools as tools
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.modules.module import initialize_sys_path, \
load_openerp_module, init_module_models, adapt_version
from module import runs_post_install
_logger = logging.getLogger(__name__)
_test_logger = logging.getLogger('openerp.tests')
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None):
    """Migrates+Updates or Installs all module nodes from ``graph``.

    :param cr: database cursor
    :param graph: graph of module nodes to load
    :param status: deprecated parameter, unused, left to avoid changing signature in 8.0
    :param perform_checks: whether module descriptors should be checked for validity (prints warnings
                           for same cases)
    :param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
    :param report: assertion report object collecting test results
    :return: tuple (loaded_modules, processed_modules): names of all modules
        loaded into the registry, and the subset that was installed/updated
    """

    def load_test(module_name, idref, mode):
        # Run the module's YAML/data test files; returns True on success.
        cr.commit()
        try:
            _load_data(cr, module_name, idref, mode, 'test')
            return True
        except Exception:
            _test_logger.exception(
                'module %s: an exception occurred in a test', module_name)
            return False
        finally:
            # Either keep the test's writes or roll them back, depending on
            # the test_commit configuration option.
            if tools.config.options['test_commit']:
                cr.commit()
            else:
                cr.rollback()
                # avoid keeping stale xml_id, etc. in cache
                openerp.modules.registry.RegistryManager.clear_caches(cr.dbname)

    def _get_files_of_kind(kind):
        # Map the logical kind ('demo'/'data') onto the manifest keys that may
        # contain such files, including the deprecated *_xml keys.
        if kind == 'demo':
            kind = ['demo_xml', 'demo']
        elif kind == 'data':
            kind = ['init_xml', 'update_xml', 'data']
        if isinstance(kind, str):
            kind = [kind]
        files = []
        for k in kind:
            for f in package.data[k]:
                files.append(f)
                if k.endswith('_xml') and not (k == 'init_xml' and not f.endswith('.xml')):
                    # init_xml, update_xml and demo_xml are deprecated except
                    # for the case of init_xml with yaml, csv and sql files as
                    # we can't specify noupdate for those file.
                    correct_key = 'demo' if k.count('demo') else 'data'
                    _logger.warning(
                        "module %s: key '%s' is deprecated in favor of '%s' for file '%s'.",
                        package.name, k, correct_key, f
                    )
        return files

    def _load_data(cr, module_name, idref, mode, kind):
        """
        kind: data, demo, test, init_xml, update_xml, demo_xml.

        noupdate is False, unless it is demo data or it is csv data in
        init mode.
        """
        try:
            if kind in ('demo', 'test'):
                # Flag the thread so ORM code can tell demo/test loading apart
                # from regular data loading.
                threading.currentThread().testing = True
            for filename in _get_files_of_kind(kind):
                _logger.info("loading %s/%s", module_name, filename)
                noupdate = False
                if kind in ('demo', 'demo_xml') or (filename.endswith('.csv') and kind in ('init', 'init_xml')):
                    noupdate = True
                tools.convert_file(cr, module_name, filename, idref, mode, noupdate, kind, report)
        finally:
            if kind in ('demo', 'test'):
                threading.currentThread().testing = False

    processed_modules = []
    loaded_modules = []
    registry = openerp.registry(cr.dbname)
    migrations = openerp.modules.migration.MigrationManager(cr, graph)
    _logger.info('loading %d modules...', len(graph))

    registry.clear_manual_fields()

    # register, instantiate and initialize models for each modules
    t0 = time.time()
    t0_sql = openerp.sql_db.sql_counter

    for index, package in enumerate(graph):
        module_name = package.name
        module_id = package.id

        if skip_modules and module_name in skip_modules:
            continue

        # Pre-migration scripts must run before the module's models are loaded.
        migrations.migrate_module(package, 'pre')
        load_openerp_module(package.name)

        new_install = package.state == 'to install'
        if new_install:
            py_module = sys.modules['openerp.addons.%s' % (module_name,)]
            pre_init = package.info.get('pre_init_hook')
            if pre_init:
                getattr(py_module, pre_init)(cr)

        models = registry.load(cr, package)

        loaded_modules.append(package.name)
        if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
            registry.setup_models(cr, partial=True)
            init_module_models(cr, package.name, models)

        idref = {}

        mode = 'update'
        if hasattr(package, 'init') or package.state == 'to install':
            mode = 'init'

        if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
            # Can't put this line out of the loop: ir.module.module will be
            # registered by init_module_models() above.
            modobj = registry['ir.module.module']

            if perform_checks:
                modobj.check(cr, SUPERUSER_ID, [module_id])

            if package.state=='to upgrade':
                # upgrading the module information
                modobj.write(cr, SUPERUSER_ID, [module_id], modobj.get_values_from_terp(package.data))
            _load_data(cr, module_name, idref, mode, kind='data')
            has_demo = hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed')
            if has_demo:
                _load_data(cr, module_name, idref, mode, kind='demo')
                cr.execute('update ir_module_module set demo=%s where id=%s', (True, module_id))
                modobj.invalidate_cache(cr, SUPERUSER_ID, ['demo'], [module_id])

            # Post-migration scripts run once the module's data is loaded.
            migrations.migrate_module(package, 'post')

            if new_install:
                post_init = package.info.get('post_init_hook')
                if post_init:
                    getattr(py_module, post_init)(cr, registry)

            registry._init_modules.add(package.name)
            # validate all the views at a whole
            registry['ir.ui.view']._validate_module_views(cr, SUPERUSER_ID, module_name)

            if has_demo:
                # launch tests only in demo mode, allowing tests to use demo data.
                if tools.config.options['test_enable']:
                    # Yamel test
                    report.record_result(load_test(module_name, idref, mode))
                    # Python tests
                    ir_http = registry['ir.http']
                    if hasattr(ir_http, '_routing_map'):
                        # Force routing map to be rebuilt between each module test suite
                        del(ir_http._routing_map)
                    report.record_result(openerp.modules.module.run_unit_tests(module_name, cr.dbname))

            processed_modules.append(package.name)

            ver = adapt_version(package.data['version'])
            # Set new modules and dependencies
            modobj.write(cr, SUPERUSER_ID, [module_id], {'state': 'installed', 'latest_version': ver})
            # Update translations for all installed languages
            modobj.update_translations(cr, SUPERUSER_ID, [module_id], None, {'overwrite': openerp.tools.config["overwrite_existing_translations"]})

            package.state = 'installed'
            for kind in ('init', 'demo', 'update'):
                if hasattr(package, kind):
                    delattr(package, kind)

        registry._init_modules.add(package.name)
        # Commit after each module so a failure later on does not lose the
        # modules already processed.
        cr.commit()

    _logger.log(25, "%s modules loaded in %.2fs, %s queries", len(graph), time.time() - t0, openerp.sql_db.sql_counter - t0_sql)

    registry.clear_manual_fields()

    cr.commit()

    return loaded_modules, processed_modules
def _check_module_names(cr, module_names):
    """Log a warning for every requested module name that does not exist
    in ir_module_module. Purely diagnostic: nothing is modified.
    """
    names = set(module_names)
    # NOTE: the dummy 'all' pseudo-module is only stripped when 'base' is
    # also requested, mirroring the historical behavior.
    if 'base' in names and 'all' in names:
        names.discard('all')
    if not names:
        return
    cr.execute("SELECT count(id) AS count FROM ir_module_module WHERE name in %s", (tuple(names),))
    if cr.dictfetchone()['count'] == len(names):
        # Every requested name matched a known module: nothing to report.
        return
    # Some names did not match; fetch the full list to name the culprits.
    cr.execute("SELECT name FROM ir_module_module")
    known = set(row['name'] for row in cr.dictfetchall())
    _logger.warning('invalid module names, ignored: %s', ", ".join(names - known))
def load_marked_modules(cr, graph, states, force, progressdict, report,
                        loaded_modules, perform_checks):
    """Loads modules marked with ``states``, adding them to ``graph`` and
    ``loaded_modules`` and returns a list of installed/upgraded modules.

    Loops because loading one batch may mark further modules (changed
    dependencies); stops when no new module shows up or none gets processed.
    """
    processed_modules = []
    keep_going = True
    while keep_going:
        cr.execute("SELECT name from ir_module_module WHERE state IN %s", (tuple(states),))
        todo = [name for (name,) in cr.fetchall() if name not in graph]
        if not todo:
            break
        graph.add_modules(cr, todo, force)
        _logger.debug('Updating graph with %d more modules', len(todo))
        loaded, processed = load_module_graph(
            cr, graph, progressdict, report=report,
            skip_modules=loaded_modules, perform_checks=perform_checks)
        processed_modules.extend(processed)
        loaded_modules.extend(loaded)
        # If nothing was processed this round, a further pass cannot make
        # progress either.
        keep_going = bool(processed)
    return processed_modules
def load_modules(db, force_demo=False, status=None, update_module=False):
    """Load, install, upgrade and/or uninstall the modules of database ``db``.

    :param db: database object exposing ``cursor()``
    :param force_demo: when True, demo data is loaded for processed modules
    :param status: deprecated/unused progress parameter, kept for signature
        stability
    :param update_module: when True, process pending install/upgrade/removal
        requests recorded in ir_module_module and in tools.config
    :return: normally None; when modules were uninstalled a brand new registry
        is created and returned (the current one being stale after removal)
    """
    initialize_sys_path()

    force = []
    if force_demo:
        force.append('demo')

    cr = db.cursor()
    try:
        if not openerp.modules.db.is_initialized(cr):
            # Brand new database: create the base tables and schedule a full
            # install of all auto-installed modules.
            _logger.info("init db")
            openerp.modules.db.initialize(cr)
            update_module = True # process auto-installed modules
            tools.config["init"]["all"] = 1
            tools.config['update']['all'] = 1
            if not tools.config['without_demo']:
                tools.config["demo"]['all'] = 1

        # This is a brand new registry, just created in
        # openerp.modules.registry.RegistryManager.new().
        registry = openerp.registry(cr.dbname)

        if 'base' in tools.config['update'] or 'all' in tools.config['update']:
            cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed'))

        # STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
        graph = openerp.modules.graph.Graph()
        graph.add_module(cr, 'base', force)
        if not graph:
            _logger.critical('module base cannot be loaded! (hint: verify addons-path)')
            raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! (hint: verify addons-path)'))

        # processed_modules: for cleanup step after install
        # loaded_modules: to avoid double loading
        report = registry._assertion_report
        loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=update_module, report=report)

        if tools.config['load_language'] or update_module:
            # some base models are used below, so make sure they are set up
            registry.setup_models(cr, partial=True)

        if tools.config['load_language']:
            for lang in tools.config['load_language'].split(','):
                tools.load_language(cr, lang)

        # STEP 2: Mark other modules to be loaded/updated
        if update_module:
            modobj = registry['ir.module.module']
            if ('base' in tools.config['init']) or ('base' in tools.config['update']):
                _logger.info('updating modules list')
                modobj.update_list(cr, SUPERUSER_ID)

            _check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys()))

            mods = [k for k in tools.config['init'] if tools.config['init'][k]]
            if mods:
                ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
                if ids:
                    modobj.button_install(cr, SUPERUSER_ID, ids)

            mods = [k for k in tools.config['update'] if tools.config['update'][k]]
            if mods:
                ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
                if ids:
                    modobj.button_upgrade(cr, SUPERUSER_ID, ids)

            # base was already processed in STEP 1: mark it installed directly.
            cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))
            modobj.invalidate_cache(cr, SUPERUSER_ID, ['state'])


        # STEP 3: Load marked modules (skipping base which was done in STEP 1)
        # IMPORTANT: this is done in two parts, first loading all installed or
        #            partially installed modules (i.e. installed/to upgrade), to
        #            offer a consistent system to the second part: installing
        #            newly selected modules.
        #            We include the modules 'to remove' in the first step, because
        #            they are part of the "currently installed" modules. They will
        #            be dropped in STEP 6 later, before restarting the loading
        #            process.
        # IMPORTANT 2: We have to loop here until all relevant modules have been
        #              processed, because in some rare cases the dependencies have
        #              changed, and modules that depend on an uninstalled module
        #              will not be processed on the first pass.
        #              It's especially useful for migrations.
        previously_processed = -1
        while previously_processed < len(processed_modules):
            previously_processed = len(processed_modules)
            processed_modules += load_marked_modules(cr, graph,
                ['installed', 'to upgrade', 'to remove'],
                force, status, report, loaded_modules, update_module)
            if update_module:
                processed_modules += load_marked_modules(cr, graph,
                    ['to install'], force, status, report,
                    loaded_modules, update_module)

        registry.setup_models(cr)

        # STEP 4: Finish and cleanup installations
        if processed_modules:
            # Warn about models without any access rule (would be unusable by
            # non-admin users).
            cr.execute("""select model,name from ir_model where id NOT IN (select distinct model_id from ir_model_access)""")
            for (model, name) in cr.fetchall():
                if model in registry and not registry[model].is_transient() and not isinstance(registry[model], openerp.osv.orm.AbstractModel):
                    _logger.warning('The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,,1,0,0,0',
                        model, model.replace('.', '_'), model.replace('.', '_'), model.replace('.', '_'))

            # Temporary warning while we remove access rights on osv_memory objects, as they have
            # been replaced by owner-only access rights
            cr.execute("""select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id""")
            for (model, name) in cr.fetchall():
                if model in registry and registry[model].is_transient():
                    _logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name)

            cr.execute("SELECT model from ir_model")
            for (model,) in cr.fetchall():
                if model in registry:
                    registry[model]._check_removed_columns(cr, log=True)
                else:
                    _logger.warning("Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)", model)

            # Cleanup orphan records
            registry['ir.model.data']._process_end(cr, SUPERUSER_ID, processed_modules)

        for kind in ('init', 'demo', 'update'):
            tools.config[kind] = {}

        cr.commit()

        # STEP 5: Cleanup menus
        # Remove menu items that are not referenced by any of other
        # (child) menu item, ir_values, or ir_model_data.
        # TODO: This code could be a method of ir_ui_menu. Remove menu without actions of children
        if update_module:
            # Loop because deleting leaf menus can orphan their parents.
            while True:
                cr.execute('''delete from
                        ir_ui_menu
                    where
                        (id not IN (select parent_id from ir_ui_menu where parent_id is not null))
                    and
                        (id not IN (select res_id from ir_values where model='ir.ui.menu'))
                    and
                        (id not IN (select res_id from ir_model_data where model='ir.ui.menu'))''')
                cr.commit()
                if not cr.rowcount:
                    break
                else:
                    _logger.info('removed %d unused menus', cr.rowcount)

        # STEP 6: Uninstall modules to remove
        if update_module:
            # Remove records referenced from ir_model_data for modules to be
            # removed (and removed the references from ir_model_data).
            cr.execute("SELECT name, id FROM ir_module_module WHERE state=%s", ('to remove',))
            modules_to_remove = dict(cr.fetchall())
            if modules_to_remove:
                # Uninstall in reverse dependency order so dependents go first.
                pkgs = reversed([p for p in graph if p.name in modules_to_remove])
                for pkg in pkgs:
                    uninstall_hook = pkg.info.get('uninstall_hook')
                    if uninstall_hook:
                        py_module = sys.modules['openerp.addons.%s' % (pkg.name,)]
                        getattr(py_module, uninstall_hook)(cr, registry)

                registry['ir.module.module'].module_uninstall(cr, SUPERUSER_ID, modules_to_remove.values())
                # Recursive reload, should only happen once, because there should be no
                # modules to remove next time
                cr.commit()
                _logger.info('Reloading registry once more after uninstalling modules')
                openerp.api.Environment.reset()
                return openerp.modules.registry.RegistryManager.new(cr.dbname, force_demo, status, update_module)

        # STEP 7: verify custom views on every model
        if update_module:
            Views = registry['ir.ui.view']
            custom_view_test = True
            for model in registry.models.keys():
                if not Views._validate_custom_views(cr, SUPERUSER_ID, model):
                    custom_view_test = False
                    _logger.error('invalid custom view(s) for model %s', model)
            report.record_result(custom_view_test)

        if report.failures:
            _logger.error('At least one test failed when loading the modules.')
        else:
            _logger.info('Modules loaded.')

        # STEP 8: call _register_hook on every model
        for model in registry.models.values():
            model._register_hook(cr)

        # STEP 9: Run the post-install tests
        cr.commit()

        t0 = time.time()
        t0_sql = openerp.sql_db.sql_counter
        if openerp.tools.config['test_enable']:
            cr.execute("SELECT name FROM ir_module_module WHERE state='installed'")
            for module_name in cr.fetchall():
                report.record_result(openerp.modules.module.run_unit_tests(module_name[0], cr.dbname, position=runs_post_install))
            _logger.log(25, "All post-tested in %.2fs, %s queries", time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
    finally:
        cr.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,376,758,463,459,973,000 | 44.784017 | 147 | 0.585385 | false |
phlax/pootle | pootle/apps/pootle_app/views/admin/util.py | 7 | 6759 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django import forms
from django.forms.models import modelformset_factory
from django.forms.utils import ErrorList
from django.shortcuts import render
from django.utils.safestring import mark_safe
from pootle.core.paginator import paginate
from pootle.i18n.gettext import ugettext as _
def form_set_as_table(formset, link=None, linkfield='code'):
    """Create an HTML table from the formset. The first form in the
    formset is used to obtain a list of the fields that need to be
    displayed.

    Errors, if there are any, appear in the row above the form which
    triggered any errors.

    If the forms are based on database models, the order of the
    columns is determined by the order of the fields in the model
    specification.

    :param formset: the formset to render
    :param link: optional callable (or truthy flag) rendering the link cell
        for the ``linkfield`` column instead of its widget
    :param linkfield: name of the field used as the row's link column
    :return: the assembled table markup as a unicode string (the caller is
        expected to wrap it with mark_safe)
    """

    def add_header(result, fields, form):
        # Emit the <thead> row; hidden/checkbox/multi-select columns are
        # marked unsortable for the sorttable JS widget.
        result.append('<tr>\n')
        for field in fields:
            widget = form.fields[field].widget
            widget_name = widget.__class__.__name__

            if widget.is_hidden or \
               widget_name in ('CheckboxInput', 'SelectMultiple'):
                result.append('<th class="sorttable_nosort">')
            else:
                result.append('<th>')

            if widget_name in ('CheckboxInput',):
                # A header checkbox toggles the whole column, so the widget
                # itself goes into the header cell next to its label.
                result.append(form[field].as_widget())
                result.append(form[field].label_tag())
            elif form.fields[field].label is not None and not widget.is_hidden:
                result.append(unicode(form.fields[field].label))

            result.append('</th>\n')
        result.append('</tr>\n')

    def add_footer(result, fields, form):
        # Emit the <tfoot> row: labels only, no widgets.
        result.append('<tr>\n')
        for field in fields:
            field_obj = form.fields[field]
            result.append('<td>')

            if field_obj.label is not None and not field_obj.widget.is_hidden:
                result.append(unicode(field_obj.label))

            result.append('</td>\n')
        result.append('</tr>\n')

    def add_errors(result, fields, form):
        # If the form has errors, then we'll add a table row with the
        # errors.
        if len(form.errors) > 0:
            result.append('<tr>\n')
            for field in fields:
                result.append('<td>')
                result.append(form.errors.get(field, ErrorList()).as_ul())
                result.append('</td>\n')
            result.append('</tr>\n')

    def add_widgets(result, fields, form, link):
        # Emit one data row with the editable widgets for this form.
        result.append('<tr class="item">\n')
        for i, field in enumerate(fields):
            result.append('<td class="%s">' % field)
            # Include a hidden element containing the form's id to the
            # first column.
            if i == 0:
                result.append(form['id'].as_hidden())

            # `link` indicates whether we put the first field as a link or as
            # widget
            if field == linkfield and linkfield in form.initial and link:
                if callable(link):
                    result.append(link(form.instance))
                result.append(form[field].as_hidden())
            else:
                result.append(form[field].as_widget())
            result.append('</td>\n')
        result.append('</tr>\n')

    result = []
    try:
        first_form = formset.forms[0]
        # Get the fields of the form, but filter out the 'id' field,
        # since we don't want to print a table column for it.
        fields = [field for field in first_form.fields if field != 'id']

        result.append('<thead>\n')
        add_header(result, fields, first_form)
        result.append('</thead>\n')

        result.append('<tfoot>\n')
        add_footer(result, fields, first_form)
        result.append('</tfoot>\n')

        result.append('<tbody>\n')

        # Do not display the delete checkbox for the 'add a new entry' form.
        if formset.extra_forms:
            formset.forms[-1].fields['DELETE'].widget = forms.HiddenInput()

        for form in formset.forms:
            add_errors(result, fields, form)
            add_widgets(result, fields, form, link)

        result.append('</tbody>\n')
    except IndexError:
        # Empty formset: formset.forms[0] raised IndexError above.
        result.append('<tr>\n')
        result.append('<td>\n')
        result.append(_('No files in this project.'))
        result.append('</td>\n')
        result.append('</tr>\n')

    return u''.join(result)
def process_modelformset(request, model_class, queryset, **kwargs):
    """Build a model formset for ``model_class`` over ``queryset`` and, on a
    POST request, validate and save the submitted data.

    :return: a (formset, error_message, page) triple; ``error_message`` is
        None unless validation failed.
    """
    # Build a formset class whose forms are derived from the fields of
    # `model_class`; construction options arrive through kwargs.
    formset_class = modelformset_factory(model_class, **kwargs)

    if queryset is None:
        queryset = model_class.objects.all()

    if request.method == 'POST' and request.POST:
        # Bind a formset over the current page so request.POST can update it.
        objects = paginate(request, queryset)
        formset = formset_class(request.POST, queryset=objects.object_list)

        if not formset.is_valid():
            # Report the validation problems back to the caller.
            return formset, _("There are errors in the form. Please review "
                              "the problems below."), objects

        # All forms validated: let Django persist the changes.
        formset.save()

        # Hack to force reevaluation of same query
        queryset = queryset.filter()

    objects = paginate(request, queryset)

    return formset_class(queryset=objects.object_list), None, objects
def edit(request, template, model_class, ctx=None,
         link=None, linkfield='code', queryset=None, **kwargs):
    """Process a model formset for ``model_class`` and render ``template``
    with the resulting table markup and pagination data.
    """
    formset, msg, objects = process_modelformset(
        request, model_class, queryset=queryset, **kwargs)

    # Keep mutating the caller-supplied context dict (visible side effect),
    # only creating a fresh one when none was given.
    if ctx is None:
        ctx = {}
    ctx['formset_text'] = mark_safe(form_set_as_table(formset, link, linkfield))
    ctx['formset'] = formset
    ctx['objects'] = objects
    ctx['error_msg'] = msg
    # 'extra' == 0 means no blank "add a new entry" form is rendered.
    ctx['can_add'] = kwargs.get('extra', 1) != 0

    return render(request, template, ctx)
| gpl-3.0 | -7,850,607,531,468,824,000 | 35.33871 | 79 | 0.599497 | false |
emiprotechnologies/connector-magento | __unported__/magentoerpconnect_export_partner/connector.py | 32 | 1031 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Import-time side effect: registering this addon with the connector
# framework so its ConnectorUnit classes are discoverable.
from openerp.addons.connector.connector import install_in_connector
install_in_connector()
| agpl-3.0 | -8,833,262,907,906,360,000 | 40.24 | 78 | 0.617847 | false |
liyitest/rr | openstack_dashboard/usage/views.py | 19 | 3332 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.usage import base
class UsageView(tables.DataTableView):
    """Base view rendering a usage summary either as HTML or as CSV,
    depending on the ``format`` query parameter.
    """
    usage_class = None
    show_terminated = True
    csv_template_name = None
    page_title = _("Overview")

    def __init__(self, *args, **kwargs):
        super(UsageView, self).__init__(*args, **kwargs)
        # Subclasses must point usage_class at a BaseUsage subclass.
        if not issubclass(self.usage_class, base.BaseUsage):
            raise AttributeError("You must specify a usage_class attribute "
                                 "which is a subclass of BaseUsage.")

    def _wants_csv(self):
        # Template name, content type and response class all hinge on this.
        return self.request.GET.get('format', 'html') == 'csv'

    def get_template_names(self):
        """Return the HTML template, or its CSV counterpart for CSV output."""
        if not self._wants_csv():
            return self.template_name
        if self.csv_template_name:
            return self.csv_template_name
        # Derive "<name>.csv" from "<name>.<ext>".
        base_name = self.template_name.rsplit('.', 1)[0]
        return ".".join((base_name, 'csv'))

    def get_content_type(self):
        return "text/csv" if self._wants_csv() else "text/html"

    def get_data(self):
        """Summarize usage for the project and return its usage rows;
        on any failure, surface the error via horizon and return [].
        """
        try:
            tenant = self.kwargs.get('project_id',
                                     self.request.user.tenant_id)
            self.usage = self.usage_class(self.request, tenant)
            self.usage.summarize(*self.usage.get_date_range())
            self.usage.get_limits()
            self.kwargs['usage'] = self.usage
            return self.usage.usage_list
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve usage information.'))
            return []

    def get_context_data(self, **kwargs):
        context = super(UsageView, self).get_context_data(**kwargs)
        context['table'].kwargs['usage'] = self.usage
        context['form'] = self.usage.form
        context['usage'] = self.usage
        # Probe nova for the SimpleTenantUsage extension; assume it is
        # available when the probe itself fails.
        try:
            simple_usage = api.nova.extension_supported('SimpleTenantUsage',
                                                        self.request)
        except Exception:
            simple_usage = True
        context['simple_tenant_usage_enabled'] = simple_usage
        return context

    def render_to_response(self, context, **response_kwargs):
        if self._wants_csv():
            render_class = self.csv_response_class
            response_kwargs.setdefault("filename", "usage.csv")
        else:
            render_class = self.response_class

        context = self.render_context_with_title(context)
        return render_class(request=self.request,
                            template=self.get_template_names(),
                            context=context,
                            content_type=self.get_content_type(),
                            **response_kwargs)
| apache-2.0 | 4,509,277,767,616,049,700 | 39.634146 | 79 | 0.605042 | false |
mattclark/osf.io | osf/models/analytics.py | 1 | 8085 | import logging
from dateutil import parser
from django.db import models, transaction
from django.db.models import Sum
from django.db.models.expressions import RawSQL
from django.utils import timezone
from framework.sessions import session
from osf.models.base import BaseModel, Guid
from osf.models.files import BaseFileNode
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
logger = logging.getLogger(__name__)
class UserActivityCounter(BaseModel):
    """Per-user activity counters, bucketed by action and by day."""
    primary_identifier_name = '_id'

    _id = models.CharField(max_length=5, null=False, blank=False, db_index=True,
                           unique=True)  # 5 in prod
    action = DateTimeAwareJSONField(default=dict)
    date = DateTimeAwareJSONField(default=dict)
    total = models.PositiveIntegerField(default=0)

    @classmethod
    def get_total_activity_count(cls, user_id):
        """Return the user's lifetime activity total, or 0 when no counter
        row exists yet for that user.
        """
        try:
            return cls.objects.get(_id=user_id).total
        except cls.DoesNotExist:
            return 0

    @classmethod
    def increment(cls, user_id, action, date_string):
        """Record one occurrence of ``action`` for ``user_id`` on the day
        given by ``date_string`` (any format dateutil can parse).
        """
        day = parser.parse(date_string).strftime('%Y/%m/%d')
        with transaction.atomic():
            # select_for_update locks the row but only inside a transaction
            counter, _created = cls.objects.select_for_update().get_or_create(_id=user_id)

            counter.total = counter.total + 1 if counter.total > 0 else 1

            # Per-action bucket: overall total plus a per-day breakdown.
            per_action = counter.action.setdefault(action, dict(total=0, date={}))
            per_action['total'] += 1
            per_action['date'][day] = per_action['date'].get(day, 0) + 1

            # Per-day bucket across all actions.
            per_day = counter.date.setdefault(day, dict(total=0))
            per_day['total'] += 1

            counter.save()
        return True
class PageCounter(BaseModel):
primary_identifier_name = '_id'
_id = models.CharField(max_length=300, null=False, blank=False, db_index=True,
unique=True) # 272 in prod
date = DateTimeAwareJSONField(default=dict)
total = models.PositiveIntegerField(default=0)
unique = models.PositiveIntegerField(default=0)
action = models.CharField(max_length=128, null=True, blank=True)
resource = models.ForeignKey(Guid, related_name='pagecounters', null=True, blank=True)
file = models.ForeignKey('osf.BaseFileNode', null=True, blank=True, related_name='pagecounters')
version = models.IntegerField(null=True, blank=True)
DOWNLOAD_ALL_VERSIONS_ID_PATTERN = r'^download:[^:]*:{1}[^:]*$'
    @classmethod
    def get_all_downloads_on_date(cls, date):
        """
        Queries the total number of downloads on a date.

        :param date: a date/datetime object (NOT a string) -- it is formatted
            internally with strftime into the 'YYYY/MM/DD' page-counter key.
        :return: sum of 'total' download counts across all matching
            all-versions counters for that day, or None when no counter has
            data for the date.
        """
        formatted_date = date.strftime('%Y/%m/%d')
        # Get all PageCounters with data for the date made for all versions downloads,
        # regex insures one colon so all versions are queried.
        page_counters = cls.objects.filter(date__has_key=formatted_date, _id__regex=cls.DOWNLOAD_ALL_VERSIONS_ID_PATTERN)

        # Get the total download numbers from the nested dict on the PageCounter by annotating it as daily_total then
        # aggregating the sum.
        daily_total = page_counters.annotate(daily_total=RawSQL("((date->%s->>'total')::int)", (formatted_date,))).aggregate(sum=Sum('daily_total'))['sum']
        return daily_total
@staticmethod
def clean_page(page):
return page.replace(
'.', '_'
).replace(
'$', '_'
)
@staticmethod
def deconstruct_id(page):
"""
Backwards compatible code for use in writing to both _id field and
action, resource, file, and version fields simultaneously.
"""
split = page.split(':')
action = split[0]
resource = Guid.load(split[1])
file = BaseFileNode.load(split[2])
if len(split) == 3:
version = None
else:
version = split[3]
return resource, file, action, version
@classmethod
def update_counter(cls, page, node_info):
cleaned_page = cls.clean_page(page)
date = timezone.now()
date_string = date.strftime('%Y/%m/%d')
visited_by_date = session.data.get('visited_by_date', {'date': date_string, 'pages': []})
with transaction.atomic():
# Temporary backwards compat - when creating new PageCounters, temporarily keep writing to _id field.
# After we're sure this is stable, we can stop writing to the _id field, and query on
# resource/file/action/version
resource, file, action, version = cls.deconstruct_id(cleaned_page)
model_instance, created = PageCounter.objects.select_for_update().get_or_create(_id=cleaned_page)
model_instance.resource = resource
model_instance.file = file
model_instance.action = action
model_instance.version = version
# if they visited something today
if date_string == visited_by_date['date']:
# if they haven't visited this page today
if cleaned_page not in visited_by_date['pages']:
# if the model_instance has today in it
if date_string in model_instance.date.keys():
# increment the number of unique visitors for today
model_instance.date[date_string]['unique'] += 1
else:
# set the number of unique visitors for today to 1
model_instance.date[date_string] = dict(unique=1)
# if they haven't visited something today
else:
# set their visited by date to blank
visited_by_date['date'] = date_string
visited_by_date['pages'] = []
# if the model_instance has today in it
if date_string in model_instance.date.keys():
# increment the number of unique visitors for today
model_instance.date[date_string]['unique'] += 1
else:
# set the number of unique visitors to 1
model_instance.date[date_string] = dict(unique=1)
# update their sessions
visited_by_date['pages'].append(cleaned_page)
session.data['visited_by_date'] = visited_by_date
if date_string in model_instance.date.keys():
if 'total' not in model_instance.date[date_string].keys():
model_instance.date[date_string].update(total=0)
model_instance.date[date_string]['total'] += 1
else:
model_instance.date[date_string] = dict(total=1)
# if a download counter is being updated, only perform the update
# if the user who is downloading isn't a contributor to the project
page_type = cleaned_page.split(':')[0]
if page_type in ('download', 'view') and node_info:
if node_info['contributors'].filter(guids___id__isnull=False, guids___id=session.data.get('auth_user_id')).exists():
model_instance.save()
return
visited = session.data.get('visited', [])
if page not in visited:
model_instance.unique += 1
visited.append(page)
session.data['visited'] = visited
session.save()
model_instance.total += 1
model_instance.save()
@classmethod
def get_basic_counters(cls, page):
try:
counter = cls.objects.get(_id=cls.clean_page(page))
return (counter.unique, counter.total)
except cls.DoesNotExist:
return (None, None)
| apache-2.0 | -3,922,777,190,991,896,600 | 40.25 | 155 | 0.587013 | false |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from .._error import (
_validate_not_none,
_validate_type_bytes,
_validate_encryption_unsupported,
_ERROR_VALUE_NEGATIVE,
)
from .._common_conversion import (
_to_str,
_int_to_str,
_datetime_to_utc_string,
_get_content_md5,
)
from .._serialization import (
_get_data_bytes_only,
_add_metadata_headers,
)
from .._http import HTTPRequest
from ._upload_chunking import (
_AppendBlobChunkUploader,
_upload_blob_chunks,
)
from .models import (
_BlobTypes,
ResourceProperties
)
from .._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
)
from ._serialization import (
_get_path,
)
from ._deserialization import (
_parse_append_block,
_parse_base_properties,
)
from .baseblobservice import BaseBlobService
from os import path
import sys
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
class AppendBlobService(BaseBlobService):
    '''
    An append blob is comprised of blocks and is optimized for append operations.
    When you modify an append blob, blocks are added to the end of the blob only,
    via the append_block operation. Updating or deleting of existing blocks is not
    supported. Unlike a block blob, an append blob does not expose its block IDs.

    Each block in an append blob can be a different size, up to a maximum of 4 MB,
    and an append blob can include up to 50,000 blocks. The maximum size of an
    append blob is therefore slightly more than 195 GB (4 MB X 50,000 blocks).

    :ivar int MAX_BLOCK_SIZE:
        The size of the blocks put by append_blob_from_* methods. Smaller blocks
        may be put if there is less data provided. The maximum block size the service
        supports is 4MB.
    '''

    # 4 MB — the service-side maximum size of a single append block.
    MAX_BLOCK_SIZE = 4 * 1024 * 1024
    def __init__(self, account_name=None, account_key=None, sas_token=None,
                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
                 custom_domain=None, request_session=None, connection_string=None, socket_timeout=None):
        '''
        :param str account_name:
            The storage account name. This is used to authenticate requests
            signed with an account key and to construct the storage endpoint. It
            is required unless a connection string is given, or if a custom
            domain is used with anonymous authentication.
        :param str account_key:
            The storage account key. This is used for shared key authentication.
            If neither account key or sas token is specified, anonymous access
            will be used.
        :param str sas_token:
            A shared access signature token to use to authenticate requests
            instead of the account key. If account key and sas token are both
            specified, account key will be used to sign. If neither are
            specified, anonymous access will be used.
        :param bool is_emulated:
            Whether to use the emulator. Defaults to False. If specified, will
            override all other parameters besides connection string and request
            session.
        :param str protocol:
            The protocol to use for requests. Defaults to https.
        :param str endpoint_suffix:
            The host base component of the url, minus the account name. Defaults
            to Azure (core.windows.net). Override this to use the China cloud
            (core.chinacloudapi.cn).
        :param str custom_domain:
            The custom domain to use. This can be set in the Azure Portal. For
            example, 'www.mydomain.com'.
        :param requests.Session request_session:
            The session object to use for http requests.
        :param str connection_string:
            If specified, this will override all other parameters besides
            request session. See
            http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
            for the connection string format.
        :param int socket_timeout:
            If specified, this will override the default socket timeout. The timeout specified is in seconds.
            See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
        '''
        # Mark every blob handled by this service as an append blob; assigned
        # before delegating to the base-class initializer.
        self.blob_type = _BlobTypes.AppendBlob
        super(AppendBlobService, self).__init__(
            account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix,
            custom_domain, request_session, connection_string, socket_timeout)
def create_blob(self, container_name, blob_name, content_settings=None,
metadata=None, lease_id=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Creates a blob or overrides an existing blob. Use if_match=* to
prevent overriding an existing blob.
See create_blob_from_* for high level
functions that handle the creation and upload of large blobs with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: a dict mapping str to str
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to
perform the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {'timeout': _int_to_str(timeout)}
request.headers = {
'x-ms-blob-type': _to_str(self.blob_type),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
_add_metadata_headers(metadata, request)
if content_settings is not None:
request.headers.update(content_settings._to_headers())
return self._perform_request(request, _parse_base_properties)
def append_block(self, container_name, blob_name, block,
validate_content=False, maxsize_condition=None,
appendpos_condition=None,
lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None):
'''
Commits a new block of data to the end of an existing append blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param bytes block:
Content of the block in bytes.
:param bool validate_content:
If true, calculates an MD5 hash of the block content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param int appendpos_condition:
Optional conditional header, used only for the Append Block operation.
A number indicating the byte offset to compare. Append Block will
succeed only if the append position is equal to this number. If it
is not, the request will fail with the
AppendPositionConditionNotMet error
(HTTP status code 412 - Precondition Failed).
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return:
ETag, last modified, append offset, and committed block count
properties for the updated Append Blob
:rtype: :class:`~azure.storage.blob.models.AppendBlockProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block', block)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'appendblock',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-blob-condition-maxsize': _to_str(maxsize_condition),
'x-ms-blob-condition-appendpos': _to_str(appendpos_condition),
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match)
}
request.body = _get_data_bytes_only('block', block)
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
return self._perform_request(request, _parse_append_block)
#----Convenience APIs----------------------------------------------
def append_blob_from_path(
self, container_name, blob_name, file_path, validate_content=False,
maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None):
'''
Appends to the content of an existing blob from a file path, with automatic
chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str file_path:
Path of the file to upload as the blob content.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
return self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
progress_callback=progress_callback,
lease_id=lease_id,
timeout=timeout)
def append_blob_from_bytes(
self, container_name, blob_name, blob, index=0, count=None,
validate_content=False, maxsize_condition=None, progress_callback=None,
lease_id=None, timeout=None):
'''
Appends to the content of an existing blob from an array of bytes, with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param bytes blob:
Content of blob as an array of bytes.
:param int index:
Start index in the array of bytes.
:param int count:
Number of bytes to upload. Set to None or negative value to upload
all bytes starting from index.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('blob', blob)
_validate_not_none('index', index)
_validate_type_bytes('blob', blob)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
if index < 0:
raise IndexError(_ERROR_VALUE_NEGATIVE.format('index'))
if count is None or count < 0:
count = len(blob) - index
stream = BytesIO(blob)
stream.seek(index)
return self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout)
def append_blob_from_text(
self, container_name, blob_name, text, encoding='utf-8',
validate_content=False, maxsize_condition=None, progress_callback=None,
lease_id=None, timeout=None):
'''
Appends to the content of an existing blob from str/unicode, with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str text:
Text to upload to the blob.
:param str encoding:
Python encoding to use to convert the text to bytes.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('text', text)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
if not isinstance(text, bytes):
_validate_not_none('encoding', encoding)
text = text.encode(encoding)
return self.append_blob_from_bytes(
container_name,
blob_name,
text,
index=0,
count=len(text),
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout)
def append_blob_from_stream(
self, container_name, blob_name, stream, count=None,
validate_content=False, maxsize_condition=None, progress_callback=None,
lease_id=None, timeout=None):
'''
Appends to the content of an existing blob from a file/stream, with
automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param io.IOBase stream:
Opened stream to upload as the blob content.
:param int count:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param int maxsize_condition:
Conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:param progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)
# _upload_blob_chunks returns the block ids for block blobs so resource_properties
# is passed as a parameter to get the last_modified and etag for page and append blobs.
# this info is not needed for block_blobs since _put_block_list is called after which gets this info
resource_properties = ResourceProperties()
_upload_blob_chunks(
blob_service=self,
container_name=container_name,
blob_name=blob_name,
blob_size=count,
block_size=self.MAX_BLOCK_SIZE,
stream=stream,
max_connections=1, # upload not easily parallelizable
progress_callback=progress_callback,
validate_content=validate_content,
lease_id=lease_id,
uploader_class=_AppendBlobChunkUploader,
maxsize_condition=maxsize_condition,
timeout=timeout,
resource_properties=resource_properties
)
return resource_properties | apache-2.0 | 2,562,339,810,262,622,700 | 48.740608 | 109 | 0.636599 | false |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from pylib.gtest import gtest_test_instance
class GtestTestInstanceTests(unittest.TestCase):
  """Unit tests for gtest_test_instance.ParseGTestListTests."""

  def _assertListTestsParseTo(self, raw_output, expected):
    """Parses raw_output and asserts the result equals expected.

    Shared helper that removes the parse/compare boilerplate repeated by
    every test in this suite.
    """
    actual = gtest_test_instance.ParseGTestListTests(raw_output)
    self.assertEqual(expected, actual)

  def testParseGTestListTests_simple(self):
    self._assertListTestsParseTo(
        [
            'TestCaseOne.',
            ' testOne',
            ' testTwo',
            'TestCaseTwo.',
            ' testThree',
            ' testFour',
        ],
        [
            'TestCaseOne.testOne',
            'TestCaseOne.testTwo',
            'TestCaseTwo.testThree',
            'TestCaseTwo.testFour',
        ])

  def testParseGTestListTests_typeParameterized_old(self):
    self._assertListTestsParseTo(
        [
            'TPTestCase/WithTypeParam/0.',
            ' testOne',
            ' testTwo',
        ],
        [
            'TPTestCase/WithTypeParam/0.testOne',
            'TPTestCase/WithTypeParam/0.testTwo',
        ])

  def testParseGTestListTests_typeParameterized_new(self):
    # Newer gtest annotates the suite line with the concrete type parameter.
    self._assertListTestsParseTo(
        [
            'TPTestCase/WithTypeParam/0. # TypeParam = TypeParam0',
            ' testOne',
            ' testTwo',
        ],
        [
            'TPTestCase/WithTypeParam/0.testOne',
            'TPTestCase/WithTypeParam/0.testTwo',
        ])

  def testParseGTestListTests_valueParameterized_old(self):
    self._assertListTestsParseTo(
        [
            'VPTestCase.',
            ' testWithValueParam/0',
            ' testWithValueParam/1',
        ],
        [
            'VPTestCase.testWithValueParam/0',
            'VPTestCase.testWithValueParam/1',
        ])

  def testParseGTestListTests_valueParameterized_new(self):
    # Newer gtest annotates each test line with the concrete value parameter.
    self._assertListTestsParseTo(
        [
            'VPTestCase.',
            ' testWithValueParam/0 # GetParam() = 0',
            ' testWithValueParam/1 # GetParam() = 1',
        ],
        [
            'VPTestCase.testWithValueParam/0',
            'VPTestCase.testWithValueParam/1',
        ])
# Allow running this suite directly: `python gtest_test_instance_test.py`.
if __name__ == '__main__':
  unittest.main(verbosity=2)
| bsd-3-clause | 8,328,131,488,996,295,000 | 26.523256 | 72 | 0.661597 | false |
# -*- coding: utf-8 -*-
"""
Django local settings for the chisch project.

Imported by 'settings.py'.
"""

# Toggles for the malicious-request authentication checks.
OPEN_MRP = False
OPEN_SSM = False

# Name of the header/field carrying the auth token.
ACCESS_TOKEN_NAME = 'Access-Token'

# Universal verification code (development/testing shortcut — accepts this
# code in place of a real SMS verification code).
UNIVERSAL_VERIFY_CODE = "888888"

# SECURITY NOTE(review): the access key ids/secrets below are hard-coded in
# source control. They should be rotated and loaded from environment
# variables or a secrets store instead of being committed here.
ALIYUN_OSS = {
    'BUCKET_NAME': 'chisch',
    'ACCESS_KEY_ID': 'LTAIFSaBApB2TuC4',
    'ACCESS_KEY_SECRET': '0qMu5s3yHEBrxb2klSyZKnHmOPb0HZ',
    'ENDPOINT': 'https://oss-cn-shenzhen.aliyuncs.com/',
    'ROLE_ARN': 'acs:ram::1709927201743129:role/aliyunosstokengeneratorrole',
    'TokenExpireTime': 60 * 60,
    'VIDEO_SIGN_URL_EXPIRES': 60 * 60,  # private video signed-URL lifetime: 60 minutes
    'REGION': 'cn-shenzhen',
    'ROLE_SESSION_NAME': 'chisch',
    'BOUNDARY': '-' * 10 + 'chisch' + '-' * 10,
    # Per-operation OSS object-key layout: target path, optional forced file
    # suffix, and whether only a single object is kept per owner.
    'OBJECT_KEYS_SUB_ELEMENTS': {
        'upload_user_avatar': {
            'path': 'image/user/avatar/',
            'only_one': True,
        },
        'create_curriculum': {
            'path': 'image/curriculum/cover/',
            'only_one': True,
        },
        'upload_video': {
            'path': 'video/',
            'file_suffix': '.mp4',
            'only_one': True,
        },
    }
}

# SMALL_OBJECT_UPPER_LIMIT = 1024 * 1024 * 10  # a "small object" is one no larger than 10 MB
OBJECT_LOCAL_TRANSFER_DIR = '/tmp/chisch/transfer/'  # local staging directory for uploads
OBJECT_PREFERRED_SIZE = 1024 * 1024 * 2  # multipart chunk size: 2 MB
IMAGE_TYPE = '.png'
VIDEO_TYPE = '.mp4'

# Aliyun Message Notification Service (SMS) settings.
# SECURITY NOTE(review): same hard-coded credentials as above.
MNS = {
    'ACCESS_KEY_ID': 'LTAIFSaBApB2TuC4',
    'ACCESS_KEY_SECRET': '0qMu5s3yHEBrxb2klSyZKnHmOPb0HZ',
    'ENDPOINT': 'https://1709927201743129.mns.cn-hangzhou.aliyuncs.com/',
    'SIGN_NAME': '千寻教育',
    'TEMPLATE_CODE': 'SMS_62440527',
    'TOPIC': 'sms.topic-cn-hangzhou',
    'QUEUE_NAME': 'verify-code-queue',
}

# Aliyun Video on Demand (VOD) API settings.
VOD = {
    'USER_UNIQUE': 'tkHh5O7431CgWayx',
    'SECRET_KEY': 'f8dc95d9b6d5cdfeb204a8b10ab2af1a',
    'FORMAT': 'json',
    'VERSION': '2017-03-21',
    'ACCESS_KEY_ID': 'LTAIFSaBApB2TuC4',
    'SIGNATUREMETHOD': 'HMAC-SHA1',
    'SIGNATUREVERSION': '1.0',
    'SERVER_URL': 'https://vod.cn-shanghai.aliyuncs.com'
}
| mit | -3,275,899,260,648,494,600 | 26.743243 | 77 | 0.601072 | false |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.framework.python.framework.checkpoint_utils import *
from tensorflow.contrib.framework.python.framework.experimental import experimental
from tensorflow.contrib.framework.python.framework.tensor_util import *
# pylint: enable=wildcard-import
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_arg_values
from tensorflow.python.util.deprecation import deprecated_args
| apache-2.0 | 921,204,177,520,799,200 | 47.6 | 83 | 0.758573 | false |
keelhaule/alfanous | conf.py | 5 | 8206 | # -*- coding: utf-8 -*-
#
# Alfanous Quranic Search Engine documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 8 07:42:48 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): generated by sphinx-quickstart; the commented-out assignments
# below document Sphinx's defaults and are kept intentionally.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Alfanous Quranic Search Engine'
copyright = u'2014, Assem Chelli'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.00'
# The full version, including alpha/beta/rc tags.
# NOTE(review): version and release are kept in lockstep here; bump both on a release.
release = '0.7.00'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AlfanousQuranicSearchEnginedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'AlfanousQuranicSearchEngine.tex', u'Alfanous Quranic Search Engine Documentation',
   u'Assem Chelli', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'alfanousquranicsearchengine', u'Alfanous Quranic Search Engine Documentation',
     [u'Assem Chelli'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'AlfanousQuranicSearchEngine', u'Alfanous Quranic Search Engine Documentation',
   u'Assem Chelli', 'AlfanousQuranicSearchEngine', 'One line description of project.',
   'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): the un-named mapping form {url: None} is deprecated in newer
# Sphinx releases in favour of {'python': (url, None)} -- verify against the
# Sphinx version used to build these docs.
intersphinx_mapping = {'http://docs.python.org/': None}
| agpl-3.0 | -4,448,920,611,093,793,000 | 32.222672 | 148 | 0.710943 | false |
harmodio/poesiapueril | poesiapuerilbot.py | 1 | 3499 | #/usr/bin/env python3
# -*- coding: utf8 -*-
##
## Copyright (c) 2017 Jorge Harmodio
## poesiapuerilbot is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## poesiapuerilbot is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Unoporuno. If not, see <http://www.gnu.org/licenses/>.
import tweepy
import time
import urllib3
#from time import sleep
import logging
# --- Twitter API credentials --------------------------------------------------
# SECURITY NOTE(review): these OAuth credentials are hard-coded and committed to
# the repository; they should be revoked and loaded from the environment or a
# config file instead.
consumer_key = 'PzwW4qupC1qy3562g2wtCfMV1'
consumer_secret = 'wCivK4jYECDUbJCQBn9294EtGsYAgk2Z5V8wyLFM31tNjqNypW'
access_token = '869611298547421184-PfE7owxRC1RLC7bgzp14jdjvCcyjpyV'
access_token_secret = 'aU4r2FCuZtU5d6tR1LbfAbXb0r7MlcejbEso7OlSax7jI'

# Example of pushing a manual tweet to the @poesiapueril account:
#tweet = 'hola\nsoy\nun\nbot\nde\n#poesíapueril'
#api.update_status(status=tweet)

logging.basicConfig(level=logging.INFO)
logging.info("Starting execution of poesiapuerilbot on " + time.asctime())


def _twitter_error_code(err):
    """Best-effort extraction of the numeric Twitter API error code.

    Returns None (instead of crashing the bot with IndexError/KeyError/
    TypeError) when the TweepError payload does not have the expected
    [{'code': ..., 'message': ...}] shape.
    """
    try:
        return err.args[0][0]['code']
    except (IndexError, KeyError, TypeError):
        return None


# IDs of tweets that must not be retweeted again (already retweeted, or
# reported as duplicates by the API).  Tracking numeric IDs instead of Status
# objects makes the membership test reliable: Status objects fetched by
# different searches are distinct objects even for the same tweet, so the old
# `tweet in repeated_tweets` check (identity-based) never matched.
#todo: repeated_tweet check must be done with a database (this set is lost on restart)
repeated_tweet_ids = set()
while True:
    # Set up OAuth and build a fresh API client for this polling cycle.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    tweets = []
    try:
        # Collect every tweet matching either the hashtag or the exact phrase.
        for tweet in tweepy.Cursor(api.search, q='#poesíapueril').items():
            tweets.append(tweet)
        for tweet in tweepy.Cursor(api.search, q='"poesía pueril"').items():
            tweets.append(tweet)
        logging.info("Trying to retweet a list of " + str(len(tweets)) + " tweets")
        for tweet in tweets:
            try:
                logging.info('Tweet by @' + tweet.user.screen_name)
                if tweet.user.screen_name == "poesiapueril":
                    # Never retweet the bot's own tweets.
                    logging.info('My tweet: excluded')
                elif tweet.id in repeated_tweet_ids:
                    logging.info('I already tweeted that one')
                else:
                    tweet.retweet()
                    # Remember the ID so later polls skip it instead of
                    # triggering a 327 "already retweeted" API error.
                    repeated_tweet_ids.add(tweet.id)
                    logging.info('Retweeted the tweet:ready to sleep')
                    time.sleep(1800)  # throttle: at most one retweet per 30 min
            except tweepy.TweepError as e:
                logging.error(str(e.reason))
                code = _twitter_error_code(e)
                if code == 185:
                    logging.info('Sleeping over a 185 error: User is over daily status limit')
                    #Catching 'User is over daily status limit' error: we will wait
                    time.sleep(1800)
                elif code == 327:
                    # 327 = "You have already retweeted this Tweet".
                    repeated_tweet_ids.add(tweet.id)
                    logging.error('Repeated tweet')
        # End of the sweep: sleep 30 min before polling again.
        time.sleep(1800)
    except urllib3.exceptions.ProtocolError as err:
        logging.error(str(err))
    except ConnectionResetError as err:  # subclass of OSError, kept for symmetry with the original
        logging.error(str(err))
    except OSError as err:
        logging.error(str(err))
| gpl-3.0 | 8,752,771,642,665,801,000 | 40.129412 | 94 | 0.638158 | false |
stephane-martin/salt-debian-packaging | salt-2016.3.2/tests/unit/beacons/adb_beacon_test.py | 2 | 12498 | # coding: utf-8
# Python libs
from __future__ import absolute_import
# Salt libs
from salt.beacons import adb
# Salt testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch, Mock
# Globals
adb.__salt__ = {}
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ADBBeaconTestCase(TestCase):
    '''
    Test case for salt.beacons.adb.

    Each test drives ``adb.beacon()`` with a canned ``adb devices`` (and,
    where relevant, battery-capacity) output via a mocked ``cmd.run``, and
    checks the list of events the beacon emits.
    '''
    def setUp(self):
        # Reset the beacon's module-level state so tests do not leak
        # previously-seen devices / battery levels into each other.
        adb.last_state = {}
        adb.last_state_extra = {'no_devices': False}
    def test_no_adb_command(self):
        """__virtual__ returns False when the adb binary is not on PATH."""
        with patch('salt.utils.which') as mock:
            mock.return_value = None
            ret = adb.__virtual__()
            mock.assert_called_once_with('adb')
            self.assertFalse(ret)
    def test_with_adb_command(self):
        """__virtual__ returns the module name when adb is available."""
        with patch('salt.utils.which') as mock:
            mock.return_value = '/usr/bin/adb'
            ret = adb.__virtual__()
            mock.assert_called_once_with('adb')
            self.assertEqual(ret, 'adb')
    def test_non_dict_config(self):
        """A non-dict config is rejected with an informative log message."""
        config = []
        # NOTE(review): replaces the module logger without restoring it and
        # setUp does not reset adb.log -- consider mock.patch() instead; verify.
        log_mock = Mock()
        adb.log = log_mock
        ret = adb.beacon(config)
        self.assertEqual(ret, [])
        log_mock.info.assert_called_once_with('Configuration for adb beacon must be a dict.')
    def test_empty_config(self):
        """A config without a 'states' list is rejected."""
        config = {}
        log_mock = Mock()
        adb.log = log_mock
        ret = adb.beacon(config)
        self.assertEqual(ret, [])
        log_mock.info.assert_called_once_with('Configuration for adb beacon must include a states array.')
    def test_invalid_states(self):
        """Unknown state names in the config are rejected."""
        config = {'states': ['Random', 'Failings']}
        log_mock = Mock()
        adb.log = log_mock
        ret = adb.beacon(config)
        self.assertEqual(ret, [])
        log_mock.info.assert_called_once_with('Need a one of the following adb states:'
                                              ' offline, bootloader, device, host, recovery, '
                                              'no permissions, sideload, unauthorized, unknown, missing')
    def test_device_state(self):
        """A device in a watched state produces one event."""
        config = {'states': ['device']}
        mock = Mock(return_value='List of devices attached\nHTC\tdevice',)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
    def test_device_state_change(self):
        """An event fires only when the device transitions into a watched state."""
        config = {'states': ['offline']}
        out = [
            'List of devices attached\nHTC\tdevice',
            'List of devices attached\nHTC\toffline'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [])
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'offline', 'tag': 'offline'}])
    def test_multiple_devices(self):
        """Multiple devices produce one event each for their watched states."""
        config = {'states': ['offline', 'device']}
        out = [
            'List of devices attached\nHTC\tdevice',
            'List of devices attached\nHTC\toffline\nNexus\tdevice'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [
                {'device': 'HTC', 'state': 'offline', 'tag': 'offline'},
                {'device': 'Nexus', 'state': 'device', 'tag': 'device'}
            ])
    def test_no_devices_with_different_states(self):
        """no_devices_event does not fire while a device is attached."""
        config = {'states': ['offline'], 'no_devices_event': True}
        mock = Mock(return_value='List of devices attached\nHTC\tdevice')
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [])
    def test_no_devices_no_repeat(self):
        """The no_devices event fires once, not on every empty poll."""
        config = {'states': ['offline', 'device'], 'no_devices_event': True}
        out = [
            'List of devices attached',
            'List of devices attached'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'tag': 'no_devices'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [])
    def test_device_missing(self):
        """A device disappearing fires 'missing'; reappearing fires 'device' again."""
        config = {'states': ['device', 'missing']}
        out = [
            'List of devices attached\nHTC\tdevice',
            'List of devices attached',
            'List of devices attached\nHTC\tdevice',
            'List of devices attached\nHTC\tdevice'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'missing', 'tag': 'missing'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [])
    def test_with_startup(self):
        """The 'daemon started' banner in adb output is ignored."""
        config = {'states': ['device']}
        mock = Mock(return_value='* daemon started successfully *\nList of devices attached\nHTC\tdevice',)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
    def test_with_user(self):
        """A configured 'user' is passed through to cmd.run as runas."""
        config = {'states': ['device'], 'user': 'fred'}
        mock = Mock(return_value='* daemon started successfully *\nList of devices attached\nHTC\tdevice',)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            mock.assert_called_once_with('adb devices', runas='fred')
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
    def test_device_low_battery(self):
        """A battery level below battery_low adds a battery_low event."""
        config = {'states': ['device'], 'battery_low': 30}
        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
    def test_device_no_repeat(self):
        """An unchanged low battery level does not fire again."""
        config = {'states': ['device'], 'battery_low': 30}
        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '25'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [])
    def test_device_no_repeat_capacity_increase(self):
        """A battery level rising but still below the threshold does not re-fire."""
        config = {'states': ['device'], 'battery_low': 75}
        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '30'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [])
    def test_device_no_repeat_with_not_found_state(self):
        """battery_low still fires (once) when the device state is not watched."""
        config = {'states': ['offline'], 'battery_low': 30}
        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '25'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [])
    def test_device_battery_charged(self):
        """A full battery produces no battery_low event."""
        config = {'states': ['device'], 'battery_low': 30}
        out = [
            'List of devices attached\nHTC\tdevice',
            '100',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
    def test_device_low_battery_equal(self):
        """A battery level equal to battery_low fires (threshold is inclusive)."""
        config = {'states': ['device'], 'battery_low': 25}
        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
    def test_device_battery_not_found(self):
        """Unreadable battery capacity output is ignored gracefully."""
        config = {'states': ['device'], 'battery_low': 25}
        out = [
            'List of devices attached\nHTC\tdevice',
            '/system/bin/sh: cat: /sys/class/power_supply/*/capacity: No such file or directory',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
    def test_device_repeat_multi(self):
        """battery_low re-fires after the level recovers above the threshold."""
        config = {'states': ['offline'], 'battery_low': 35}
        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '40',
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '80'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [])
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
            ret = adb.beacon(config)
            self.assertEqual(ret, [])
    def test_weird_batteries(self):
        """Out-of-range (negative) battery readings are ignored."""
        config = {'states': ['device'], 'battery_low': 25}
        out = [
            'List of devices attached\nHTC\tdevice',
            '-9000',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])
    def test_multiple_batteries(self):
        """With several battery readings, the first one below threshold fires."""
        config = {'states': ['device'], 'battery_low': 30}
        out = [
            'List of devices attached\nHTC\tdevice',
            '25\n40',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
    def test_multiple_low_batteries(self):
        """Several low readings still produce a single battery_low event."""
        config = {'states': ['device'], 'battery_low': 30}
        out = [
            'List of devices attached\nHTC\tdevice',
            '25\n14',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
| apache-2.0 | -2,684,216,269,642,742,300 | 33.524862 | 107 | 0.521523 | false |
NCAR/mizuRoute | manage_externals/test/doc/conf.py | 1 | 5279 | # -*- coding: utf-8 -*-
#
# Manage Externals documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 29 10:53:25 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# NOTE(review): generated by sphinx-quickstart; commented-out assignments
# below document Sphinx's defaults and are kept intentionally.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.viewcode',
              'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Manage Externals'
copyright = u'2017, CSEG at NCAR'
author = u'CSEG at NCAR'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
# NOTE(review): version and release are kept in lockstep here; bump both on a release.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ManageExternalsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'ManageExternals.tex', u'Manage Externals Documentation',
     u'CSEG at NCAR', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'manageexternals', u'Manage Externals Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'ManageExternals', u'Manage Externals Documentation',
     author, 'ManageExternals', 'One line description of project.',
     'Miscellaneous'),
]
| gpl-3.0 | 5,567,753,618,490,878,000 | 29.69186 | 79 | 0.677022 | false |
davgibbs/django | tests/get_object_or_404/models.py | 409 | 1133 | """
DB-API Shortcuts
``get_object_or_404()`` is a shortcut function to be used in view functions for
performing a ``get()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``get()`` call.
``get_list_or_404()`` is a shortcut function to be used in view functions for
performing a ``filter()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``filter()`` call.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    """Minimal author model used by the get_object_or_404 shortcut tests."""
    # Display name; also what __str__ returns.
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
class ArticleManager(models.Manager):
    """Manager restricted to articles with an author whose name contains 'sir'.

    Used to test that get_object_or_404 / get_list_or_404 respect a custom
    manager's filtered queryset (the lookup is case-insensitive).
    """
    def get_queryset(self):
        return super(ArticleManager, self).get_queryset().filter(authors__name__icontains='sir')
@python_2_unicode_compatible
class Article(models.Model):
    """Article with two managers: the default one and a filtered one."""
    authors = models.ManyToManyField(Author)
    title = models.CharField(max_length=50)
    # Declared first so it remains the model's default manager.
    objects = models.Manager()
    # Filtered manager: only articles with an author named like 'sir'.
    by_a_sir = ArticleManager()
    def __str__(self):
        return self.title
| bsd-3-clause | -8,077,363,394,293,510,000 | 28.815789 | 96 | 0.706973 | false |
c7zero/chipsec | chipsec/modules/common/secureboot/variables.py | 9 | 7895 | #CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
"""
`UEFI 2.4 spec Section 28 <http://uefi.org/>`_
Verify that all Secure Boot key/whitelist/blacklist UEFI variables are authenticated (BS+RT+AT)
and protected from unauthorized modification.
Use '-a modify' option for the module to also try to write/corrupt the variables.
"""
from chipsec.module_common import *
import chipsec.file
from chipsec.hal.uefi import *
# ############################################################
# SPECIFY PLATFORMS THIS MODULE IS APPLICABLE TO
# ############################################################
# Module name under which chipsec registers this check.
_MODULE_NAME = 'variables'
# Module tags: this check belongs to the Secure Boot test group.
TAGS = [MTAG_SECUREBOOT]
class variables(BaseModule):
def __init__(self):
BaseModule.__init__(self)
self._uefi = UEFI( self.cs )
def is_supported( self ):
supported = self.cs.helper.EFI_supported()
if not supported: self.logger.log_skipped_check( "OS does not support UEFI Runtime API" )
return supported
def can_modify( self, name, guid, data, attrs ):
self.logger.log( " > attempting to modify variable %s:%s" % (guid,name) )
datalen = len(data)
#print_buffer( data )
baddata = chr( ord(data[0]) ^ 0xFF ) + data[1:]
#if datalen > 1: baddata = baddata[:datalen-1] + chr( ord(baddata[datalen-1]) ^ 0xFF )
status = self._uefi.set_EFI_variable( name, guid, baddata )
if StatusCode.EFI_SUCCESS != status: self.logger.log( ' < modification of %s returned error 0x%X' % (name,status) )
else: self.logger.log( ' < modification of %s returned succees' % name )
self.logger.log( ' > checking variable %s contents after modification..' % name )
newdata = self._uefi.get_EFI_variable( name, guid )
#print_buffer( newdata )
#chipsec.file.write_file( name+'_'+guid+'.bin', data )
#chipsec.file.write_file( name+'_'+guid+'.bin.bad', baddata )
#chipsec.file.write_file( name+'_'+guid+'.bin.new', newdata )
_changed = (data != newdata)
if _changed:
self.logger.log_bad( "EFI variable %s has been modified. Restoring original contents.." % name )
self._uefi.set_EFI_variable( name, guid, data )
# checking if restored correctly
restoreddata = self._uefi.get_EFI_variable( name, guid )
#print_buffer( restoreddata )
if (restoreddata != data): self.logger.error( "Failed to restore contents of variable %s failed!" % name )
else: self.logger.log( " contents of variable %s have been restored" % name )
else:
self.logger.log_good( "Could not modify UEFI variable %s:%s" % (guid,name) )
return _changed
## check_secureboot_variable_attributes
# checks authentication attributes of Secure Boot EFI variables
def check_secureboot_variable_attributes( self, do_modify ):
res = ModuleResult.ERROR
not_found = 0
not_auth = 0
not_wp = 0
is_secureboot_enabled = False
sbvars = self._uefi.list_EFI_variables()
if sbvars is None:
self.logger.log_error_check( 'Could not enumerate UEFI variables (non-UEFI OS?)' )
return ModuleResult.ERROR
for name in SECURE_BOOT_VARIABLES:
if name in sbvars.keys() and sbvars[name] is not None:
if len(sbvars[name]) > 1:
self.logger.log_failed_check( 'There should only be one instance of variable %s' % name )
return ModuleResult.FAILED
for (off, buf, hdr, data, guid, attrs) in sbvars[name]:
self.logger.log( "[*] Checking protections of UEFI variable %s:%s" % (guid,name) )
# check the status of Secure Boot
if EFI_VAR_NAME_SecureBoot == name:
is_secureboot_enabled = (data is not None and len(data) == 1 and ord(data) == 0x1)
#
# Verify if the Secure Boot key/database variable is authenticated
#
if name in SECURE_BOOT_KEY_VARIABLES:
if IS_VARIABLE_ATTRIBUTE( attrs, EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS ):
self.logger.log_good( 'Variable %s:%s is authenticated (AUTHENTICATED_WRITE_ACCESS)' % (guid,name) )
elif IS_VARIABLE_ATTRIBUTE( attrs, EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS ):
self.logger.log_good( 'Variable %s:%s is authenticated (TIME_BASED_AUTHENTICATED_WRITE_ACCESS)' % (guid,name) )
else:
not_auth += 1
self.logger.log_bad( 'Variable %s:%s is not authenticated' % (guid,name) )
#
# Attempt to modify contents of the variables
#
if do_modify:
if self.can_modify( name, guid, data, attrs ): not_wp += 1
else:
not_found += 1
self.logger.log_important( 'Secure Boot variable %s is not found' % name )
continue
self.logger.log( '' )
self.logger.log( '[*] Secure Boot appears to be %sabled' % ('en' if is_secureboot_enabled else 'dis') )
if len(SECURE_BOOT_VARIABLES) == not_found:
# None of Secure Boot variables were not found
self.logger.log_skipped_check( 'None of required Secure Boot variables found. Secure Boot is not enabled' )
return ModuleResult.SKIPPED
else:
# Some Secure Boot variables exist
sb_vars_failed = (not_found > 0) or (not_auth > 0) or (not_wp > 0)
if sb_vars_failed:
if not_found > 0: self.logger.log_bad( "Some required Secure Boot variables are missing" )
if not_auth > 0: self.logger.log_bad( 'Some Secure Boot keying variables are not authenticated' )
if not_wp > 0: self.logger.log_bad( 'Some Secure Boot variables can be modified' )
if is_secureboot_enabled:
self.logger.log_failed_check( 'Not all Secure Boot UEFI variables are protected' )
return ModuleResult.FAILED
else:
self.logger.log_warn_check( 'Not all Secure Boot UEFI variables are protected' )
return ModuleResult.WARNING
else:
self.logger.log_passed_check( 'All Secure Boot UEFI variables are protected' )
return ModuleResult.PASSED
# --------------------------------------------------------------------------
# run( module_argv )
# Required function: run here all tests from this module
# --------------------------------------------------------------------------
def run( self, module_argv ):
self.logger.start_test( "Attributes of Secure Boot EFI Variables" )
do_modify = (len(module_argv) > 0 and module_argv[0] == OPT_MODIFY)
return self.check_secureboot_variable_attributes( do_modify )
| gpl-2.0 | 2,682,047,869,320,711,000 | 43.857955 | 139 | 0.579481 | false |
sebfung/yellowpillowcase | vendor/cache/gems/pygments.rb-0.6.3/vendor/pygments-main/setup.py | 36 | 2931 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image \
formats that PIL supports and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/default.zip#egg=Pygments-dev
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
try:
from setuptools import setup, find_packages
have_setuptools = True
except ImportError:
try:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
have_setuptools = True
except ImportError:
from distutils.core import setup
def find_packages(*args, **kwargs):
return [
'pygments',
'pygments.lexers',
'pygments.formatters',
'pygments.styles',
'pygments.filters',
]
have_setuptools = False
if have_setuptools:
add_keywords = dict(
entry_points = {
'console_scripts': ['pygmentize = pygments.cmdline:main'],
},
)
else:
add_keywords = dict(
scripts = ['pygmentize'],
)
setup(
name = 'Pygments',
version = '2.0pre',
url = 'http://pygments.org/',
license = 'BSD License',
author = 'Georg Brandl',
author_email = 'georg@python.org',
description = 'Pygments is a syntax highlighting package written in Python.',
long_description = __doc__,
keywords = 'syntax highlighting',
packages = find_packages(exclude=['ez_setup']),
platforms = 'any',
zip_safe = False,
include_package_data = True,
classifiers = [
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'Development Status :: 6 - Mature',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
],
**add_keywords
)
| mit | 8,159,362,825,115,150,000 | 31.566667 | 85 | 0.620266 | false |
munyirik/python | cpython/Lib/test/test_threading.py | 2 | 39155 | """
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module, cpython_only
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
class Counter(object):
    """A trivial mutable integer counter (no locking of its own)."""

    def __init__(self):
        self.value = 0

    def inc(self):
        """Increase the count by one."""
        self.value = self.value + 1

    def dec(self):
        """Decrease the count by one."""
        self.value = self.value - 1

    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    """Worker thread used to verify that at most three siblings run at once.

    Concurrency is limited by the shared semaphore; the shared counter
    (guarded by the shared mutex) tracks how many workers are inside the
    semaphore at any moment.
    """

    def __init__(self, name, testcase, sema, mutex, nrunning):
        super().__init__(name=name)
        self.testcase, self.sema = testcase, sema
        self.mutex, self.nrunning = mutex, nrunning

    def run(self):
        # Tiny randomized delay so the workers interleave unpredictably.
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))

        with self.sema:
            # Entered the semaphore: bump the running count and check the
            # semaphore's bound is respected.
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                self.testcase.assertTrue(self.nrunning.get() <= 3)

            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')

            # Leaving: drop the running count, which must never go negative.
            with self.mutex:
                self.nrunning.dec()
                self.testcase.assertTrue(self.nrunning.get() >= 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    """TestCase that snapshots thread state and restores it afterwards."""

    def setUp(self):
        # Remember the live-thread baseline so tearDown can detect and wait
        # out any threads a test leaks.
        self._threads = test.support.threading_setup()

    def tearDown(self):
        test.support.threading_cleanup(*self._threads)
        test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, stopped -?\d+\)>',
repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
def test_deamon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -5,379,710,562,470,613,000 | 34.530853 | 89 | 0.55242 | false |
Dino0631/RedRain-Bot | lib/youtube_dl/extractor/alphaporno.py | 64 | 2724 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
parse_duration,
parse_filesize,
int_or_none,
)
class AlphaPornoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?alphaporno\.com/videos/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.alphaporno.com/videos/sensual-striptease-porn-with-samantha-alexandra/',
'md5': 'feb6d3bba8848cd54467a87ad34bd38e',
'info_dict': {
'id': '258807',
'display_id': 'sensual-striptease-porn-with-samantha-alexandra',
'ext': 'mp4',
'title': 'Sensual striptease porn with Samantha Alexandra',
'thumbnail': r're:https?://.*\.jpg$',
'timestamp': 1418694611,
'upload_date': '20141216',
'duration': 387,
'filesize_approx': 54120000,
'tbr': 1145,
'categories': list,
'age_limit': 18,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r"video_id\s*:\s*'([^']+)'", webpage, 'video id', default=None)
video_url = self._search_regex(
r"video_url\s*:\s*'([^']+)'", webpage, 'video url')
ext = self._html_search_meta(
'encodingFormat', webpage, 'ext', default='.mp4')[1:]
title = self._search_regex(
[r'<meta content="([^"]+)" itemprop="description">',
r'class="title" itemprop="name">([^<]+)<'],
webpage, 'title')
thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail')
timestamp = parse_iso8601(self._html_search_meta(
'uploadDate', webpage, 'upload date'))
duration = parse_duration(self._html_search_meta(
'duration', webpage, 'duration'))
filesize_approx = parse_filesize(self._html_search_meta(
'contentSize', webpage, 'file size'))
bitrate = int_or_none(self._html_search_meta(
'bitrate', webpage, 'bitrate'))
categories = self._html_search_meta(
'keywords', webpage, 'categories', default='').split(',')
age_limit = self._rta_search(webpage)
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'ext': ext,
'title': title,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'filesize_approx': filesize_approx,
'tbr': bitrate,
'categories': categories,
'age_limit': age_limit,
}
| gpl-3.0 | -7,001,275,518,864,416,000 | 34.376623 | 99 | 0.534875 | false |
Dino0631/RedRain-Bot | cogs/lib/youtube_dl/extractor/miomio.py | 61 | 5068 | # coding: utf-8
from __future__ import unicode_literals
import random
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
xpath_text,
int_or_none,
ExtractorError,
sanitized_Request,
)
class MioMioIE(InfoExtractor):
IE_NAME = 'miomio.tv'
_VALID_URL = r'https?://(?:www\.)?miomio\.tv/watch/cc(?P<id>[0-9]+)'
_TESTS = [{
# "type=video" in flashvars
'url': 'http://www.miomio.tv/watch/cc88912/',
'info_dict': {
'id': '88912',
'ext': 'flv',
'title': '【SKY】字幕 铠武昭和VS平成 假面骑士大战FEAT战队 魔星字幕组 字幕',
'duration': 5923,
},
'skip': 'Unable to load videos',
}, {
'url': 'http://www.miomio.tv/watch/cc184024/',
'info_dict': {
'id': '43729',
'title': '《动漫同人插画绘制》',
},
'playlist_mincount': 86,
'skip': 'Unable to load videos',
}, {
'url': 'http://www.miomio.tv/watch/cc173113/',
'info_dict': {
'id': '173113',
'title': 'The New Macbook 2015 上手试玩与简评'
},
'playlist_mincount': 2,
'skip': 'Unable to load videos',
}, {
# new 'h5' player
'url': 'http://www.miomio.tv/watch/cc273997/',
'md5': '0b27a4b4495055d826813f8c3a6b2070',
'info_dict': {
'id': '273997',
'ext': 'mp4',
'title': 'マツコの知らない世界【劇的進化SP!ビニール傘&冷凍食品2016】 1_2 - 16 05 31',
},
'skip': 'Unable to load videos',
}]
def _extract_mioplayer(self, webpage, video_id, title, http_headers):
xml_config = self._search_regex(
r'flashvars="type=(?:sina|video)&(.+?)&',
webpage, 'xml config')
# skipping the following page causes lags and eventually connection drop-outs
self._request_webpage(
'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (id, random.randint(100, 999)),
video_id)
vid_config_request = sanitized_Request(
'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config),
headers=http_headers)
# the following xml contains the actual configuration information on the video file(s)
vid_config = self._download_xml(vid_config_request, video_id)
if not int_or_none(xpath_text(vid_config, 'timelength')):
raise ExtractorError('Unable to load videos!', expected=True)
entries = []
for f in vid_config.findall('./durl'):
segment_url = xpath_text(f, 'url', 'video url')
if not segment_url:
continue
order = xpath_text(f, 'order', 'order')
segment_id = video_id
segment_title = title
if order:
segment_id += '-%s' % order
segment_title += ' part %s' % order
entries.append({
'id': segment_id,
'url': segment_url,
'title': segment_title,
'duration': int_or_none(xpath_text(f, 'length', 'duration'), 1000),
'http_headers': http_headers,
})
return entries
def _download_chinese_webpage(self, *args, **kwargs):
# Requests with English locales return garbage
headers = {
'Accept-Language': 'zh-TW,en-US;q=0.7,en;q=0.3',
}
kwargs.setdefault('headers', {}).update(headers)
return self._download_webpage(*args, **kwargs)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_chinese_webpage(
url, video_id)
title = self._html_search_meta(
'description', webpage, 'title', fatal=True)
mioplayer_path = self._search_regex(
r'src="(/mioplayer(?:_h5)?/[^"]+)"', webpage, 'ref_path')
if '_h5' in mioplayer_path:
player_url = compat_urlparse.urljoin(url, mioplayer_path)
player_webpage = self._download_chinese_webpage(
player_url, video_id,
note='Downloading player webpage', headers={'Referer': url})
entries = self._parse_html5_media_entries(player_url, player_webpage, video_id)
http_headers = {'Referer': player_url}
else:
http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path}
entries = self._extract_mioplayer(webpage, video_id, title, http_headers)
if len(entries) == 1:
segment = entries[0]
segment['id'] = video_id
segment['title'] = title
segment['http_headers'] = http_headers
return segment
return {
'_type': 'multi_video',
'id': video_id,
'entries': entries,
'title': title,
'http_headers': http_headers,
}
| gpl-3.0 | 3,032,880,996,017,745,000 | 33.964539 | 118 | 0.534686 | false |
40123242/cdw11-bg3 | static/plugin/liquid_tags/diag.py | 270 | 4096 | """
Blockdiag Tag
---------
This tag implements a liquid style tag for blockdiag [1]. You can use different
diagram types like blockdiag, seqdiag, packetdiag etc. [1]
[1] http://blockdiag.com/en/blockdiag/
Syntax
------
{% blockdiag {
<diagramm type> {
<CODE>
}
}
%}
Examples
--------
{% blockdiag {
blockdiag {
A -> B -> C;
B -> D;
}
}
%}
{% blockdiag {
actdiag {
A -> B -> C -> D -> E;
lane {
A; C; E;
}
lane {
B; D;
}
}
}
%}
{% blockdiag {
packetdiag {
0-7: Source Port
8-15: Destination Port
16-31: Sequence Number
32-47: Acknowledgment Number
}
}
%}
...
Output
------
<div class="blockdiag" style="align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></div>
"""
import io
import os
import sys
import base64
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% blockdiag [diagram type] [code] %}'
DOT_BLOCK_RE = re.compile(r'^\s*(?P<diagram>\w+).*$', re.MULTILINE | re.DOTALL)
_draw_mode = 'PNG'
_publish_mode = 'PNG'
def get_diag(code, command):
""" Generate diagramm and return data """
import tempfile
import shutil
code = code + u'\n'
try:
tmpdir = tempfile.mkdtemp()
fd, diag_name = tempfile.mkstemp(dir=tmpdir)
f = os.fdopen(fd, "w")
f.write(code.encode('utf-8'))
f.close()
format = _draw_mode.lower()
draw_name = diag_name + '.' + format
saved_argv = sys.argv
argv = [diag_name, '-T', format, '-o', draw_name]
if _draw_mode == 'SVG':
argv += ['--ignore-pil']
# Run command
command.main(argv)
# Read image data from file
file_name = diag_name + '.' + _publish_mode.lower()
with io.open(file_name, 'rb') as f:
data = f.read()
f.close()
finally:
for file in os.listdir(tmpdir):
os.unlink(tmpdir + "/" + file)
# os.rmdir will fail -> use shutil
shutil.rmtree(tmpdir)
return data
def diag(code, command):
if command == "blockdiag": # blockdiag
import blockdiag.command
return get_diag(code, blockdiag.command)
elif command == "diagram": # diagram
import blockdiag.command
return get_diag(code, blockdiag.command)
elif command == "seqdiag": # seqdiag
import seqdiag.command
return get_diag(code, seqdiag.command)
elif command == "actdiag": # actdiag
import actdiag.command
return get_diag(code, actdiag.command)
elif command == "nwdiag": # nwdiag
import nwdiag.command
return get_diag(code, nwdiag.command)
elif command == "packetdiag": # packetdiag
import packetdiag.command
return get_diag(code, packetdiag.command)
elif command == "rackdiag": # racketdiag
import rackdiag.command
return get_diag(code, rackdiag.command)
else: # not found
print("No such command %s" % command)
return None
@LiquidTags.register("blockdiag")
def blockdiag_parser(preprocessor, tag, markup):
""" Blockdiag parser """
m = DOT_BLOCK_RE.search(markup)
if m:
# Get diagram type and code
diagram = m.group('diagram').strip()
code = markup
# Run command
output = diag(code, diagram)
if output:
# Return Base64 encoded image
return '<div class="blockdiag" style="align: center;"><img src="data:image/png;base64,%s"></div>' % base64.b64encode(output)
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
| agpl-3.0 | 625,922,952,458,892,900 | 22.141243 | 136 | 0.532471 | false |
beezee/GAE-Django-site | django/contrib/gis/geos/tests/test_geos_mutation.py | 245 | 5464 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import django.utils.copycompat as copy
from django.contrib.gis.geos import *
from django.contrib.gis.geos.error import GEOSIndexError
from django.utils import unittest
def getItem(o,i): return o[i]
def delItem(o,i): del o[i]
def setItem(o,i,v): o[i] = v
def api_get_distance(x): return x.distance(Point(-200,-200))
def api_get_buffer(x): return x.buffer(10)
def api_get_geom_typeid(x): return x.geom_typeid
def api_get_num_coords(x): return x.num_coords
def api_get_centroid(x): return x.centroid
def api_get_empty(x): return x.empty
def api_get_valid(x): return x.valid
def api_get_simple(x): return x.simple
def api_get_ring(x): return x.ring
def api_get_boundary(x): return x.boundary
def api_get_convex_hull(x): return x.convex_hull
def api_get_extent(x): return x.extent
def api_get_area(x): return x.area
def api_get_length(x): return x.length
geos_function_tests = [ val for name, val in vars().items()
if hasattr(val, '__call__')
and name.startswith('api_get_') ]
class GEOSMutationTest(unittest.TestCase):
"""
Tests Pythonic Mutability of Python GEOS geometry wrappers
get/set/delitem on a slice, normal list methods
"""
def test00_GEOSIndexException(self):
'Testing Geometry GEOSIndexError'
p = Point(1,2)
for i in range(-2,2): p._checkindex(i)
self.assertRaises(GEOSIndexError, p._checkindex, 2)
self.assertRaises(GEOSIndexError, p._checkindex, -3)
def test01_PointMutations(self):
'Testing Point mutations'
for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
self.assertEqual(p._get_single_external(1), 2.0, 'Point _get_single_external')
# _set_single
p._set_single(0,100)
self.assertEqual(p.coords, (100.0,2.0,3.0), 'Point _set_single')
# _set_list
p._set_list(2,(50,3141))
self.assertEqual(p.coords, (50.0,3141.0), 'Point _set_list')
def test02_PointExceptions(self):
'Testing Point exceptions'
self.assertRaises(TypeError, Point, range(1))
self.assertRaises(TypeError, Point, range(4))
def test03_PointApi(self):
'Testing Point API'
q = Point(4,5,3)
for p in (Point(1,2,3), fromstr('POINT (1 2 3)')):
p[0:2] = [4,5]
for f in geos_function_tests:
self.assertEqual(f(q), f(p), 'Point ' + f.__name__)
def test04_LineStringMutations(self):
'Testing LineString mutations'
for ls in (LineString((1,0),(4,1),(6,-1)),
fromstr('LINESTRING (1 0,4 1,6 -1)')):
self.assertEqual(ls._get_single_external(1), (4.0,1.0), 'LineString _get_single_external')
# _set_single
ls._set_single(0,(-50,25))
self.assertEqual(ls.coords, ((-50.0,25.0),(4.0,1.0),(6.0,-1.0)), 'LineString _set_single')
# _set_list
ls._set_list(2, ((-50.0,25.0),(6.0,-1.0)))
self.assertEqual(ls.coords, ((-50.0,25.0),(6.0,-1.0)), 'LineString _set_list')
lsa = LineString(ls.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(ls), 'LineString ' + f.__name__)
def test05_Polygon(self):
'Testing Polygon mutations'
for pg in (Polygon(((1,0),(4,1),(6,-1),(8,10),(1,0)),
((5,4),(6,4),(6,3),(5,4))),
fromstr('POLYGON ((1 0,4 1,6 -1,8 10,1 0),(5 4,6 4,6 3,5 4))')):
self.assertEqual(pg._get_single_external(0),
LinearRing((1,0),(4,1),(6,-1),(8,10),(1,0)),
'Polygon _get_single_external(0)')
self.assertEqual(pg._get_single_external(1),
LinearRing((5,4),(6,4),(6,3),(5,4)),
'Polygon _get_single_external(1)')
# _set_list
pg._set_list(2, (((1,2),(10,0),(12,9),(-1,15),(1,2)),
((4,2),(5,2),(5,3),(4,2))))
self.assertEqual(pg.coords,
(((1.0,2.0),(10.0,0.0),(12.0,9.0),(-1.0,15.0),(1.0,2.0)),
((4.0,2.0),(5.0,2.0),(5.0,3.0),(4.0,2.0))),
'Polygon _set_list')
lsa = Polygon(*pg.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(pg), 'Polygon ' + f.__name__)
def test06_Collection(self):
'Testing Collection mutations'
for mp in (MultiPoint(*map(Point,((3,4),(-1,2),(5,-4),(2,8)))),
fromstr('MULTIPOINT (3 4,-1 2,5 -4,2 8)')):
self.assertEqual(mp._get_single_external(2), Point(5,-4), 'Collection _get_single_external')
mp._set_list(3, map(Point,((5,5),(3,-2),(8,1))))
self.assertEqual(mp.coords, ((5.0,5.0),(3.0,-2.0),(8.0,1.0)), 'Collection _set_list')
lsa = MultiPoint(*map(Point,((5,5),(3,-2),(8,1))))
for f in geos_function_tests:
self.assertEqual(f(lsa), f(mp), 'MultiPoint ' + f.__name__)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSMutationTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__ == '__main__':
run()
| bsd-3-clause | 2,208,856,231,685,410,300 | 38.883212 | 104 | 0.554356 | false |
notriddle/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_terminal.py | 30 | 40391 | """
terminal reporting of the full testing process.
"""
from __future__ import absolute_import, division, print_function
import collections
import sys
import pluggy
import _pytest._code
import py
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt
from _pytest.terminal import build_summary_stats_line, _plugin_nameversions
DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"])
class Option(object):
def __init__(self, verbose=False, fulltrace=False):
self.verbose = verbose
self.fulltrace = fulltrace
@property
def args(self):
values = []
if self.verbose:
values.append("-v")
if self.fulltrace:
values.append("--fulltrace")
return values
@pytest.fixture(
params=[
Option(verbose=False),
Option(verbose=True),
Option(verbose=-1),
Option(fulltrace=True),
],
ids=["default", "verbose", "quiet", "fulltrace"],
)
def option(request):
return request.param
@pytest.mark.parametrize(
"input,expected",
[
([DistInfo(project_name="test", version=1)], ["test-1"]),
([DistInfo(project_name="pytest-test", version=1)], ["test-1"]),
(
[
DistInfo(project_name="test", version=1),
DistInfo(project_name="test", version=1),
],
["test-1"],
),
],
ids=["normal", "prefix-strip", "deduplicate"],
)
def test_plugin_nameversion(input, expected):
pluginlist = [(None, x) for x in input]
result = _plugin_nameversions(pluginlist)
assert result == expected
class TestTerminal(object):
def test_pass_skip_fail(self, testdir, option):
testdir.makepyfile(
"""
import pytest
def test_ok():
pass
def test_skip():
pytest.skip("xx")
def test_func():
assert 0
"""
)
result = testdir.runpytest(*option.args)
if option.verbose:
result.stdout.fnmatch_lines(
[
"*test_pass_skip_fail.py::test_ok PASS*",
"*test_pass_skip_fail.py::test_skip SKIP*",
"*test_pass_skip_fail.py::test_func FAIL*",
]
)
else:
result.stdout.fnmatch_lines(["*test_pass_skip_fail.py .sF*"])
result.stdout.fnmatch_lines(
[" def test_func():", "> assert 0", "E assert 0"]
)
def test_internalerror(self, testdir, linecomp):
modcol = testdir.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
excinfo = pytest.raises(ValueError, "raise ValueError('hello')")
rep.pytest_internalerror(excinfo.getrepr())
linecomp.assert_contains_lines(["INTERNALERROR> *ValueError*hello*"])
def test_writeline(self, testdir, linecomp):
modcol = testdir.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
rep.write_fspath_result(modcol.nodeid, ".")
rep.write_line("hello world")
lines = linecomp.stringio.getvalue().split("\n")
assert not lines[0]
assert lines[1].endswith(modcol.name + " .")
assert lines[2] == "hello world"
def test_show_runtest_logstart(self, testdir, linecomp):
item = testdir.getitem("def test_func(): pass")
tr = TerminalReporter(item.config, file=linecomp.stringio)
item.config.pluginmanager.register(tr)
location = item.reportinfo()
tr.config.hook.pytest_runtest_logstart(
nodeid=item.nodeid, location=location, fspath=str(item.fspath)
)
linecomp.assert_contains_lines(["*test_show_runtest_logstart.py*"])
def test_runtest_location_shown_before_test_starts(self, testdir):
testdir.makepyfile(
"""
def test_1():
import time
time.sleep(20)
"""
)
child = testdir.spawn_pytest("")
child.expect(".*test_runtest_location.*py")
child.sendeof()
child.kill(15)
def test_itemreport_subclasses_show_subclassed_file(self, testdir):
testdir.makepyfile(
test_p1="""
class BaseTests(object):
def test_p1(self):
pass
class TestClass(BaseTests):
pass
"""
)
p2 = testdir.makepyfile(
test_p2="""
from test_p1 import BaseTests
class TestMore(BaseTests):
pass
"""
)
result = testdir.runpytest(p2)
result.stdout.fnmatch_lines(["*test_p2.py .*", "*1 passed*"])
result = testdir.runpytest("-v", p2)
result.stdout.fnmatch_lines(
["*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED*"]
)
def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):
a = testdir.mkpydir("a123")
a.join("test_hello123.py").write(
_pytest._code.Source(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
)
result = testdir.runpytest("-v")
assert result.ret == 0
result.stdout.fnmatch_lines(["*a123/test_hello123.py*PASS*"])
assert " <- " not in result.stdout.str()
def test_keyboard_interrupt(self, testdir, option):
testdir.makepyfile(
"""
def test_foobar():
assert 0
def test_spamegg():
import py; pytest.skip('skip me please!')
def test_interrupt_me():
raise KeyboardInterrupt # simulating the user
"""
)
result = testdir.runpytest(*option.args, no_reraise_ctrlc=True)
result.stdout.fnmatch_lines(
[
" def test_foobar():",
"> assert 0",
"E assert 0",
"*_keyboard_interrupt.py:6: KeyboardInterrupt*",
]
)
if option.fulltrace:
result.stdout.fnmatch_lines(
["*raise KeyboardInterrupt # simulating the user*"]
)
else:
result.stdout.fnmatch_lines(
["(to show a full traceback on KeyboardInterrupt use --fulltrace)"]
)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
def test_keyboard_in_sessionstart(self, testdir):
testdir.makeconftest(
"""
def pytest_sessionstart():
raise KeyboardInterrupt
"""
)
testdir.makepyfile(
"""
def test_foobar():
pass
"""
)
result = testdir.runpytest(no_reraise_ctrlc=True)
assert result.ret == 2
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
def test_collect_single_item(self, testdir):
"""Use singular 'item' when reporting a single test item"""
testdir.makepyfile(
"""
def test_foobar():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 1 item"])
def test_rewrite(self, testdir, monkeypatch):
config = testdir.parseconfig()
f = py.io.TextIO()
monkeypatch.setattr(f, "isatty", lambda *args: True)
tr = TerminalReporter(config, f)
tr._tw.fullwidth = 10
tr.write("hello")
tr.rewrite("hey", erase=True)
assert f.getvalue() == "hello" + "\r" + "hey" + (6 * " ")
class TestCollectonly(object):
def test_collectonly_basic(self, testdir):
testdir.makepyfile(
"""
def test_func():
pass
"""
)
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(
["<Module 'test_collectonly_basic.py'>", " <Function 'test_func'>"]
)
def test_collectonly_skipped_module(self, testdir):
testdir.makepyfile(
"""
import pytest
pytest.skip("hello")
"""
)
result = testdir.runpytest("--collect-only", "-rs")
result.stdout.fnmatch_lines(["*ERROR collecting*"])
def test_collectonly_failed_module(self, testdir):
testdir.makepyfile("""raise ValueError(0)""")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*raise ValueError*", "*1 error*"])
def test_collectonly_fatal(self, testdir):
testdir.makeconftest(
"""
def pytest_collectstart(collector):
assert 0, "urgs"
"""
)
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*INTERNAL*args*"])
assert result.ret == 3
def test_collectonly_simple(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
pass
class TestClass(object):
def test_method(self):
pass
"""
)
result = testdir.runpytest("--collect-only", p)
# assert stderr.startswith("inserting into sys.path")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"*<Module '*.py'>",
"* <Function 'test_func1'*>",
"* <Class 'TestClass'>",
# "* <Instance '()'>",
"* <Function 'test_method'*>",
]
)
def test_collectonly_error(self, testdir):
p = testdir.makepyfile("import Errlkjqweqwe")
result = testdir.runpytest("--collect-only", p)
assert result.ret == 2
result.stdout.fnmatch_lines(
_pytest._code.Source(
"""
*ERROR*
*ImportError*
*No module named *Errlk*
*1 error*
"""
).strip()
)
def test_collectonly_missing_path(self, testdir):
"""this checks issue 115,
failure in parseargs will cause session
not to have the items attribute
"""
result = testdir.runpytest("--collect-only", "uhm_missing_path")
assert result.ret == 4
result.stderr.fnmatch_lines(["*ERROR: file not found*"])
def test_collectonly_quiet(self, testdir):
testdir.makepyfile("def test_foo(): pass")
result = testdir.runpytest("--collect-only", "-q")
result.stdout.fnmatch_lines(["*test_foo*"])
def test_collectonly_more_quiet(self, testdir):
testdir.makepyfile(test_fun="def test_foo(): pass")
result = testdir.runpytest("--collect-only", "-qq")
result.stdout.fnmatch_lines(["*test_fun.py: 1*"])
def test_repr_python_version(monkeypatch):
try:
monkeypatch.setattr(sys, "version_info", (2, 5, 1, "final", 0))
assert repr_pythonversion() == "2.5.1-final-0"
sys.version_info = x = (2, 3)
assert repr_pythonversion() == str(x)
finally:
monkeypatch.undo() # do this early as pytest can get confused
class TestFixtureReporting(object):
def test_setup_fixture_error(self, testdir):
testdir.makepyfile(
"""
def setup_function(function):
print ("setup func")
assert 0
def test_nada():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at setup of test_nada*",
"*setup_function(function):*",
"*setup func*",
"*assert 0*",
"*1 error*",
]
)
assert result.ret != 0
def test_teardown_fixture_error(self, testdir):
testdir.makepyfile(
"""
def test_nada():
pass
def teardown_function(function):
print ("teardown func")
assert 0
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at teardown*",
"*teardown_function(function):*",
"*assert 0*",
"*Captured stdout*",
"*teardown func*",
"*1 passed*1 error*",
]
)
def test_teardown_fixture_error_and_test_failure(self, testdir):
testdir.makepyfile(
"""
def test_fail():
assert 0, "failingfunc"
def teardown_function(function):
print ("teardown func")
assert False
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at teardown of test_fail*",
"*teardown_function(function):*",
"*assert False*",
"*Captured stdout*",
"*teardown func*",
"*test_fail*",
"*def test_fail():",
"*failingfunc*",
"*1 failed*1 error*",
]
)
def test_setup_teardown_output_and_test_failure(self, testdir):
""" Test for issue #442 """
testdir.makepyfile(
"""
def setup_function(function):
print ("setup func")
def test_fail():
assert 0, "failingfunc"
def teardown_function(function):
print ("teardown func")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_fail*",
"*def test_fail():",
"*failingfunc*",
"*Captured stdout setup*",
"*setup func*",
"*Captured stdout teardown*",
"*teardown func*",
"*1 failed*",
]
)
class TestTerminalFunctional(object):
def test_deselected(self, testdir):
testpath = testdir.makepyfile(
"""
def test_one():
pass
def test_two():
pass
def test_three():
pass
"""
)
result = testdir.runpytest("-k", "test_two:", testpath)
result.stdout.fnmatch_lines(
["collected 3 items / 1 deselected", "*test_deselected.py ..*"]
)
assert result.ret == 0
def test_show_deselected_items_using_markexpr_before_test_execution(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.foo
def test_foobar():
pass
@pytest.mark.bar
def test_bar():
pass
def test_pass():
pass
"""
)
result = testdir.runpytest("-m", "not foo")
result.stdout.fnmatch_lines(
[
"collected 3 items / 1 deselected",
"*test_show_des*.py ..*",
"*= 2 passed, 1 deselected in * =*",
]
)
assert "= 1 deselected =" not in result.stdout.str()
assert result.ret == 0
def test_no_skip_summary_if_failure(self, testdir):
testdir.makepyfile(
"""
import pytest
def test_ok():
pass
def test_fail():
assert 0
def test_skip():
pytest.skip("dontshow")
"""
)
result = testdir.runpytest()
assert result.stdout.str().find("skip test summary") == -1
assert result.ret == 1
def test_passes(self, testdir):
p1 = testdir.makepyfile(
"""
def test_passes():
pass
class TestClass(object):
def test_method(self):
pass
"""
)
old = p1.dirpath().chdir()
try:
result = testdir.runpytest()
finally:
old.chdir()
result.stdout.fnmatch_lines(["test_passes.py ..*", "* 2 pass*"])
assert result.ret == 0
def test_header_trailer_info(self, testdir):
testdir.makepyfile(
"""
def test_passes():
pass
"""
)
result = testdir.runpytest()
verinfo = ".".join(map(str, sys.version_info[:3]))
result.stdout.fnmatch_lines(
[
"*===== test session starts ====*",
"platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s"
% (
sys.platform,
verinfo,
pytest.__version__,
py.__version__,
pluggy.__version__,
),
"*test_header_trailer_info.py .*",
"=* 1 passed*in *.[0-9][0-9] seconds *=",
]
)
if pytest.config.pluginmanager.list_plugin_distinfo():
result.stdout.fnmatch_lines(["plugins: *"])
def test_showlocals(self, testdir):
p1 = testdir.makepyfile(
"""
def test_showlocals():
x = 3
y = "x" * 5000
assert 0
"""
)
result = testdir.runpytest(p1, "-l")
result.stdout.fnmatch_lines(
[
# "_ _ * Locals *",
"x* = 3",
"y* = 'xxxxxx*",
]
)
def test_verbose_reporting(self, testdir, pytestconfig):
p1 = testdir.makepyfile(
"""
import pytest
def test_fail():
raise ValueError()
def test_pass():
pass
class TestClass(object):
def test_skip(self):
pytest.skip("hello")
def test_gen():
def check(x):
assert x == 1
yield check, 0
"""
)
result = testdir.runpytest(p1, "-v")
result.stdout.fnmatch_lines(
[
"*test_verbose_reporting.py::test_fail *FAIL*",
"*test_verbose_reporting.py::test_pass *PASS*",
"*test_verbose_reporting.py::TestClass::test_skip *SKIP*",
"*test_verbose_reporting.py::test_gen*0* *FAIL*",
]
)
assert result.ret == 1
if not pytestconfig.pluginmanager.get_plugin("xdist"):
pytest.skip("xdist plugin not installed")
result = testdir.runpytest(p1, "-v", "-n 1")
result.stdout.fnmatch_lines(["*FAIL*test_verbose_reporting.py::test_fail*"])
assert result.ret == 1
def test_quiet_reporting(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
result = testdir.runpytest(p1, "-q")
s = result.stdout.str()
assert "test session starts" not in s
assert p1.basename not in s
assert "===" not in s
assert "passed" in s
def test_more_quiet_reporting(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
result = testdir.runpytest(p1, "-qq")
s = result.stdout.str()
assert "test session starts" not in s
assert p1.basename not in s
assert "===" not in s
assert "passed" not in s
def test_report_collectionfinish_hook(self, testdir):
testdir.makeconftest(
"""
def pytest_report_collectionfinish(config, startdir, items):
return ['hello from hook: {0} items'.format(len(items))]
"""
)
testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize('i', range(3))
def test(i):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 3 items", "hello from hook: 3 items"])
def test_fail_extra_reporting(testdir):
testdir.makepyfile("def test_this(): assert 0")
result = testdir.runpytest()
assert "short test summary" not in result.stdout.str()
result = testdir.runpytest("-rf")
result.stdout.fnmatch_lines(["*test summary*", "FAIL*test_fail_extra_reporting*"])
def test_fail_reporting_on_pass(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest("-rf")
assert "short test summary" not in result.stdout.str()
def test_pass_extra_reporting(testdir):
    """-rp adds a short summary section listing the passed tests."""
    testdir.makepyfile("def test_this(): assert 1")
    res = testdir.runpytest()
    assert "short test summary" not in res.stdout.str()
    res = testdir.runpytest("-rp")
    res.stdout.fnmatch_lines(["*test summary*", "PASS*test_pass_extra_reporting*"])
def test_pass_reporting_on_fail(testdir):
    """-rp must not emit a pass summary when the only test fails."""
    testdir.makepyfile("def test_this(): assert 0")
    res = testdir.runpytest("-rp")
    assert "short test summary" not in res.stdout.str()
def test_pass_output_reporting(testdir):
    """Captured stdout of *passing* tests is hidden by default and only
    echoed with the -rP report option."""
    testdir.makepyfile(
        """
        def test_pass_output():
            print("Four score and seven years ago...")
    """
    )
    result = testdir.runpytest()
    assert "Four score and seven years ago..." not in result.stdout.str()
    result = testdir.runpytest("-rP")
    result.stdout.fnmatch_lines(["Four score and seven years ago..."])
def test_color_yes(testdir):
    """--color=yes forces ANSI escape sequences into the output."""
    testdir.makepyfile("def test_this(): assert 1")
    out = testdir.runpytest("--color=yes").stdout.str()
    assert "test session starts" in out
    assert "\x1b[1m" in out
def test_color_no(testdir):
    """--color=no suppresses all ANSI escape sequences."""
    testdir.makepyfile("def test_this(): assert 1")
    out = testdir.runpytest("--color=no").stdout.str()
    assert "test session starts" in out
    assert "\x1b[1m" not in out
@pytest.mark.parametrize("verbose", [True, False])
def test_color_yes_collection_on_non_atty(testdir, verbose):
    """Skip the interactive collect progress report on non-terminals (#1397).

    With --color=yes the escapes are still emitted, but the live
    'collecting N items' counter (a tty-only feature) must not appear.
    """
    testdir.makepyfile(
        """
        import pytest
        @pytest.mark.parametrize('i', range(10))
        def test_this(i):
            assert 1
    """
    )
    args = ["--color=yes"]
    if verbose:
        args.append("-vv")
    result = testdir.runpytest(*args)
    assert "test session starts" in result.stdout.str()
    assert "\x1b[1m" in result.stdout.str()
    # live counter suppressed because stdout is not a terminal
    assert "collecting 10 items" not in result.stdout.str()
    if verbose:
        assert "collecting ..." in result.stdout.str()
    assert "collected 10 items" in result.stdout.str()
def test_getreportopt():
    """getreportopt() merges -r chars with the warnings-display setting."""

    # Minimal stand-in for the pytest config object.
    class FakeConfig(object):
        class FakeOption(object):
            reportchars = ""
            disable_warnings = True

        option = FakeOption()

    config = FakeConfig()

    config.option.reportchars = "sf"
    assert getreportopt(config) == "sf"

    # 'w' is stripped while warnings are disabled ...
    config.option.reportchars = "sfxw"
    assert getreportopt(config) == "sfx"

    # ... and added (exactly once) when they are enabled.
    config.option.reportchars = "sfx"
    config.option.disable_warnings = False
    assert getreportopt(config) == "sfxw"

    config.option.reportchars = "sfxw"
    config.option.disable_warnings = False
    assert getreportopt(config) == "sfxw"
def test_terminalreporter_reportopt_addopts(testdir):
    """-r options coming from an ini 'addopts' line must reach the
    terminalreporter plugin's hasopt() just like command-line ones."""
    testdir.makeini("[pytest]\naddopts=-rs")
    testdir.makepyfile(
        """
        import pytest
        @pytest.fixture
        def tr(request):
            tr = request.config.pluginmanager.getplugin("terminalreporter")
            return tr
        def test_opt(tr):
            assert tr.hasopt('skipped')
            assert not tr.hasopt('qwe')
    """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_tbstyle_short(testdir):
    """--tb=short shows the failing source line but omits local variable
    values; the default (long) traceback includes them."""
    p = testdir.makepyfile(
        """
        import pytest
        @pytest.fixture
        def arg(request):
            return 42
        def test_opt(arg):
            x = 0
            assert x
    """
    )
    result = testdir.runpytest("--tb=short")
    s = result.stdout.str()
    assert "arg = 42" not in s
    assert "x = 0" not in s
    result.stdout.fnmatch_lines(["*%s:8*" % p.basename, "    assert x", "E   assert*"])
    result = testdir.runpytest()
    s = result.stdout.str()
    # long (default) traceback: locals and source are both present
    assert "x = 0" in s
    assert "assert x" in s
def test_traceconfig(testdir, monkeypatch):
    """--traceconfig prints the list of active plugins during startup."""
    res = testdir.runpytest("--traceconfig")
    res.stdout.fnmatch_lines(["*active plugins*"])
    assert res.ret == EXIT_NOTESTSCOLLECTED
class TestGenericReporting(object):
    """This test class can be subclassed with a different ``option``
    provider to run e.g. distributed tests.
    """

    def test_collect_fail(self, testdir, option):
        # A module that fails to import is reported as a collection error.
        testdir.makepyfile("import xyz\n")
        result = testdir.runpytest(*option.args)
        result.stdout.fnmatch_lines(
            ["ImportError while importing*", "*No module named *xyz*", "*1 error*"]
        )

    def test_maxfailures(self, testdir, option):
        # --maxfail=2 stops the session after the second failure, so the
        # third test never runs.
        testdir.makepyfile(
            """
            def test_1():
                assert 0
            def test_2():
                assert 0
            def test_3():
                assert 0
        """
        )
        result = testdir.runpytest("--maxfail=2", *option.args)
        result.stdout.fnmatch_lines(
            ["*def test_1():*", "*def test_2():*", "*2 failed*"]
        )

    def test_tb_option(self, testdir, option):
        # --tb=long shows source lines, --tb=short only the calling frame,
        # --tb=no suppresses the FAILURES section entirely.
        testdir.makepyfile(
            """
            import pytest
            def g():
                raise IndexError
            def test_func():
                print (6*7)
                g() # --calling--
        """
        )
        for tbopt in ["long", "short", "no"]:
            print("testing --tb=%s..." % tbopt)
            result = testdir.runpytest("--tb=%s" % tbopt)
            s = result.stdout.str()
            if tbopt == "long":
                assert "print (6*7)" in s
            else:
                assert "print (6*7)" not in s
            if tbopt != "no":
                assert "--calling--" in s
                assert "IndexError" in s
            else:
                assert "FAILURES" not in s
                assert "--calling--" not in s
                assert "IndexError" not in s

    def test_tb_crashline(self, testdir, option):
        # --tb=line reduces each failure to a single "file:line: message".
        p = testdir.makepyfile(
            """
            import pytest
            def g():
                raise IndexError
            def test_func1():
                print (6*7)
                g() # --calling--
            def test_func2():
                assert 0, "hello"
        """
        )
        result = testdir.runpytest("--tb=line")
        bn = p.basename
        result.stdout.fnmatch_lines(
            ["*%s:3: IndexError*" % bn, "*%s:8: AssertionError: hello*" % bn]
        )
        s = result.stdout.str()
        # no source shown in line mode
        assert "def test_func2" not in s

    def test_pytest_report_header(self, testdir, option):
        # Headers from multiple conftests are all rendered; the hook may
        # return a string or a list of lines.
        testdir.makeconftest(
            """
            def pytest_sessionstart(session):
                session.config._somevalue = 42
            def pytest_report_header(config):
                return "hello: %s" % config._somevalue
        """
        )
        testdir.mkdir("a").join("conftest.py").write(
            """
def pytest_report_header(config, startdir):
    return ["line1", str(startdir)]
"""
        )
        result = testdir.runpytest("a")
        result.stdout.fnmatch_lines(["*hello: 42*", "line1", str(testdir.tmpdir)])

    def test_show_capture(self, testdir):
        # --show-capture selects which captured streams (stdout/stderr/log)
        # are echoed in the failure report; the default is "all".
        testdir.makepyfile(
            """
            import sys
            import logging
            def test_one():
                sys.stdout.write('!This is stdout!')
                sys.stderr.write('!This is stderr!')
                logging.warning('!This is a warning log msg!')
                assert False, 'Something failed'
        """
        )

        result = testdir.runpytest("--tb=short")
        result.stdout.fnmatch_lines(
            [
                "!This is stdout!",
                "!This is stderr!",
                "*WARNING*!This is a warning log msg!",
            ]
        )

        result = testdir.runpytest("--show-capture=all", "--tb=short")
        result.stdout.fnmatch_lines(
            [
                "!This is stdout!",
                "!This is stderr!",
                "*WARNING*!This is a warning log msg!",
            ]
        )

        stdout = testdir.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
        assert "!This is stderr!" not in stdout
        assert "!This is stdout!" in stdout
        assert "!This is a warning log msg!" not in stdout

        stdout = testdir.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" in stdout
        assert "!This is a warning log msg!" not in stdout

        stdout = testdir.runpytest("--show-capture=log", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" not in stdout
        assert "!This is a warning log msg!" in stdout

        stdout = testdir.runpytest("--show-capture=no", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" not in stdout
        assert "!This is a warning log msg!" not in stdout
@pytest.mark.xfail("not hasattr(os, 'dup')")
def test_fdopen_kept_alive_issue124(testdir):
    """Regression test for #124: a file object created from fd 1 in one test
    must still be closable in a later test (capturing must not break it)."""
    testdir.makepyfile(
        """
        import os, sys
        k = []
        def test_open_file_and_keep_alive(capfd):
            stdout = os.fdopen(1, 'w', 1)
            k.append(stdout)
        def test_close_kept_alive_file():
            stdout = k.pop()
            stdout.close()
    """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(["*2 passed*"])
def test_tbstyle_native_setup_error(testdir):
    """Native tracebacks must also be used for errors raised during fixture
    setup, pointing at the fixture source line."""
    testdir.makepyfile(
        """
        import pytest
        @pytest.fixture
        def setup_error_fixture():
            raise Exception("error in exception")
        def test_error_fixture(setup_error_fixture):
            pass
    """
    )
    result = testdir.runpytest("--tb=native")
    result.stdout.fnmatch_lines(
        ['*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*']
    )
def test_terminal_summary(testdir):
    """pytest_terminal_summary receives the session exit status (5 == no
    tests collected here) and may write extra sections to the report."""
    testdir.makeconftest(
        """
        def pytest_terminal_summary(terminalreporter, exitstatus):
            w = terminalreporter
            w.section("hello")
            w.line("world")
            w.line("exitstatus: {0}".format(exitstatus))
    """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(
        """
        *==== hello ====*
        world
        exitstatus: 5
    """
    )
def test_terminal_summary_warnings_are_displayed(testdir):
    """Test that warnings emitted during pytest_terminal_summary are
    displayed (#1305)."""
    testdir.makeconftest(
        """
        def pytest_terminal_summary(terminalreporter):
            config = terminalreporter.config
            config.warn('C1', 'internal warning')
    """
    )
    result = testdir.runpytest("-rw")
    result.stdout.fnmatch_lines(
        ["<undetermined location>", "*internal warning", "*== 1 warnings in *"]
    )
    # The warning has no location; make sure we never print the string "None".
    assert "None" not in result.stdout.str()
@pytest.mark.parametrize(
    "exp_color, exp_line, stats_arg",
    [
        # The method under test only cares about the length of each
        # dict value, not the actual contents, so tuples of anything
        # suffice
        # Important statuses -- the highest priority of these always wins
        ("red", "1 failed", {"failed": (1,)}),
        ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),
        ("red", "1 error", {"error": (1,)}),
        ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),
        # (a status that's not known to the code)
        ("yellow", "1 weird", {"weird": (1,)}),
        ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),
        ("yellow", "1 warnings", {"warnings": (1,)}),
        ("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}),
        ("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}),
        # "Boring" statuses.  These have no effect on the color of the summary
        # line.  Thus, if *every* test has a boring status, the summary line stays
        # at its default color, i.e. yellow, to warn the user that the test run
        # produced no useful information
        ("yellow", "1 skipped", {"skipped": (1,)}),
        ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}),
        ("yellow", "1 deselected", {"deselected": (1,)}),
        ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}),
        ("yellow", "1 xfailed", {"xfailed": (1,)}),
        ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}),
        ("yellow", "1 xpassed", {"xpassed": (1,)}),
        ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}),
        # Likewise if no tests were found at all
        ("yellow", "no tests ran", {}),
        # Test the empty-key special case
        ("yellow", "no tests ran", {"": (1,)}),
        ("green", "1 passed", {"": (1,), "passed": (1,)}),
        # A couple more complex combinations
        (
            "red",
            "1 failed, 2 passed, 3 xfailed",
            {"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)},
        ),
        (
            "green",
            "1 passed, 2 skipped, 3 deselected, 2 xfailed",
            {
                "passed": (1,),
                "skipped": (1, 2),
                "deselected": (1, 2, 3),
                "xfailed": (1, 2),
            },
        ),
    ],
)
def test_summary_stats(exp_line, exp_color, stats_arg):
    """build_summary_stats_line() must render ``stats_arg`` into the given
    summary text and terminal color (parametrize maps arguments by name,
    so the swapped order in the decorator is harmless)."""
    print("Based on stats: %s" % stats_arg)
    print('Expect summary: "%s"; with color "%s"' % (exp_line, exp_color))
    (line, color) = build_summary_stats_line(stats_arg)
    print('Actually got: "%s"; with color "%s"' % (line, color))
    assert line == exp_line
    assert color == exp_color
def test_no_trailing_whitespace_after_inifile_word(testdir):
    """The "inifile:" header line must not end with a trailing space."""
    res = testdir.runpytest("")
    assert "inifile:\n" in res.stdout.str()

    testdir.makeini("[pytest]")
    res = testdir.runpytest("")
    assert "inifile: tox.ini\n" in res.stdout.str()
class TestProgress(object):
    """Tests for the right-edge percentage progress indicator."""

    @pytest.fixture
    def many_tests_files(self, testdir):
        # 10 + 5 + 5 parametrized tests spread over three files (20 total).
        testdir.makepyfile(
            test_bar="""
                import pytest
                @pytest.mark.parametrize('i', range(10))
                def test_bar(i): pass
            """,
            test_foo="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_foo(i): pass
            """,
            test_foobar="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_foobar(i): pass
            """,
        )

    def test_zero_tests_collected(self, testdir):
        """Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being
        actually collected (#2971)."""
        testdir.makeconftest(
            """
            def pytest_collection_modifyitems(items, config):
                from _pytest.runner import CollectReport
                for node_id in ('nodeid1', 'nodeid2'):
                    rep = CollectReport(node_id, 'passed', None, None)
                    rep.when = 'passed'
                    rep.duration = 0.1
                    config.hook.pytest_runtest_logreport(report=rep)
        """
        )
        output = testdir.runpytest()
        # a zero collected count must not crash the percentage computation
        assert "ZeroDivisionError" not in output.stdout.str()
        output.stdout.fnmatch_lines(["=* 2 passed in *="])

    def test_normal(self, many_tests_files, testdir):
        output = testdir.runpytest()
        output.stdout.re_match_lines(
            [
                r"test_bar.py \.{10} \s+ \[ 50%\]",
                r"test_foo.py \.{5} \s+ \[ 75%\]",
                r"test_foobar.py \.{5} \s+ \[100%\]",
            ]
        )

    def test_verbose(self, many_tests_files, testdir):
        output = testdir.runpytest("-v")
        output.stdout.re_match_lines(
            [
                r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 5%\]",
                r"test_foo.py::test_foo\[4\] PASSED \s+ \[ 75%\]",
                r"test_foobar.py::test_foobar\[4\] PASSED \s+ \[100%\]",
            ]
        )

    def test_xdist_normal(self, many_tests_files, testdir):
        pytest.importorskip("xdist")
        output = testdir.runpytest("-n2")
        output.stdout.re_match_lines([r"\.{20} \s+ \[100%\]"])

    def test_xdist_verbose(self, many_tests_files, testdir):
        pytest.importorskip("xdist")
        output = testdir.runpytest("-n2", "-v")
        output.stdout.re_match_lines_random(
            [
                r"\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]",
                r"\[gw\d\] \[\s*\d+%\] PASSED test_foo.py::test_foo\[1\]",
                r"\[gw\d\] \[\s*\d+%\] PASSED test_foobar.py::test_foobar\[1\]",
            ]
        )

    def test_capture_no(self, many_tests_files, testdir):
        # with capturing disabled the progress percentages are not shown
        output = testdir.runpytest("-s")
        output.stdout.re_match_lines(
            [r"test_bar.py \.{10}", r"test_foo.py \.{5}", r"test_foobar.py \.{5}"]
        )
        output = testdir.runpytest("--capture=no")
        assert "%]" not in output.stdout.str()
class TestProgressWithTeardown(object):
    """Ensure we show the correct percentages for tests that fail during teardown (#3088)"""

    # NOTE(review): "contest_with_teardown_fixture" looks like a typo for
    # "conftest_..."; kept as-is since the name is referenced throughout.
    @pytest.fixture
    def contest_with_teardown_fixture(self, testdir):
        testdir.makeconftest(
            """
            import pytest
            @pytest.fixture
            def fail_teardown():
                yield
                assert False
        """
        )

    @pytest.fixture
    def many_files(self, testdir, contest_with_teardown_fixture):
        # 5 + 15 tests, every one failing in teardown via fail_teardown.
        testdir.makepyfile(
            test_bar="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_bar(fail_teardown, i):
                    pass
            """,
            test_foo="""
                import pytest
                @pytest.mark.parametrize('i', range(15))
                def test_foo(fail_teardown, i):
                    pass
            """,
        )

    def test_teardown_simple(self, testdir, contest_with_teardown_fixture):
        testdir.makepyfile(
            """
            def test_foo(fail_teardown):
                pass
        """
        )
        output = testdir.runpytest()
        # "." for the call phase plus "E" for the teardown failure
        output.stdout.re_match_lines([r"test_teardown_simple.py \.E\s+\[100%\]"])

    def test_teardown_with_test_also_failing(
        self, testdir, contest_with_teardown_fixture
    ):
        testdir.makepyfile(
            """
            def test_foo(fail_teardown):
                assert False
        """
        )
        output = testdir.runpytest()
        output.stdout.re_match_lines(
            [r"test_teardown_with_test_also_failing.py FE\s+\[100%\]"]
        )

    def test_teardown_many(self, testdir, many_files):
        output = testdir.runpytest()
        output.stdout.re_match_lines(
            [r"test_bar.py (\.E){5}\s+\[ 25%\]", r"test_foo.py (\.E){15}\s+\[100%\]"]
        )

    def test_teardown_many_verbose(self, testdir, many_files):
        # in verbose mode both the PASSED and the teardown ERROR lines carry
        # the same percentage
        output = testdir.runpytest("-v")
        output.stdout.re_match_lines(
            [
                r"test_bar.py::test_bar\[0\] PASSED\s+\[ 5%\]",
                r"test_bar.py::test_bar\[0\] ERROR\s+\[ 5%\]",
                r"test_bar.py::test_bar\[4\] PASSED\s+\[ 25%\]",
                r"test_bar.py::test_bar\[4\] ERROR\s+\[ 25%\]",
            ]
        )

    def test_xdist_normal(self, many_files, testdir):
        pytest.importorskip("xdist")
        output = testdir.runpytest("-n2")
        output.stdout.re_match_lines([r"[\.E]{40} \s+ \[100%\]"])
| mpl-2.0 | 5,908,586,696,443,622,000 | 30.879242 | 106 | 0.517566 | false |
dago/ansible-modules-extras | notification/nexmo.py | 153 | 3819 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: nexmo
short_description: Send a SMS via nexmo
description:
- Send a SMS message via nexmo
version_added: 1.6
author: "Matt Martz (@sivel)"
options:
api_key:
description:
- Nexmo API Key
required: true
api_secret:
description:
- Nexmo API Secret
required: true
src:
description:
- Nexmo Number to send from
required: true
dest:
description:
- Phone number(s) to send SMS message to
required: true
msg:
description:
- Message to text to send. Messages longer than 160 characters will be
split into multiple messages
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices:
- 'yes'
- 'no'
"""
EXAMPLES = """
- name: Send notification message via Nexmo
local_action:
module: nexmo
api_key: 640c8a53
api_secret: 0ce239a6
src: 12345678901
dest:
- 10987654321
- 16789012345
msg: "{{ inventory_hostname }} completed"
"""
import urllib
# Nexmo JSON SMS endpoint; per-message parameters are appended as a query
# string by send_msg().
NEXMO_API = 'https://rest.nexmo.com/sms/json'
def send_msg(module):
    """Send the configured SMS text to every destination number via Nexmo.

    Exits through module.exit_json(); the reported ``failed`` flag is True
    when at least one destination could not be messaged, and ``responses``
    maps each number to the decoded API reply (or ``{'failed': True}``).
    """
    failed = list()
    responses = dict()
    msg = {
        'api_key': module.params.get('api_key'),
        'api_secret': module.params.get('api_secret'),
        'from': module.params.get('src'),
        'text': module.params.get('msg')
    }
    for number in module.params.get('dest'):
        msg['to'] = number
        url = "%s?%s" % (NEXMO_API, urllib.urlencode(msg))

        headers = dict(Accept='application/json')
        response, info = fetch_url(module, url, headers=headers)
        if info['status'] != 200:
            # No usable HTTP response: record the failure once and skip the
            # JSON parsing below (previously the number was appended to
            # ``failed`` twice when json.load() also blew up on the bad
            # response).
            failed.append(number)
            responses[number] = dict(failed=True)
            continue

        try:
            responses[number] = json.load(response)
        except Exception:
            # Body was not valid JSON.
            failed.append(number)
            responses[number] = dict(failed=True)
        else:
            # HTTP 200 can still carry per-message delivery errors.
            for message in responses[number]['messages']:
                if int(message['status']) != 0:
                    failed.append(number)
                    responses[number] = dict(failed=True, **responses[number])

    if failed:
        summary = 'One or more messages failed to send'
    else:
        summary = ''

    module.exit_json(failed=bool(failed), msg=summary, changed=False,
                     responses=responses)
def main():
    """Module entry point: build the argument spec and dispatch to send_msg()."""
    spec = url_argument_spec()
    spec.update(
        dict(
            api_key=dict(required=True, no_log=True),
            api_secret=dict(required=True, no_log=True),
            src=dict(required=True, type='int'),
            dest=dict(required=True, type='list'),
            msg=dict(required=True),
        ),
    )
    send_msg(AnsibleModule(argument_spec=spec))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 | -4,732,420,763,299,700,000 | 26.085106 | 82 | 0.618748 | false |
camilonova/sentry | src/sentry/web/forms/__init__.py | 15 | 3194 | """
sentry.web.forms
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.constants import HTTP_METHODS
from sentry.models import User, Activity
from sentry.web.forms.fields import RadioFieldRenderer, ReadOnlyTextField
class ReplayForm(forms.Form):
    """Form for replaying a captured HTTP request (URL, method, body, headers)."""

    url = forms.URLField(widget=forms.TextInput(attrs={'class': 'span8'}))
    method = forms.ChoiceField(choices=((k, k) for k in HTTP_METHODS))
    data = forms.CharField(required=False, widget=forms.Textarea(attrs={'class': 'span8'}))
    headers = forms.CharField(required=False, widget=forms.Textarea(attrs={'class': 'span8'}))

    def clean_headers(self):
        # Parse "Name: value" textarea lines into a dict; returns None when
        # the field is empty.
        # NOTE(review): a line without ': ' raises ValueError here rather
        # than a validation error -- confirm whether that is intended.
        value = self.cleaned_data.get('headers')
        if not value:
            return
        return dict(line.split(': ', 1) for line in value.splitlines())
class BaseUserForm(forms.ModelForm):
    """Shared fields (email + display name) for the user admin forms below."""

    email = forms.EmailField()
    first_name = forms.CharField(required=True, label=_('Name'))
class NewUserForm(BaseUserForm):
    """Admin form for creating a user, with optional project creation and
    welcome email."""

    create_project = forms.BooleanField(required=False,
        help_text=_("Create a project for this user."))
    send_welcome_mail = forms.BooleanField(required=False,
        help_text=_("Send this user a welcome email which will contain their generated password."))

    class Meta:
        fields = ('first_name', 'username', 'email')
        model = User
class ChangeUserForm(BaseUserForm):
    """Admin form for editing an existing user, including permission flags.

    For managed (externally provisioned) accounts the username is rendered
    read-only and any submitted value is ignored.
    """

    is_staff = forms.BooleanField(required=False, label=_('Admin'),
        help_text=_("Designates whether this user can perform administrative functions."))
    is_superuser = forms.BooleanField(required=False, label=_('Superuser'),
        help_text=_('Designates whether this user has all permissions without '
                    'explicitly assigning them.'))

    class Meta:
        fields = ('first_name', 'username', 'email', 'is_active', 'is_staff',
                  'is_superuser')
        model = User

    def __init__(self, *args, **kwargs):
        # NOTE(review): assumes the form is constructed with the
        # ``instance=`` keyword (raises KeyError otherwise).
        super(ChangeUserForm, self).__init__(*args, **kwargs)
        self.user = kwargs['instance']
        if self.user.is_managed:
            self.fields['username'] = ReadOnlyTextField(label="Username (managed)")

    def clean_username(self):
        # Managed users keep their original username regardless of input.
        if self.user.is_managed:
            return self.user.username
        return self.cleaned_data['username']
class RemoveUserForm(forms.Form):
    """Asks whether to disable the account or permanently delete the user."""

    removal_type = forms.ChoiceField(choices=(
        ('1', _('Disable the account.')),
        ('2', _('Permanently remove the user and their data.')),
    ), widget=forms.RadioSelect(renderer=RadioFieldRenderer))
class TestEmailForm(forms.Form):
    """Intentionally empty -- presumably used only as a POST confirmation
    for the test-email view; confirm at the call site."""
    pass
class NewNoteForm(forms.Form):
    """Form for attaching a free-text note to an event."""

    text = forms.CharField(widget=forms.Textarea(attrs={'class': 'span8'}))

    def save(self, event, user):
        """Create a NOTE Activity for ``event`` by ``user``, send its
        notification, and return the new Activity."""
        activity = Activity.objects.create(
            group=event.group, event=event, project=event.project,
            type=Activity.NOTE, user=user,
            data=self.cleaned_data
        )
        activity.send_notification()

        return activity
| bsd-3-clause | -8,581,853,727,075,517,000 | 32.978723 | 99 | 0.659048 | false |
mrgloom/DeepCL | cog-batteries/cog_fluent.py | 6 | 4556 | # Copyright Hugh Perkins 2014,2015 hughperkins at gmail
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import cog
def go(classname, ints = [], floats = []):
    """Emit C++ int/float member declarations plus fluent TitleCase setters.

    ``go('Foo', ints=['bar'])`` writes ``int bar = 0;`` and a method
    ``Foo Bar( int _bar ) { ... return *this; }`` through cog.outl().
    Note: the mutable default lists are only iterated, never mutated.
    """
    cog.outl( '// generated, using cog:' )
    for thisint in ints:
        cog.outl('int ' + thisint + ' = 0;')
    for thisfloat in floats:
        cog.outl('float ' + thisfloat + ' = 0;')
    for thisint in ints:
        # Setter name = field name with its first letter upper-cased.
        thisintTitlecase = thisint[0].upper() + thisint[1:]
        cog.outl(classname + ' ' + thisintTitlecase + '( int ' + '_' + thisint + ' ) {')
        cog.outl('    this->' + thisint + ' = _' + thisint + ';')
        cog.outl('    return *this;')
        cog.outl('}')
    for thisfloat in floats:
        thisfloatTitlecase = thisfloat[0].upper() + thisfloat[1:]
        cog.outl(classname + ' ' + thisfloatTitlecase + '( float ' + '_' + thisfloat + ' ) {')
        cog.outl('    this->' + thisfloat + ' = _' + thisfloat + ';')
        cog.outl('    return *this;')
        cog.outl('}')
def go1b(classname, ints = [], floats = []):
    """Like go(), but declares members uninitialised and also emits a default
    constructor that zero-initialises every field."""
    cog.outl( '// generated, using cog:' )
    for thisint in ints:
        cog.outl('int ' + thisint + ';')
    for thisfloat in floats:
        cog.outl('float ' + thisfloat + ';')
    cog.outl(classname + '() {')
    for thisint in ints:
        cog.outl('    ' + thisint + ' = 0;')
    for thisfloat in floats:
        cog.outl('    ' + thisfloat + ' = 0;')
    cog.outl('}')
    for thisint in ints:
        # Setter name = field name with its first letter upper-cased.
        thisintTitlecase = thisint[0].upper() + thisint[1:]
        cog.outl(classname + ' ' + thisintTitlecase + '( int ' + '_' + thisint + ' ) {')
        cog.outl('    this->' + thisint + ' = _' + thisint + ';')
        cog.outl('    return *this;')
        cog.outl('}')
    for thisfloat in floats:
        thisfloatTitlecase = thisfloat[0].upper() + thisfloat[1:]
        cog.outl(classname + ' ' + thisfloatTitlecase + '( float ' + '_' + thisfloat + ' ) {')
        cog.outl('    this->' + thisfloat + ' = _' + thisfloat + ';')
        cog.outl('    return *this;')
        cog.outl('}')
def gov2(classname, ints = [], floats = []):
    """Emit ``_``-prefixed member declarations plus fluent setters whose
    method name is the raw field name (v2 naming scheme).

    Fix: removed the TitleCase temporaries that were computed but never
    used -- unlike go()/go1b(), this variant names setters after the field
    itself. The mutable default lists are only iterated, never mutated.
    """
    cog.outl( '// generated, using cog:' )
    for thisint in ints:
        cog.outl('int _' + thisint + ' = 0;')
    for thisfloat in floats:
        cog.outl('float _' + thisfloat + ' = 0;')
    for thisint in ints:
        cog.outl(classname + ' ' + thisint + '( int ' + '_' + thisint + ' ) {')
        cog.outl('    this->_' + thisint + ' = _' + thisint + ';')
        cog.outl('    return *this;')
        cog.outl('}')
    for thisfloat in floats:
        cog.outl(classname + ' ' + thisfloat + '( float ' + '_' + thisfloat + ' ) {')
        cog.outl('    this->_' + thisfloat + ' = _' + thisfloat + ';')
        cog.outl('    return *this;')
        cog.outl('}')
def gov3(classname, ints = [], floats = [], strings = []):
    """Emit ``_``-prefixed members, a zeroing default constructor, and fluent
    setters named after each raw field, for int/float/std::string fields.

    Fix: removed the TitleCase temporaries that were computed but never
    used (setters here take the raw field name, as in gov2()). The mutable
    default lists are only iterated, never mutated.
    """
    cog.outl( '// generated, using cog:' )
    for thisint in ints:
        cog.outl('int _' + thisint + ';')
    for thisfloat in floats:
        cog.outl('float _' + thisfloat + ';')
    for thisstring in strings:
        cog.outl('std::string _' + thisstring + ';')
    cog.outl(classname + '() {')
    for thisint in ints:
        cog.outl('    _' + thisint + ' = 0;')
    for thisfloat in floats:
        cog.outl('    _' + thisfloat + ' = 0;')
    for thisstring in strings:
        cog.outl('    _' + thisstring + ' = "";')
    cog.outl('}')
    for thisint in ints:
        cog.outl(classname + ' ' + thisint + '( int ' + '_' + thisint + ' ) {')
        cog.outl('    this->_' + thisint + ' = _' + thisint + ';')
        cog.outl('    return *this;')
        cog.outl('}')
    for thisfloat in floats:
        cog.outl(classname + ' ' + thisfloat + '( float ' + '_' + thisfloat + ' ) {')
        cog.outl('    this->_' + thisfloat + ' = _' + thisfloat + ';')
        cog.outl('    return *this;')
        cog.outl('}')
    for thisstring in strings:
        cog.outl(classname + ' ' + thisstring + '( std::string ' + '_' + thisstring + ' ) {')
        cog.outl('    this->_' + thisstring + ' = _' + thisstring + ';')
        cog.outl('    return *this;')
        cog.outl('}')
gmalmquist/pants | contrib/cpp/src/python/pants/contrib/cpp/targets/cpp_binary.py | 14 | 1101 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.contrib.cpp.targets.cpp_target import CppTarget
class CppBinary(CppTarget):
  """A C++ binary."""

  def __init__(self,
               libraries=None,
               *args,
               **kwargs):
    """
    :param libraries: Libraries that this target depends on that are not pants targets.
      For example, 'm' or 'rt' that are expected to be installed on the local system.
    :type libraries: List of libraries to link against.
    """
    payload = Payload()
    payload.add_fields({
      'libraries': PrimitiveField(libraries)
    })
    # Fix: forward *args as well -- previously any positional arguments were
    # silently dropped instead of reaching the CppTarget constructor.
    super(CppBinary, self).__init__(payload=payload, *args, **kwargs)

  @property
  def libraries(self):
    # Names of system libraries to link against (may be None).
    return self.payload.get_field_value('libraries')
| apache-2.0 | -1,457,806,170,085,769,000 | 31.382353 | 93 | 0.673025 | false |
jhsenjaliya/incubator-airflow | airflow/api/common/experimental/trigger_dag.py | 9 | 1706 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
from airflow.exceptions import AirflowException
from airflow.models import DagRun, DagBag
from airflow.utils.state import State
def trigger_dag(dag_id, run_id=None, conf=None, execution_date=None):
    """Create (trigger) a new, externally-triggered DagRun.

    :param dag_id: id of the DAG to trigger; must exist in the DagBag
    :param run_id: optional run id, defaults to "manual__<execution date>"
    :param conf: optional JSON string passed through as the run's conf
    :param execution_date: optional datetime, defaults to utcnow()
        (truncated to whole seconds)
    :return: the newly created DagRun in the RUNNING state
    :raises AirflowException: if the DAG is unknown or the run_id already exists
    """
    bag = DagBag()
    if dag_id not in bag.dags:
        raise AirflowException("Dag id {} not found".format(dag_id))
    dag = bag.get_dag(dag_id)

    if not execution_date:
        execution_date = datetime.datetime.utcnow()

    assert isinstance(execution_date, datetime.datetime)
    execution_date = execution_date.replace(microsecond=0)

    if not run_id:
        run_id = "manual__{0}".format(execution_date.isoformat())

    if DagRun.find(dag_id=dag_id, run_id=run_id):
        raise AirflowException("Run id {} already exists for dag id {}".format(
            run_id,
            dag_id
        ))

    run_conf = json.loads(conf) if conf else None

    return dag.create_dagrun(
        run_id=run_id,
        execution_date=execution_date,
        state=State.RUNNING,
        conf=run_conf,
        external_trigger=True
    )
| apache-2.0 | -402,296,971,686,159,040 | 27.915254 | 79 | 0.678781 | false |
Pirata-Repository/Pirata | plugin.video.movie25/resources/libs/documentaries/documentary.py | 2 | 18086 | import urllib,urllib2,re,cookielib,urlresolver,sys,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from resources.libs import main
#Mash Up - by Mash2k3 2012.
from t0mm0.common.addon import Addon
from resources.universal import playbackengine, watchhistory
# Addon identity/settings handle, script-level Addon helper, shared artwork
# path table, and the watch-history recorder used when items are played.
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon('plugin.video.movie25', sys.argv)
art = main.art
wh = watchhistory.WatchHistory('plugin.video.movie25')
def LISTDOC(murl):
    """Build the top-level category menu for one of three documentary sites.

    murl selects the site: 'doc1' = documentaryheaven.com,
    'doc2' = topdocumentaryfilms.com, 'doc3' = documentary-log.com.
    Each branch adds a couple of fixed entries, then scrapes the site's
    category navigation and adds one directory (mode 87) per category.
    """
    if murl=='doc1':
        main.GA("Documantary","DhHome")
        #main.addDir('[COLOR red]Search[/COLOR]','search',89,'')
        main.addDir('[COLOR red]Popular[/COLOR]','http://documentaryheaven.com/popular/',89,'')
        main.addDir('[COLOR red]Recent[/COLOR]','http://documentaryheaven.com/all/',87,'')
        url='http://documentaryheaven.com/'
        link=main.OPENURL(url)
        # category list items: relative href + display name
        match=re.compile('<li class=".+?"><a href="(.+?)" title=".+?">(.+?)</a> </li>').findall(link)
        for url, name in match:
            main.addDir(name,'http://documentaryheaven.com'+url,87,'')
    elif murl=='doc2':
        main.GA("Documantary","TDFHome")
        main.addDir('[COLOR red]Recent[/COLOR]','http://topdocumentaryfilms.com/all/',87,'')
        main.addDir('[COLOR red]Recommended[/COLOR]','rec',89,'')
        url='http://topdocumentaryfilms.com/'
        link=main.OPENURL(url)
        # absolute category href + name
        match=re.compile('href="(.+?)" title=".+?">(.+?)</a>.+?</li>').findall(link)
        for url, name in match:
            main.addDir(name,url,87,'')
    elif murl=='doc3':
        main.GA("Documantary","DLHome")
        main.addDir('[COLOR red]Latest[/COLOR]','http://www.documentary-log.com/',87,'')
        main.addDir("[COLOR red]Editor's Picks[/COLOR]",'http://www.documentary-log.com/category/editors-picks/',87,'')
        url='http://www.documentary-log.com/'
        link=main.OPENURL(url)
        # WordPress category widget: href, title attr (desc), name, item count
        match=re.compile('<li class="cat-item cat-item-.+?"><a href="(.+?)" title="(.+?)">(.+?)</a> ([^<]+)').findall(link)
        for url, desc, name, leng in match:
            main.addDirc(name+' '+leng,url,87,'',desc,'','','','')
def LISTDOC2(murl):
    """List documentary entries of a category/listing page.

    The scraping regexes are chosen by which site's domain appears in murl;
    one playable item (mode 88) is added per documentary, plus a "Next"
    directory entry (mode 87) when a pagination link is found.
    """
    match=re.compile('documentaryheaven').findall(murl)
    if (len(match)>0):
        main.GA("DhHome","Dh-List")
        link=main.OPENURL(murl)
        # flatten the page so the multi-line entry regexes can match
        link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
        match= re.compile('href="([^<]+)" rel="bookmark" title=".+?" rel=".+?"><img class=".+?" src="(.+?)" alt="([^<]+)"/></a></div><div class=".+?">(.+?)</div>').findall(link)
        if (len(match)==0):
            # fallback for the site's alternate listing markup
            match=re.compile('href="(.+?)" title="" rel=".+?"><img class=".+?" src="(.+?)" alt="(.+?)".+?</a>\n </div> \n <div id="postDis">\n \t(.+?)[...]').findall(link)
        for url,thumb,name,desc in match:
            main.addPlayMs(name,url,88,thumb,desc,'','','','')
        paginate=re.compile('<a href="([^<]+)" >Next →</a>').findall(link)
        if (len(paginate)>0):
            main.addDir('[COLOR blue]Next Page[/COLOR]',paginate[0],87,"%s/art/next2.png"%selfAddon.getAddonInfo("path"))
    match2=re.compile('topdocumentaryfilms').findall(murl)
    if (len(match2)>0):
        i=0
        main.GA("TDFHome","TDF-List")
        link=main.OPENURL(murl)
        link=link.replace('\n','')
        # urls, thumb/name pairs and descriptions are scraped separately and
        # matched up positionally via the counter i
        url=re.compile('href="([^<]+)">Watch now').findall(link)
        match=re.compile('href=".+?".+?src="(.+?)".+?alt="(.+?)"').findall(link)
        desc=re.compile('>([^<]+)</p><p><strong>').findall(link)
        for thumb,name in match:
            main.addPlayMs(name,url[i],88,thumb,desc[i],'','','','')
            i=i+1
        paginate=re.compile('</a>.+?href="([^<]+)">Next</a></div>').findall(link)
        if (len(paginate)>0):
            for purl in paginate:
                main.addDir('[COLOR blue]Next[/COLOR]',purl,87,"%s/art/next2.png"%selfAddon.getAddonInfo("path"))
    match3=re.compile('documentary-log').findall(murl)
    if (len(match3)>0):
        main.GA("DLHome","DL-List")
        link=main.OPENURL(murl)
        link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
        match=re.compile('<div class="clear">.+?<a href="(.+?)" title=".+?"> <img src="(.+?)" alt="(.+?)" class=".+?" /> </a> <p>(.+?)<a').findall(link)
        for url,thumb,name,desc in match:
            main.addPlayMs(name,url,88,thumb,desc,'','','','')
        paginate=re.compile("<a href='([^<]+)' class='nextpostslink'>").findall(link)
        if (len(paginate)>0):
            for purl in paginate:
                main.addDir('[COLOR blue]Next[/COLOR]',purl,87,"%s/art/next2.png"%selfAddon.getAddonInfo("path"))
def LISTDOCPOP(murl):
if murl=='search':
keyb = xbmc.Keyboard('', 'Search Documentaries')
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
encode=urllib.quote(search)
surl='http://documentaryheaven.com/find/?q='+encode
link=main.OPENURL(surl)
match=re.compile('<a href="(.+?)" title="" rel=".+?"><img class=".+?" src="(.+?)" alt="(.+?)".+?</a>\n </div> \n <div id="postDis">\n \t(.+?)[...]').findall(link)
if (len(match)==0):
match=re.compile('href="(.+?)" title="" rel=".+?"><img class=".+?" src="(.+?)" alt="(.+?)".+?</a>\n </div> \n <div id="postDis">\n \t(.+?)[...]').findall(link)
for url,thumb,name,desc in match:
main.addPlayMs(name,url,88,thumb,desc,'','','','')
paginate=re.compile("<span class=\'page current\'>1</span></li><li><a href=\'http://documentaryheaven.com/page/2/.?s=.+?\'").findall(link)
if (len(paginate)>0):
main.addDir('[COLOR blue]Page 2[/COLOR]','http://documentaryheaven.com/page/2/?s='+encode,9,"%s/art/next2.png"%selfAddon.getAddonInfo("path"))
elif murl=='rec':
rurl='http://topdocumentaryfilms.com/'
link=main.OPENURL(rurl)
match=re.compile('href="([^<]+)">([^<]+)</a></li><li><a').findall(link)
for url,name in match:
main.addPlayMs(name,url,88,'','','','','','')
else:
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<a href=\'(.+?)/\'><img src=\'(.+?)\'/></a></div><div class=".+?"><div class=".+?"><a href=\'.+?/\'>(.+?)</a></div><div class=".+?"><p>(.+?)</div>').findall(link)
for url,thumb,name,desc in match:
main.addPlayMs(name,url,88,thumb,desc,'','','','')
def LINKDOC(mname,murl,thumb):
ok=True
match=re.compile('documentaryheaven').findall(murl)
if (len(match)>0):
main.GA("DocumentaryHeaven","Watched")
ok=True
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
link=main.OPENURL(murl)
match=re.compile('<iframe frameborder=".+?" width=".+?" height=".+?" src="http:(.+?)">').findall(link)
if (len(match)==0):
match=re.compile('<iframe width=".+?" height=".+?" src="http:(.+?)" frameborder=".+?" allowfullscreen>').findall(link)
if (len(match)==0):
match=re.compile('<embe.+?src="http:([^<]+)".+?></embed>').findall(link)
for url in match:
url='http:'+url
match4=re.compile('vimeo').findall(url)
if (len(match4)>0):
url=url.replace('?title=0&byline=0&portrait=0','')
url=url.replace('http://player.vimeo.com/video','http://vimeo.com')
match5=re.compile('dailymotion').findall(url)
if (len(match5)>0):
url=url.replace('http://www.dailymotion.com/embed/video','http://www.dailymotion.com/video')
match8=re.compile('youtube').findall(url)
if (len(match8)>0):
match2=re.compile('http://www.youtube.com/embed/([^<]+)').findall(url)
url='http://www.youtube.com/watch?v='+match2[0]
if (len(match)==0):
match=re.compile('<iframe src="http:(.+?)" width=".+?" height=".+?" frameborder=".+?".+?</iframe>').findall(link)
for url in match:
url='http:'+url
match4=re.compile('vimeo').findall(url)
match6=re.compile('putlocker').findall(url)
if (len(match4)>0):
url=url.replace('?title=0&byline=0&portrait=0','')
url=url.replace('http://player.vimeo.com/video','http://vimeo.com')
elif (len(match6)>0):
url=url
else:
match2=re.compile('http://www.youtube.com/embed/([^<]+)').findall(url)
if (len(match2)==0):
match2=re.compile('http://www.youtube.com/p/([^<]+).?hl=.+?').findall(link)
url='http://www.youtube.com/watch?v='+match2[0]
listitem = xbmcgui.ListItem(mname)
try:
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(str(url))
if(stream_url == False):
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Cannot Be Resolved,5000)")
return
# play with bookmark
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]Doc-Heaven[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
player.KeepAlive()
return ok
except:
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Not Playable,5000)")
return ok
match2=re.compile('topdocumentaryfilms').findall(murl)
if (len(match2)>0):
sources=[]
main.GA("TopDocumentaryFilms","Watched")
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
link=main.OPENURL(murl)
ok=True
link=link.replace('src="http://cdn.tdfimg.com/wp-content/uploads','')
match=re.compile('src="(.+?)"').findall(link)
for url in match:
match4=re.compile('vimeo').findall(url)
if (len(match4)>0):
url=url.replace('?title=0&byline=0&portrait=0','')
url=url.replace('http://player.vimeo.com/video','http://vimeo.com')
match5=re.compile('dailymotion').findall(url)
if (len(match5)>0):
url=url.replace('http://www.dailymotion.com/embed/video','http://www.dailymotion.com/video')
match7=re.compile('google').findall(url)
if (len(match7)>0):
xbmc.executebuiltin("XBMC.Notification(Sorry!,link down,3000)")
return
match6=re.compile('youtube').findall(url)
if (len(match6)>0):
match=re.compile('http://www.youtube.com/embed/n_(.+?).?rel=0&iv_load_policy=3').findall(url)
if (len(match)>0):
url='http://www.youtube.com/watch?feature=player_embedded&v=n_'+match[0]
else:
match=re.compile('http://www.youtube.com/embed/(.+?).?rel=0&iv_load_policy=3').findall(url)
if (len(match)>0):
url='http://www.youtube.com/watch?feature=player_embedded&v='+match[0]
match2=re.compile('videoseries').findall(url)
if (len(match2)>0):
link2=main.OPENURL(url)
match2=re.compile('href="/watch.?v=(.+?)"').findall(link2)
match3=re.compile("http://www.youtube.com/embed/videoseries.?list=(.+?)&.+?load_policy=.+?").findall(url)
print match3[0]
try:
url='http://www.youtube.com/watch?v='+match2[0]
except:
namelist=[]
urllist=[]
link=main.OPENURL('https://gdata.youtube.com/feeds/api/playlists/'+match3[0]+'?start-index=1&max-results=50')
match=re.compile("href='https://m.youtube.com/details.?v=(.+?)'/.+?<media\:descriptio[^>]+>([^<]+)</media\:description>.+?<media\:thumbnail url='([^']+)'.+?<media:title type='plain'>(.+?)/media:title>").findall(link)
for url,desc,thumb,name in match:
name=name.replace('<','')
namelist.append(name)
urllist.append(url)
dialog = xbmcgui.Dialog()
answer =dialog.select("Playlist", namelist)
url='http://www.youtube.com/watch?v='+urllist[int(answer)]
else:
url=url.replace('?rel=0','')
try:
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(str(url))
if(stream_url == False):
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Cannot Be Resolved,5000)")
return
# play with bookmark
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]Top-Doc[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
player.KeepAlive()
return ok
except:
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Not Playable,5000)")
return ok
match3=re.compile('documentary-log.com').findall(murl)
if (len(match3)>0):
main.GA("Documentary-Log","Watched")
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
link=main.OPENURL(murl)
link=link.replace('src="http://cdn.tdfimg.com/wp-content/uploads','')
match=re.compile('src="(.+?)" .+?></iframe>').findall(link)
if (len(match)==0):
link=link.replace('src="http://www.documentary-log.com/wp-cont','')
match=re.compile('src="(.+?)" .+?/>').findall(link)
for url in match:
match4=re.compile('vimeo').findall(url)
if (len(match4)>0):
url=url.replace('?title=0&byline=0&portrait=0','')
url=url.replace('http://player.vimeo.com/video','http://vimeo.com')
match5=re.compile('dailymotion').findall(url)
if (len(match5)>0):
url=url.replace('http://www.dailymotion.com/embed/video','http://www.dailymotion.com/video')
match7=re.compile('google').findall(url)
if (len(match7)>0):
xbmc.executebuiltin("XBMC.Notification(Sorry!,link down,3000)")
return
match6=re.compile('youtube').findall(url)
if (len(match6)>0):
match=re.compile('http://www.youtube.com/embed/n_(.+?).?rel=0&iv_load_policy=3').findall(url)
if (len(match)>0):
url='http://www.youtube.com/watch?feature=player_embedded&v=n_'+match[0]
else:
match=re.compile('http://www.youtube.com/embed/(.+?).?rel=0&iv_load_policy=3').findall(url)
if (len(match)>0):
url='http://www.youtube.com/watch?feature=player_embedded&v='+match[0]
match2=re.compile('videoseries').findall(url)
if (len(match2)>0):
link2=main.OPENURL(url)
match2=re.compile('href="/watch.?v=(.+?)"').findall(link2)
match3=re.compile("http://www.youtube.com/embed/videoseries.?list=(.+?)&iv_load_policy=3").findall(url)
print match3
url='http://www.youtube.com/watch?v='+match2[0]
else:
url=url.replace('?rel=0','')
print "vlink " +str(url)
try:
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)")
stream_url = main.resolve_url(str(url))
if(stream_url == False):
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Cannot Be Resolved,5000)")
return
# play with bookmark
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]Doc-Log[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
player.KeepAlive()
return ok
except Exception, e:
if stream_url != False:
main.ErrorReport(e)
else:
xbmc.executebuiltin("XBMC.Notification(Sorry!,Link Not Playable,5000)")
return ok
| gpl-2.0 | -2,529,665,593,083,284,000 | 54.140244 | 256 | 0.525876 | false |
Polyconseil/tornado | tornado/test/template_test.py | 63 | 18390 | from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
import traceback
from tornado.escape import utf8, native_str, to_unicode
from tornado.template import Template, DictLoader, ParseError, Loader
from tornado.test.util import unittest
from tornado.util import u, ObjectDict, unicode_type
class TemplateTest(unittest.TestCase):
def test_simple(self):
template = Template("Hello {{ name }}!")
self.assertEqual(template.generate(name="Ben"),
b"Hello Ben!")
def test_bytes(self):
template = Template("Hello {{ name }}!")
self.assertEqual(template.generate(name=utf8("Ben")),
b"Hello Ben!")
def test_expressions(self):
template = Template("2 + 2 = {{ 2 + 2 }}")
self.assertEqual(template.generate(), b"2 + 2 = 4")
def test_comment(self):
template = Template("Hello{# TODO i18n #} {{ name }}!")
self.assertEqual(template.generate(name=utf8("Ben")),
b"Hello Ben!")
def test_include(self):
loader = DictLoader({
"index.html": '{% include "header.html" %}\nbody text',
"header.html": "header text",
})
self.assertEqual(loader.load("index.html").generate(),
b"header text\nbody text")
def test_extends(self):
loader = DictLoader({
"base.html": """\
<title>{% block title %}default title{% end %}</title>
<body>{% block body %}default body{% end %}</body>
""",
"page.html": """\
{% extends "base.html" %}
{% block title %}page title{% end %}
{% block body %}page body{% end %}
""",
})
self.assertEqual(loader.load("page.html").generate(),
b"<title>page title</title>\n<body>page body</body>\n")
def test_relative_load(self):
loader = DictLoader({
"a/1.html": "{% include '2.html' %}",
"a/2.html": "{% include '../b/3.html' %}",
"b/3.html": "ok",
})
self.assertEqual(loader.load("a/1.html").generate(),
b"ok")
def test_escaping(self):
self.assertRaises(ParseError, lambda: Template("{{"))
self.assertRaises(ParseError, lambda: Template("{%"))
self.assertEqual(Template("{{!").generate(), b"{{")
self.assertEqual(Template("{%!").generate(), b"{%")
self.assertEqual(Template("{{ 'expr' }} {{!jquery expr}}").generate(),
b"expr {{jquery expr}}")
def test_unicode_template(self):
template = Template(utf8(u("\u00e9")))
self.assertEqual(template.generate(), utf8(u("\u00e9")))
def test_unicode_literal_expression(self):
# Unicode literals should be usable in templates. Note that this
# test simulates unicode characters appearing directly in the
# template file (with utf8 encoding), i.e. \u escapes would not
# be used in the template file itself.
if str is unicode_type:
# python 3 needs a different version of this test since
# 2to3 doesn't run on template internals
template = Template(utf8(u('{{ "\u00e9" }}')))
else:
template = Template(utf8(u('{{ u"\u00e9" }}')))
self.assertEqual(template.generate(), utf8(u("\u00e9")))
def test_custom_namespace(self):
loader = DictLoader({"test.html": "{{ inc(5) }}"}, namespace={"inc": lambda x: x + 1})
self.assertEqual(loader.load("test.html").generate(), b"6")
def test_apply(self):
def upper(s):
return s.upper()
template = Template(utf8("{% apply upper %}foo{% end %}"))
self.assertEqual(template.generate(upper=upper), b"FOO")
def test_unicode_apply(self):
def upper(s):
return to_unicode(s).upper()
template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
def test_bytes_apply(self):
def upper(s):
return utf8(to_unicode(s).upper())
template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
def test_if(self):
template = Template(utf8("{% if x > 4 %}yes{% else %}no{% end %}"))
self.assertEqual(template.generate(x=5), b"yes")
self.assertEqual(template.generate(x=3), b"no")
def test_if_empty_body(self):
template = Template(utf8("{% if True %}{% else %}{% end %}"))
self.assertEqual(template.generate(), b"")
def test_try(self):
template = Template(utf8("""{% try %}
try{% set y = 1/x %}
{% except %}-except
{% else %}-else
{% finally %}-finally
{% end %}"""))
self.assertEqual(template.generate(x=1), b"\ntry\n-else\n-finally\n")
self.assertEqual(template.generate(x=0), b"\ntry-except\n-finally\n")
def test_comment_directive(self):
template = Template(utf8("{% comment blah blah %}foo"))
self.assertEqual(template.generate(), b"foo")
def test_break_continue(self):
template = Template(utf8("""\
{% for i in range(10) %}
{% if i == 2 %}
{% continue %}
{% end %}
{{ i }}
{% if i == 6 %}
{% break %}
{% end %}
{% end %}"""))
result = template.generate()
# remove extraneous whitespace
result = b''.join(result.split())
self.assertEqual(result, b"013456")
def test_break_outside_loop(self):
try:
Template(utf8("{% break %}"))
raise Exception("Did not get expected exception")
except ParseError:
pass
def test_break_in_apply(self):
# This test verifies current behavior, although of course it would
# be nice if apply didn't cause seemingly unrelated breakage
try:
Template(utf8("{% for i in [] %}{% apply foo %}{% break %}{% end %}{% end %}"))
raise Exception("Did not get expected exception")
except ParseError:
pass
@unittest.skipIf(sys.version_info >= division.getMandatoryRelease(),
'no testable future imports')
def test_no_inherit_future(self):
# This file has from __future__ import division...
self.assertEqual(1 / 2, 0.5)
# ...but the template doesn't
template = Template('{{ 1 / 2 }}')
self.assertEqual(template.generate(), '0')
class StackTraceTest(unittest.TestCase):
def test_error_line_number_expression(self):
loader = DictLoader({"test.html": """one
two{{1/0}}
three
"""})
try:
loader.load("test.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# test.html:2" in traceback.format_exc())
def test_error_line_number_directive(self):
loader = DictLoader({"test.html": """one
two{%if 1/0%}
three{%end%}
"""})
try:
loader.load("test.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# test.html:2" in traceback.format_exc())
def test_error_line_number_module(self):
loader = DictLoader({
"base.html": "{% module Template('sub.html') %}",
"sub.html": "{{1/0}}",
}, namespace={"_tt_modules": ObjectDict({"Template": lambda path, **kwargs: loader.load(path).generate(**kwargs)})})
try:
loader.load("base.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
exc_stack = traceback.format_exc()
self.assertTrue('# base.html:1' in exc_stack)
self.assertTrue('# sub.html:1' in exc_stack)
def test_error_line_number_include(self):
loader = DictLoader({
"base.html": "{% include 'sub.html' %}",
"sub.html": "{{1/0}}",
})
try:
loader.load("base.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# sub.html:1 (via base.html:1)" in
traceback.format_exc())
def test_error_line_number_extends_base_error(self):
loader = DictLoader({
"base.html": "{{1/0}}",
"sub.html": "{% extends 'base.html' %}",
})
try:
loader.load("sub.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
exc_stack = traceback.format_exc()
self.assertTrue("# base.html:1" in exc_stack)
def test_error_line_number_extends_sub_error(self):
loader = DictLoader({
"base.html": "{% block 'block' %}{% end %}",
"sub.html": """
{% extends 'base.html' %}
{% block 'block' %}
{{1/0}}
{% end %}
"""})
try:
loader.load("sub.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# sub.html:4 (via base.html:1)" in
traceback.format_exc())
def test_multi_includes(self):
loader = DictLoader({
"a.html": "{% include 'b.html' %}",
"b.html": "{% include 'c.html' %}",
"c.html": "{{1/0}}",
})
try:
loader.load("a.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# c.html:1 (via b.html:1, a.html:1)" in
traceback.format_exc())
class ParseErrorDetailTest(unittest.TestCase):
def test_details(self):
loader = DictLoader({
"foo.html": "\n\n{{",
})
with self.assertRaises(ParseError) as cm:
loader.load("foo.html")
self.assertEqual("Missing end expression }} at foo.html:3",
str(cm.exception))
self.assertEqual("foo.html", cm.exception.filename)
self.assertEqual(3, cm.exception.lineno)
class AutoEscapeTest(unittest.TestCase):
def setUp(self):
self.templates = {
"escaped.html": "{% autoescape xhtml_escape %}{{ name }}",
"unescaped.html": "{% autoescape None %}{{ name }}",
"default.html": "{{ name }}",
"include.html": """\
escaped: {% include 'escaped.html' %}
unescaped: {% include 'unescaped.html' %}
default: {% include 'default.html' %}
""",
"escaped_block.html": """\
{% autoescape xhtml_escape %}\
{% block name %}base: {{ name }}{% end %}""",
"unescaped_block.html": """\
{% autoescape None %}\
{% block name %}base: {{ name }}{% end %}""",
# Extend a base template with different autoescape policy,
# with and without overriding the base's blocks
"escaped_extends_unescaped.html": """\
{% autoescape xhtml_escape %}\
{% extends "unescaped_block.html" %}""",
"escaped_overrides_unescaped.html": """\
{% autoescape xhtml_escape %}\
{% extends "unescaped_block.html" %}\
{% block name %}extended: {{ name }}{% end %}""",
"unescaped_extends_escaped.html": """\
{% autoescape None %}\
{% extends "escaped_block.html" %}""",
"unescaped_overrides_escaped.html": """\
{% autoescape None %}\
{% extends "escaped_block.html" %}\
{% block name %}extended: {{ name }}{% end %}""",
"raw_expression.html": """\
{% autoescape xhtml_escape %}\
expr: {{ name }}
raw: {% raw name %}""",
}
def test_default_off(self):
loader = DictLoader(self.templates, autoescape=None)
name = "Bobby <table>s"
self.assertEqual(loader.load("escaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("unescaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("default.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("include.html").generate(name=name),
b"escaped: Bobby <table>s\n"
b"unescaped: Bobby <table>s\n"
b"default: Bobby <table>s\n")
def test_default_on(self):
loader = DictLoader(self.templates, autoescape="xhtml_escape")
name = "Bobby <table>s"
self.assertEqual(loader.load("escaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("unescaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("default.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("include.html").generate(name=name),
b"escaped: Bobby <table>s\n"
b"unescaped: Bobby <table>s\n"
b"default: Bobby <table>s\n")
def test_unextended_block(self):
loader = DictLoader(self.templates)
name = "<script>"
self.assertEqual(loader.load("escaped_block.html").generate(name=name),
b"base: <script>")
self.assertEqual(loader.load("unescaped_block.html").generate(name=name),
b"base: <script>")
def test_extended_block(self):
loader = DictLoader(self.templates)
def render(name):
return loader.load(name).generate(name="<script>")
self.assertEqual(render("escaped_extends_unescaped.html"),
b"base: <script>")
self.assertEqual(render("escaped_overrides_unescaped.html"),
b"extended: <script>")
self.assertEqual(render("unescaped_extends_escaped.html"),
b"base: <script>")
self.assertEqual(render("unescaped_overrides_escaped.html"),
b"extended: <script>")
def test_raw_expression(self):
loader = DictLoader(self.templates)
def render(name):
return loader.load(name).generate(name='<>&"')
self.assertEqual(render("raw_expression.html"),
b"expr: <>&"\n"
b"raw: <>&\"")
def test_custom_escape(self):
loader = DictLoader({"foo.py":
"{% autoescape py_escape %}s = {{ name }}\n"})
def py_escape(s):
self.assertEqual(type(s), bytes)
return repr(native_str(s))
def render(template, name):
return loader.load(template).generate(py_escape=py_escape,
name=name)
self.assertEqual(render("foo.py", "<html>"),
b"s = '<html>'\n")
self.assertEqual(render("foo.py", "';sys.exit()"),
b"""s = "';sys.exit()"\n""")
self.assertEqual(render("foo.py", ["not a string"]),
b"""s = "['not a string']"\n""")
def test_manual_minimize_whitespace(self):
# Whitespace including newlines is allowed within template tags
# and directives, and this is one way to avoid long lines while
# keeping extra whitespace out of the rendered output.
loader = DictLoader({'foo.txt': """\
{% for i in items
%}{% if i > 0 %}, {% end %}{#
#}{{i
}}{% end
%}""",
})
self.assertEqual(loader.load("foo.txt").generate(items=range(5)),
b"0, 1, 2, 3, 4")
def test_whitespace_by_filename(self):
# Default whitespace handling depends on the template filename.
loader = DictLoader({
"foo.html": " \n\t\n asdf\t ",
"bar.js": " \n\n\n\t qwer ",
"baz.txt": "\t zxcv\n\n",
"include.html": " {% include baz.txt %} \n ",
"include.txt": "\t\t{% include foo.html %} ",
})
# HTML and JS files have whitespace compressed by default.
self.assertEqual(loader.load("foo.html").generate(),
b"\nasdf ")
self.assertEqual(loader.load("bar.js").generate(),
b"\nqwer ")
# TXT files do not.
self.assertEqual(loader.load("baz.txt").generate(),
b"\t zxcv\n\n")
# Each file maintains its own status even when included in
# a file of the other type.
self.assertEqual(loader.load("include.html").generate(),
b" \t zxcv\n\n\n")
self.assertEqual(loader.load("include.txt").generate(),
b"\t\t\nasdf ")
def test_whitespace_by_loader(self):
templates = {
"foo.html": "\t\tfoo\n\n",
"bar.txt": "\t\tbar\n\n",
}
loader = DictLoader(templates, whitespace='all')
self.assertEqual(loader.load("foo.html").generate(), b"\t\tfoo\n\n")
self.assertEqual(loader.load("bar.txt").generate(), b"\t\tbar\n\n")
loader = DictLoader(templates, whitespace='single')
self.assertEqual(loader.load("foo.html").generate(), b" foo\n")
self.assertEqual(loader.load("bar.txt").generate(), b" bar\n")
loader = DictLoader(templates, whitespace='oneline')
self.assertEqual(loader.load("foo.html").generate(), b" foo ")
self.assertEqual(loader.load("bar.txt").generate(), b" bar ")
def test_whitespace_directive(self):
loader = DictLoader({
"foo.html": """\
{% whitespace oneline %}
{% for i in range(3) %}
{{ i }}
{% end %}
{% whitespace all %}
pre\tformatted
"""})
self.assertEqual(loader.load("foo.html").generate(),
b" 0 1 2 \n pre\tformatted\n")
class TemplateLoaderTest(unittest.TestCase):
def setUp(self):
self.loader = Loader(os.path.join(os.path.dirname(__file__), "templates"))
def test_utf8_in_file(self):
tmpl = self.loader.load("utf8.html")
result = tmpl.generate()
self.assertEqual(to_unicode(result).strip(), u("H\u00e9llo"))
| apache-2.0 | 7,295,087,187,409,388,000 | 37.232848 | 124 | 0.545459 | false |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/test/test_startfile.py | 77 | 1201 | # Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comment), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also the the script actually has run.
import unittest
from test import test_support
# use this form so that the test is skipped when startfile is not available:
from os import startfile, path
class TestCase(unittest.TestCase):
def test_nonexisting(self):
self.assertRaises(OSError, startfile, "nonexisting.vbs")
def test_nonexisting_u(self):
self.assertRaises(OSError, startfile, u"nonexisting.vbs")
def test_empty(self):
empty = path.join(path.dirname(__file__), "empty.vbs")
startfile(empty)
startfile(empty, "open")
def test_empty_u(self):
empty = path.join(path.dirname(__file__), "empty.vbs")
startfile(unicode(empty, "mbcs"))
startfile(unicode(empty, "mbcs"), "open")
def test_main():
test_support.run_unittest(TestCase)
if __name__=="__main__":
test_main()
| gpl-2.0 | 4,059,184,933,307,990,000 | 31.459459 | 76 | 0.691091 | false |
eLBati/odoo | addons/sale_journal/__init__.py | 443 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_journal
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,675,248,885,532,506,000 | 41.68 | 78 | 0.614808 | false |
Plain-Andy-legacy/android_external_chromium_org | tools/telemetry/telemetry/web_components/web_components_project.py | 45 | 1359 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from trace_viewer import trace_viewer_project
def _FindAllFilesRecursive(source_paths, pred):
all_filenames = set()
for source_path in source_paths:
for dirpath, _, filenames in os.walk(source_path):
for f in filenames:
if f.startswith('.'):
continue
x = os.path.abspath(os.path.join(dirpath, f))
if pred(x):
all_filenames.add(x)
return all_filenames
class WebComponentsProject(trace_viewer_project.TraceViewerProject):
telemetry_path = os.path.abspath(util.GetTelemetryDir())
def __init__(self, *args, **kwargs):
super(WebComponentsProject, self).__init__(*args, **kwargs)
exclude_paths = [os.path.join(self.telemetry_path, 'docs'),
os.path.join(self.telemetry_path, 'unittest_data'),
os.path.join(self.telemetry_path, 'support')]
excluded_html_files = _FindAllFilesRecursive(
exclude_paths,
lambda x: x.endswith('.html'))
self.non_module_html_files.extend(excluded_html_files)
self.non_module_html_files.appendRel(self.telemetry_path, 'results.html')
self.source_paths.append(self.telemetry_path)
| bsd-3-clause | -8,483,411,306,923,649,000 | 32.975 | 77 | 0.679176 | false |
songmonit/CTTMSONLINE | addons/account/wizard/account_change_currency.py | 385 | 3751 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_change_currency(osv.osv_memory):
_name = 'account.change.currency'
_description = 'Change Currency'
_columns = {
'currency_id': fields.many2one('res.currency', 'Change to', required=True, help="Select a currency to apply on the invoice"),
}
def view_init(self, cr , uid , fields_list, context=None):
obj_inv = self.pool.get('account.invoice')
if context is None:
context = {}
if context.get('active_id',False):
if obj_inv.browse(cr, uid, context['active_id']).state != 'draft':
raise osv.except_osv(_('Error!'), _('You can only change currency for Draft Invoice.'))
pass
def change_currency(self, cr, uid, ids, context=None):
obj_inv = self.pool.get('account.invoice')
obj_inv_line = self.pool.get('account.invoice.line')
obj_currency = self.pool.get('res.currency')
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
new_currency = data.currency_id.id
invoice = obj_inv.browse(cr, uid, context['active_id'], context=context)
if invoice.currency_id.id == new_currency:
return {}
rate = obj_currency.browse(cr, uid, new_currency, context=context).rate
for line in invoice.invoice_line:
new_price = 0
if invoice.company_id.currency_id.id == invoice.currency_id.id:
new_price = line.price_unit * rate
if new_price <= 0:
raise osv.except_osv(_('Error!'), _('New currency is not configured properly.'))
if invoice.company_id.currency_id.id != invoice.currency_id.id and invoice.company_id.currency_id.id == new_currency:
old_rate = invoice.currency_id.rate
if old_rate <= 0:
raise osv.except_osv(_('Error!'), _('Current currency is not configured properly.'))
new_price = line.price_unit / old_rate
if invoice.company_id.currency_id.id != invoice.currency_id.id and invoice.company_id.currency_id.id != new_currency:
old_rate = invoice.currency_id.rate
if old_rate <= 0:
raise osv.except_osv(_('Error!'), _('Current currency is not configured properly.'))
new_price = (line.price_unit / old_rate ) * rate
obj_inv_line.write(cr, uid, [line.id], {'price_unit': new_price})
obj_inv.write(cr, uid, [invoice.id], {'currency_id': new_currency}, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,110,865,850,051,920,000 | 47.714286 | 132 | 0.597974 | false |
ma-compbio/PEP | genVecs.py | 1 | 7271 | #encoding:utf-8
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import pandas as pd
import numpy as np
import os
import sys
import math
import random
import processSeq
import warnings
import threading
from multiprocessing.dummy import Pool as ThreadPool
from sklearn import preprocessing
import sklearn.preprocessing
from gensim import corpora, models, similarities
class mycorpuse(object):
    """Streaming corpus of tokenised enhancer sentences for GM12878."""
    def __iter__(self):
        # Stream line by line so the whole file never has to fit in memory.
        with open("./Data/Learning/unlabeled_train_enhancer_GM12878") as handle:
            for line in handle:
                yield line.split()
class mycorpusp(object):
    """Streaming corpus of tokenised promoter sentences for GM12878."""
    def __iter__(self):
        # Stream line by line so the whole file never has to fit in memory.
        with open("./Data/Learning/unlabeled_train_promoter_GM12878") as handle:
            for line in handle:
                yield line.split()
# Load training data
def getData(type, cell):
    """Read the tab-separated supervised pair table for *cell* / *type*."""
    table_path = './Data/Learning/supervised_' + str(cell) + "_" + str(type)
    return pd.read_table(table_path)
# Load trained Word2Vec model or train a new model
def getWord_model(word,num_features,min_count,type,cell):
    """Return (enhancer_model, promoter_model) Word2Vec models for *cell*.

    Each model is loaded from "./<cell>_enhancer" / "./<cell>_promoter" when
    that file exists; otherwise it is trained from the corresponding
    unlabeled sentence file and saved under that name.
    Note: the `word` and `type` parameters are currently unused here.
    """
    word_model1 = ""
    model_name = str(cell)+"_enhancer"
    if not os.path.isfile("./" + model_name):
        sentence = LineSentence("./Data/Learning/unlabeled_train_enhancer_"+str(cell),max_sentence_length=15000)
        print "Start Training Word2Vec model..."
        # Set values for various parameters
        num_features = int(num_features) # Word vector dimensionality
        min_word_count = int(min_count) # Minimum word count
        num_workers = 20 # Number of threads to run in parallel
        context = 20 # Context window size
        downsampling = 1e-3 # Downsample setting for frequent words
        # Initialize and train the model
        print "Training Word2Vec model..."
        word_model1 = Word2Vec(sentence, workers=num_workers,\
                size=num_features, min_count=min_word_count, \
                window =context, sample=downsampling, seed=1)
        # init_sims precomputes L2-normalised vectors for similarity queries.
        word_model1.init_sims(replace=False)
        word_model1.save(model_name)
        print word_model1.most_similar("CATAGT")
    else:
        print "Loading Word2Vec model..."
        word_model1 = Word2Vec.load(model_name)
    word_model2 = ""
    model_name = str(cell)+"_promoter"
    if not os.path.isfile("./" + model_name):
        sentence = LineSentence("./Data/Learning/unlabeled_train_promoter_"+str(cell),max_sentence_length=15000)
        print "Start Training Word2Vec model..."
        # Set values for various parameters
        num_features = int(num_features) # Word vector dimensionality
        min_word_count = int(min_count) # Minimum word count
        num_workers = 20 # Number of threads to run in parallel
        context = 20 # Context window size
        downsampling = 1e-3 # Downsample setting for frequent words
        # Initialize and train the model
        print "Training Word2Vec model..."
        word_model2 = Word2Vec(sentence, workers=num_workers,\
                size=num_features, min_count=min_word_count, \
                window=context, sample=downsampling, seed=1)
        word_model2.init_sims(replace=False)
        word_model2.save(model_name)
        print word_model2.most_similar("CATAGT")
    else:
        print "Loading Word2Vec model..."
        word_model2 = Word2Vec.load(model_name)
    return word_model1,word_model2
# Split sequences into words
def getCleanDNA_split(DNAdata,word):
    """Upper-case each DNA sequence and split it into *word*-length k-mers.

    Returns a list of token lists (one per input sequence); progress is
    printed every 100 sequences.
    """
    dnalist = []
    counter = 0
    for dna in DNAdata:
        if counter % 100 == 0:
            print "DNA %d of %d\r" % (counter, len(DNAdata)),
            sys.stdout.flush()
        dna = str(dna).upper()
        # processSeq.DNA2Sentence renders the sequence as space-separated k-mers.
        dnalist.append(processSeq.DNA2Sentence(dna,word).split(" "))
        counter += 1
    print
    return dnalist
def makeFeatureVecs(words, model, num_features, word, k, temp):
    """Accumulate weighted word vectors of *words* into *k* positional bins.

    The in-vocabulary tokens of *words* are partitioned (by order of
    appearance) into *k* equal parts; each token's embedding, scaled by the
    matching weight in *temp*, is added to its part's bin.

    Parameters:
        words: sequence of tokens (k-mers).
        model: object with an `index2word` vocabulary list and
            `model[token]` returning a length-*num_features* vector.
        num_features: embedding dimensionality.
        word: unused; kept for interface compatibility with callers.
        k: number of positional bins.
        temp: per-token weights, indexed by in-vocabulary position.

    Returns:
        A flat float32 array of length k * num_features.
    """
    featureVec = np.zeros((k, num_features), dtype="float32")
    nwords = 0
    index2word_set = set(model.index2word)
    length = len(words)
    # BUGFIX: the loop variable used to be named `word`, silently shadowing
    # the `word` parameter; renamed to `token` for clarity.
    for token in words:
        if token in index2word_set:
            # Divide the words into k parts; add up the vectors in each part.
            featureVec[math.floor((nwords * k) / length)] += model[token] * temp[nwords]
            nwords += 1
    return featureVec.reshape(k * num_features)
def mean2max(vec):
    """Average the element-wise maxima of the first and second halves of *vec*."""
    half = int(len(vec) * 0.5)
    first_peak = np.max(vec[0:half], axis=0)
    second_peak = np.max(vec[half:int(len(vec))], axis=0)
    return np.mean([first_peak, second_peak], axis=0)
def getAvgFeatureVecs(data,model1,model2, num_features, word,k,type,cell):
    """Build TF-IDF-weighted embedding features for every enhancer/promoter pair.

    For each row of *data*, the enhancer sequence ("seq1") fills the first
    k*num_features columns using *model1* and the promoter sequence ("seq2")
    fills the second half using *model2*; each word vector is weighted by the
    word's TF-IDF score.  TF-IDF models/dictionaries are cached under ./Data
    and the final matrix is saved to ./Datavecs before being returned.
    NOTE(review): despite the docstring-style name, *k* here is the number of
    positional bins, not averaged — vectors are summed into the half-rows.
    """
    dnaFeatureVecs = np.zeros((len(data),2*k*num_features), dtype="float32")
    if not os.path.isfile("./Data/enhancertfidf"+str(cell)):
        print "Getting dictionary"
        Corp = mycorpuse()
        dictionary = corpora.Dictionary(Corp)
        dictionary.save("./Data/enhancerdic"+str(cell))
        corpus = [dictionary.doc2bow(text) for text in Corp]
        print "Calculating TFIDF"
        tfidf = models.TfidfModel(corpus)
        tfidf.save("./Data/enhancertfidf"+str(cell))
    else:
        tfidf = models.TfidfModel.load("./Data/enhancertfidf"+str(cell))
        dictionary = corpora.Dictionary.load("./Data/enhancerdic"+str(cell))
    # Map token id -> token string.  The comprehension-local `k` does not
    # leak into this scope (dict comprehensions have their own scope).
    dict1 = {k:v for k, v in dictionary.items()}
    DNAdata1 = getCleanDNA_split(data["seq1"],word)
    counter = 0
    for dna in DNAdata1:
        if counter % 100 == 0:
            print "DNA %d of %d\r" % (counter, len(DNAdata1)),
            sys.stdout.flush()
        vec_bow = dictionary.doc2bow(dna)
        vec_tfidf = tfidf[vec_bow]
        for i in xrange(len(vec_tfidf)):
            dnaFeatureVecs[counter][0:k*num_features] += model1[dict1[vec_tfidf[i][0]]] * vec_tfidf[i][1]
        counter += 1
    print
    del DNAdata1
    counter = 0
    if not os.path.isfile("./Data/promotertfidf"+str(cell)):
        print "Getting dictionary"
        Corp = mycorpusp()
        dictionary = corpora.Dictionary(Corp)
        dictionary.save("./Data/promoterdic"+str(cell))
        corpus = [dictionary.doc2bow(text) for text in Corp]
        print "Calculating TFIDF"
        tfidf = models.TfidfModel(corpus)
        tfidf.save("./Data/promotertfidf"+str(cell))
    else:
        tfidf = models.TfidfModel.load("./Data/promotertfidf"+str(cell))
        dictionary = corpora.Dictionary.load("./Data/promoterdic"+str(cell))
    dict2 = {k:v for k, v in dictionary.items()}
    DNAdata2 = []
    counter = 0
    for dna in data["seq2"]:
        if counter % 100 == 0:
            print "DNA %d of %d\r" % (counter, len(data)),
            sys.stdout.flush()
        dna = str(dna).upper()
        DNAdata2.append(processSeq.DNA2Sentence(dna,word).split(" "))
        counter += 1
    counter = 0
    print
    for dna in DNAdata2:
        if counter % 100 == 0:
            print "DNA %d of %d\r" % (counter, len(DNAdata2)),
            sys.stdout.flush()
        vec_bow = dictionary.doc2bow(dna)
        vec_tfidf = tfidf[vec_bow]
        for i in xrange(len(vec_tfidf)):
            dnaFeatureVecs[counter][k*num_features:2*k*num_features] += model2[dict2[vec_tfidf[i][0]]] * vec_tfidf[i][1]
        counter += 1
    print
    np.save("./Datavecs/datavecs_"+str(cell)+"_"+str(type)+".npy",dnaFeatureVecs)
    return dnaFeatureVecs
def run(word, num_features,K,type,cell):
    """End-to-end driver: load/train Word2Vec models, then vectorise the data.

    word: k-mer length; num_features: embedding size; K: positional bins;
    type/cell identify the supervised data set.  Results are written to disk
    by getAvgFeatureVecs.
    """
    warnings.filterwarnings("ignore")
    # NOTE(review): globals kept for compatibility with older callers.
    global word_model,data,k
    word = int(word)
    num_features = int(num_features)
    k=int(K)
    word_model=""
    min_count=10
    word_model1,word_model2 = getWord_model(word,num_features,min_count,type,cell)
    # Read data
    data = getData(type,cell)
    length = data.shape[0]
    print length
    print "Generating Training and Testing Vector"
    dataDataVecs = getAvgFeatureVecs(data,word_model1,word_model2,num_features,word,k,type,cell)
if __name__ == "__main__":
    # Default experiment: 6-mers, 300-dim vectors, 1 bin, 'new' pairs, GM12878.
    run(6,300,1,'new','GM12878')
| mit | -1,418,906,687,575,747,000 | 29.206009 | 111 | 0.683537 | false |
Karosuo/Linux_tools | xls_handlers/xls_sum_venv/lib/python3.6/site-packages/wheel/cli/pack.py | 8 | 2263 | from __future__ import print_function
import os.path
import re
import sys
from wheel.cli import WheelError
from wheel.wheelfile import WheelFile
DIST_INFO_RE = re.compile(r"^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))\.dist-info$")
def pack(directory, dest_dir, build_number):
    """Repack a previously unpacked wheel directory into a new wheel file.
    The .dist-info/WHEEL file must contain one or more tags so that the target
    wheel file name can be determined.
    :param directory: The unpacked wheel directory
    :param dest_dir: Destination directory (defaults to the current directory)
    :param build_number: Optional build tag appended to the name/version part
        of the target wheel filename
    """
    # Find the .dist-info directory
    dist_info_dirs = [fn for fn in os.listdir(directory)
                      if os.path.isdir(os.path.join(directory, fn)) and DIST_INFO_RE.match(fn)]
    if len(dist_info_dirs) > 1:
        raise WheelError('Multiple .dist-info directories found in {}'.format(directory))
    elif not dist_info_dirs:
        raise WheelError('No .dist-info directories found in {}'.format(directory))
    # Determine the target wheel filename
    dist_info_dir = dist_info_dirs[0]
    name_version = DIST_INFO_RE.match(dist_info_dir).group('namever')
    # Add the build number if specified
    if build_number:
        name_version += '-' + build_number
    # Read the tags from .dist-info/WHEEL
    with open(os.path.join(directory, dist_info_dir, 'WHEEL')) as f:
        tags = [line.split(' ')[1].rstrip() for line in f if line.startswith('Tag: ')]
        if not tags:
            raise WheelError('No tags present in {}/WHEEL; cannot determine target wheel filename'
                             .format(dist_info_dir))
    # Reassemble the tags for the wheel file
    impls = sorted({tag.split('-')[0] for tag in tags})
    abivers = sorted({tag.split('-')[1] for tag in tags})
    platforms = sorted({tag.split('-')[2] for tag in tags})
    tagline = '-'.join(['.'.join(impls), '.'.join(abivers), '.'.join(platforms)])
    # Repack the wheel
    wheel_path = os.path.join(dest_dir, '{}-{}.whl'.format(name_version, tagline))
    with WheelFile(wheel_path, 'w') as wf:
        print("Repacking wheel as {}...".format(wheel_path), end='')
        sys.stdout.flush()
        wf.write_files(directory)
    print('OK')
| gpl-3.0 | -1,602,685,190,057,727,500 | 38.017241 | 98 | 0.642952 | false |
brianhelba/pylibtiff | libtiff/tiff_sample_plane.py | 2 | 11678 | """ Implements TIFF sample plane.
"""
# Author: Pearu Peterson
# Created: Jan 2011
import numpy
import tif_lzw
__all__ = ['TiffSamplePlane']
def set_array(output_array, input_array):
    """Copy the raw bytes of *input_array* into *output_array* in place."""
    destination = numpy.frombuffer(output_array.data, dtype=numpy.uint8)
    source = numpy.frombuffer(input_array.data, dtype=numpy.uint8)
    destination[:] = source
class TiffSamplePlane:
""" Image of a single sample in a TIFF image file directory.
"""
    def __init__(self, ifd, sample_index=0):
        """ Construct TiffSamplePlane instance.
        Parameters
        ----------
        ifd : `libtiff.tiff_file.IFDEntry`
        sample_index : int
          Specify sample index. When None then interpret pixel as a sample.
        """
        self.ifd = ifd
        self.sample_index = sample_index
        # Raw layout tags read from the IFD.
        self.planar_config = planar_config = ifd.get_value('PlanarConfiguration')
        self.samples_per_pixel = samples_per_pixel = ifd.get_value('SamplesPerPixel')
        if sample_index is not None and sample_index >= samples_per_pixel:
            raise IndexError ('sample index %r must be less that nof samples %r' % (sample_index, samples_per_pixel))
        pixels_per_row = ifd.get_value('ImageWidth')
        rows_of_pixels = ifd.get_value('ImageLength')
        self.shape = (int(rows_of_pixels), int(pixels_per_row))
        rows_per_strip = ifd.get_value('RowsPerStrip')
        # Ceiling division: number of strips needed to cover all rows.
        strips_per_image = (rows_of_pixels + rows_per_strip - 1) // rows_per_strip
        rows_per_strip = min(rows_of_pixels, rows_per_strip)
        self.rows_per_strip = rows_per_strip
        self.strip_offsets = strip_offsets = ifd.get_value('StripOffsets')
        self.strip_nbytes = strip_nbytes = ifd.get_value('StripByteCounts')
        self.sample_format = sample_format = ifd.get_value('SampleFormat')
        self.bits_per_sample = bits_per_sample = ifd.get_value('BitsPerSample')
        bits_per_pixel = sum(bits_per_sample)
        # Only whole-byte pixels are supported.
        assert bits_per_pixel % 8==0, repr((bits_per_pixel, bits_per_sample))
        bytes_per_pixel = bits_per_pixel // 8
        if sample_index is None:
            bytes_per_sample = bytes_per_pixel
        else:
            bytes_per_sample = bits_per_sample[sample_index] // 8
        bytes_per_row = bytes_per_pixel * pixels_per_row
        bytes_per_strip = rows_per_strip * bytes_per_row
        sample_names = ifd.get_sample_names()
        pixel_dtype = ifd.get_pixel_dtype()
        sample_offset = 0
        if sample_index is None:
            # Treat the whole pixel as a single structured sample.
            dtype = pixel_dtype
            sample_names = ['pixel']
            sample_name = 'pixel'
        else:
            dtype = ifd.get_sample_dtypes ()[sample_index]
            sample_name = sample_names[sample_index]
            if planar_config==1:
                sample_offset = sum(bits_per_sample[:sample_index]) // 8
        bytes_per_row = pixels_per_row * bytes_per_pixel # uncompressed
        # NOTE(review): sample_offset computed above is unconditionally reset
        # to 0 here, making the planar_config==1 branch above a no-op —
        # verify whether this reset is intentional.
        sample_offset = 0
        if planar_config==1 or sample_index is None:
            bytes_per_sample_row = bytes_per_row
        else:
            bytes_per_sample_row = bytes_per_row // samples_per_pixel
        self.dtype = dtype
        self.pixel_dtype = pixel_dtype
        self.bytes_per_pixel = bytes_per_pixel
        self.bytes_per_row = bytes_per_row
        self.bytes_per_sample_image = bytes_per_sample_row * rows_of_pixels
        self.uncompressed_bytes_per_strip = bytes_per_strip
        self.compression = compression = ifd.get_value('Compression')
        self.sample_name = sample_name
        self.sample_offset = sample_offset
        self.bytes_per_sample_row = bytes_per_sample_row
        self.strips_per_image = strips_per_image
        self.is_contiguous = compression==1 and ifd.is_contiguous()
        # Extract an optional acquisition time from a non-XML ImageDescription
        # containing a 'RelativeTime <value>' token.
        time = None
        descr = str(ifd.get_value('ImageDescription', human=True))
        if descr is not None:
            if descr.startswith('<?xml') or descr[:4].lower()=='<ome':
                pass
            else:
                it = descr.find('RelativeTime')
                if it != -1:
                    time = float(descr[it:].split (None, 2)[1].strip())
        self.time = time
def set_time (self, time):
if None not in [self.time, time]:
if self.time!=time:
print('%s:warning: overwriting time value %s with %s' % (self.__class__.__name__, self.time, time))
self.time = time
def check_same_shape_and_type(self, other):
return self.shape==other.shape and self.dtype==other.dtype
    def get_topology (self):
        # Human-readable dump of the plane's layout attributes, used for
        # debugging (e.g. printed by get_row when a row comes back empty).
        return '''shape=%(shape)s planar_config=%(planar_config)s sample_index=%(sample_index)s
dtype=%(dtype)s pixel_dtype=%(pixel_dtype)s
bytes_per_pixel=%(bytes_per_pixel)s
bytes_per_sample_row=%(bytes_per_sample_row)s
bytes_per_row=%(bytes_per_row)s
bytes_per_strip=%(uncompressed_bytes_per_strip)s
bytes_per_sample_image=%(bytes_per_sample_image)s
strip_offsets=%(strip_offsets)s
strip_nbytes=%(strip_nbytes)s
strips_per_image=%(strips_per_image)s
rows_per_strip=%(rows_per_strip)s
''' % (self.__dict__)
def get_row(self, index, subindex = None):
if index < 0:
index += self.shape[0]
if index > self.shape[0] or index < 0:
raise IndexError('Row index %r out of bounds [0,%r]' % (index, self.shape[0]-1))
if self.planar_config==1: # RGBRGB..
strip_index, row_index = divmod(index, self.rows_per_strip)
else: # RR..GG..BB..
index2 = self.sample_index * self.shape[0] + index
strip_index, row_index = divmod(index2, self.rows_per_strip)
start = self.strip_offsets[strip_index]
stop = start + self.strip_nbytes[strip_index]
if self.compression==1:
strip = self.ifd.tiff.data[start:stop]
else:
compressed_strip = self.ifd.tiff.data[start:stop]
if self.compression==5: # lzw
strip = tif_lzw.decode(compressed_strip, self.uncompressed_bytes_per_strip)
else:
raise NotImplementedError (repr(self.compression))
start = row_index * self.bytes_per_sample_row + self.sample_offset
stop = start + self.bytes_per_sample_row + self.sample_offset
if isinstance (subindex, tuple):
if len(subindex)==1:
subindex = subindex[0]
if self.planar_config==1:
if isinstance(subindex, int):
start = start + subindex * self.bytes_per_pixel
stop = start + self.bytes_per_pixel
return strip[start:stop].view(dtype=self.pixel_dtype)[self.sample_name][0]
row = strip[start:stop].view(dtype=self.pixel_dtype)[self.sample_name]
if not row.size:
print(self.get_topology())
else:
row = strip[start:stop].view(dtype=self.dtype)
if subindex is not None:
return row[subindex]
return row
    def get_rows(self, index, subindex=None):
        # Return one or more rows as a 2-D array.  An int index yields a
        # (1, width) array; a slice stacks the selected rows; a 1-tuple is
        # unwrapped and delegated to __getitem__.
        if isinstance(index, int):
            r = self.get_row (index, subindex=subindex)
            return r.reshape((1,)+r.shape)
        if isinstance (index, slice):
            indices = list(range(*index.indices(self.shape[0])))
            for i,j in enumerate(indices):
                s = self.get_row(j, subindex=subindex)
                if i==0:
                    # Allocate the result lazily, once the row shape is known.
                    r = numpy.empty((len (indices),)+s.shape, dtype=self.dtype)
                r[i] = s
            return r
        if isinstance (index, tuple):
            if len (index)==1:
                return self[index[0]]
        raise NotImplementedError (repr(index))
    def get_image(self):
        # Return the whole sample plane as a 2-D array of self.dtype.
        # Contiguous uncompressed data is returned as a zero-copy view of the
        # underlying TIFF buffer; otherwise strips are decoded into a fresh
        # byte buffer first.
        if self.is_contiguous:
            if self.planar_config==1:
                start = self.strip_offsets[0] + self.sample_offset
                stop = self.strip_offsets[-1] + self.strip_nbytes[-1]
                image =self.ifd.tiff.data[start:stop].view(dtype=self.pixel_dtype)
                image = image[self.sample_name].reshape (self.shape)
                return image
            else:
                if self.sample_index is None:
                    start = self.strip_offsets[0]
                else:
                    # Planar storage: each sample occupies a contiguous
                    # per-sample image region.
                    start = self.strip_offsets[0] + self.sample_index * self.bytes_per_sample_image
                stop = start + self.bytes_per_sample_image
                image = self.ifd.tiff.data[start:stop]
                image = image.view(dtype=self.dtype).reshape(self.shape)
                return image
        else:
            image = numpy.empty((self.bytes_per_sample_image,), dtype=numpy.uint8)
            offset = 0
            for strip_index in range (len (self.strip_offsets)):
                start = self.strip_offsets[strip_index]
                stop = start + self.strip_nbytes[strip_index]
                if self.compression==1:
                    strip = self.ifd.tiff.data[start:stop]
                else:
                    compressed_strip = self.ifd.tiff.data[start:stop]
                    if self.compression==5: # lzw
                        strip = tif_lzw.decode(compressed_strip, self.uncompressed_bytes_per_strip)
                    else:
                        raise NotImplementedError (repr(self.compression))
                # Extra decoded bytes beyond the image buffer are dropped
                # with a warning rather than raising.
                target = image[offset:offset + strip.nbytes]
                if target.nbytes < strip.nbytes:
                    print('%s.get_image warning: tiff data contains %s extra bytes (compression=%r) that are ignored' % (self.__class__.__name__, strip.nbytes-target.nbytes, self.compression))
                image[offset:offset + strip.nbytes] = strip[:target.nbytes]
                # NOTE(review): offset still advances by the full strip size
                # even when bytes were dropped — verify intended.
                offset += strip.nbytes
            image = image.view(dtype=self.dtype).reshape(self.shape)
            return image
    def __len__(self):
        # Number of pixel rows in the plane (first shape dimension).
        return self.shape[0]
    def __getitem__(self, index):
        # numpy-style indexing: int -> single row, slice -> row range of the
        # full image, tuple -> (row, column...) selection.
        if isinstance (index, int):
            return self.get_row(index)
        elif isinstance(index, slice):
            return self.get_image()[index]
        elif isinstance(index, tuple):
            if len(index)==0:
                return self.get_image()
            if len(index)==1:
                return self[index[0]]
            index0 = index[0]
            if isinstance(index0, int):
                # Integer row with trailing column subindex: decode only the
                # one row instead of the whole image.
                return self.get_row(index0, index[1:])
            return self.get_image()[index]
        raise NotImplementedError (repr(index))
class TiffSamplePlaneLazy(TiffSamplePlane):
    # Variant of TiffSamplePlane that defers opening the TIFF file until its
    # data is actually needed; the IFD and strip layout are resolved lazily
    # via properties, while the remaining layout attributes are copied from a
    # template plane with copy_attrs().

    def __init__ (self, tiff_file_getter):
        # tiff_file_getter: zero-argument callable returning an opened TIFF
        # file object with a single IFD.
        self.tiff_file_getter = tiff_file_getter
        self.time = None
        self._ifd = None

    @property
    def ifd(self):
        # Resolve and cache the IFD on first access.
        ifd = self._ifd
        if ifd is None:
            tiff = self.tiff_file_getter()
            assert len (tiff.IFD)==1,repr(len (tiff.IFD))
            self._ifd = ifd = tiff.IFD[0]
        return ifd

    @property
    def strip_offsets (self): return self.ifd.get_value ('StripOffsets')

    @property
    def strip_nbytes (self): return self.ifd.get_value ('StripByteCounts')

    @property
    def compression(self): return self.ifd.get_value ('Compression')

    @property
    def is_contiguous(self): return self.compression==1 and self.ifd.is_contiguous()

    def copy_attrs(self, other):
        # Copy the precomputed layout attributes from an eagerly-constructed
        # plane so this instance can serve data without opening its file.
        for attr in ['sample_index', 'planar_config', 'samples_per_pixel','shape',
                     'rows_per_strip', 'sample_format', 'bits_per_sample',
                     'dtype', 'pixel_dtype', 'bytes_per_pixel', 'bytes_per_row',
                     'bytes_per_sample_image', 'uncompressed_bytes_per_strip',
                     'sample_name', 'sample_offset', 'bytes_per_sample_row',
                     'strips_per_image'
                     ]:
            setattr (self, attr, getattr (other, attr))
| bsd-3-clause | -2,672,315,780,794,638,300 | 39.408304 | 192 | 0.57664 | false |
jalexvig/tensorflow | tensorflow/contrib/gan/python/eval/python/summaries_impl.py | 15 | 8343 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common TFGAN summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.eval.python import eval_utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import util as loss_util
from tensorflow.python.summary import summary
__all__ = [
'add_gan_model_image_summaries',
'add_image_comparison_summaries',
'add_gan_model_summaries',
'add_regularization_loss_summaries',
'add_cyclegan_image_summaries',
]
def _assert_is_image(data):
data.shape.assert_has_rank(4)
data.shape[1:].assert_is_fully_defined()
def add_gan_model_image_summaries(gan_model, grid_size=4, model_summaries=True):
  """Adds image summaries for real and fake images.
  Args:
    gan_model: A GANModel tuple.
    grid_size: The size of an image grid.
    model_summaries: Also add summaries of the model.
  Raises:
    ValueError: If real and generated data aren't images.
  """
  if isinstance(gan_model, namedtuples.CycleGANModel):
    raise ValueError(
        '`add_gan_model_image_summaries` does not take CycleGANModels. Please '
        'use `add_cyclegan_image_summaries` instead.')
  _assert_is_image(gan_model.real_data)
  _assert_is_image(gan_model.generated_data)
  # A grid_size x grid_size grid needs grid_size**2 images from the batch.
  num_images = grid_size ** 2
  real_image_shape = gan_model.real_data.shape.as_list()[1:3]
  generated_image_shape = gan_model.generated_data.shape.as_list()[1:3]
  real_channels = gan_model.real_data.shape.as_list()[3]
  generated_channels = gan_model.generated_data.shape.as_list()[3]
  summary.image(
      'real_data',
      eval_utils.image_grid(
          gan_model.real_data[:num_images],
          grid_shape=(grid_size, grid_size),
          image_shape=real_image_shape,
          num_channels=real_channels),
      max_outputs=1)
  summary.image(
      'generated_data',
      eval_utils.image_grid(
          gan_model.generated_data[:num_images],
          grid_shape=(grid_size, grid_size),
          image_shape=generated_image_shape,
          num_channels=generated_channels),
      max_outputs=1)
  if model_summaries:
    add_gan_model_summaries(gan_model)
def add_cyclegan_image_summaries(cyclegan_model):
  """Adds image summaries for CycleGAN.
  There are two summaries, one for each generator. The first image is the
  generator input, the second is the generator output, and the third is G(F(x)).
  Args:
    cyclegan_model: A CycleGANModel tuple.
  Raises:
    ValueError: If `cyclegan_model` isn't a CycleGANModel.
    ValueError: If generated data, generator inputs, and reconstructions aren't
      images.
    ValueError: If the generator input, generated data, and reconstructions
      aren't all the same size.
  """
  if not isinstance(cyclegan_model, namedtuples.CycleGANModel):
    raise ValueError('`cyclegan_model` was not a CycleGANModel. Instead, was '
                     '%s' % type(cyclegan_model))
  _assert_is_image(cyclegan_model.model_x2y.generator_inputs)
  _assert_is_image(cyclegan_model.model_x2y.generated_data)
  _assert_is_image(cyclegan_model.reconstructed_x)
  _assert_is_image(cyclegan_model.model_y2x.generator_inputs)
  _assert_is_image(cyclegan_model.model_y2x.generated_data)
  _assert_is_image(cyclegan_model.reconstructed_y)
  def _add_comparison_summary(gan_model, reconstructions):
    # Show input / translated / reconstructed side by side (first batch item).
    image_list = (array_ops.unstack(gan_model.generator_inputs[:1]) +
                  array_ops.unstack(gan_model.generated_data[:1]) +
                  array_ops.unstack(reconstructions[:1]))
    summary.image(
        'image_comparison', eval_utils.image_reshaper(
            image_list, num_cols=len(image_list)), max_outputs=1)
  with ops.name_scope('x2y_image_comparison_summaries'):
    _add_comparison_summary(
        cyclegan_model.model_x2y, cyclegan_model.reconstructed_x)
  with ops.name_scope('y2x_image_comparison_summaries'):
    _add_comparison_summary(
        cyclegan_model.model_y2x, cyclegan_model.reconstructed_y)
def add_image_comparison_summaries(gan_model, num_comparisons=2,
                                   display_diffs=False):
  """Adds image summaries to compare triplets of images.
  The first image is the generator input, the second is the generator output,
  and the third is the real data. This style of comparison is useful for
  image translation problems, where the generator input is a corrupted image,
  the generator output is the reconstruction, and the real data is the target.
  Args:
    gan_model: A GANModel tuple.
    num_comparisons: The number of image triplets to display.
    display_diffs: Also display the difference between generated and target.
  Raises:
    ValueError: If real data, generated data, and generator inputs aren't
      images.
    ValueError: If the generator input, real, and generated data aren't all the
      same size.
  """
  _assert_is_image(gan_model.generator_inputs)
  _assert_is_image(gan_model.generated_data)
  _assert_is_image(gan_model.real_data)
  gan_model.generated_data.shape.assert_is_compatible_with(
      gan_model.generator_inputs.shape)
  gan_model.real_data.shape.assert_is_compatible_with(
      gan_model.generated_data.shape)
  # Rows: inputs, then generated, then real (one column per comparison).
  image_list = []
  image_list.extend(
      array_ops.unstack(gan_model.generator_inputs[:num_comparisons]))
  image_list.extend(
      array_ops.unstack(gan_model.generated_data[:num_comparisons]))
  image_list.extend(array_ops.unstack(gan_model.real_data[:num_comparisons]))
  if display_diffs:
    generated_list = array_ops.unstack(
        gan_model.generated_data[:num_comparisons])
    real_list = array_ops.unstack(gan_model.real_data[:num_comparisons])
    diffs = [
        math_ops.abs(math_ops.to_float(generated) - math_ops.to_float(real)) for
        generated, real in zip(generated_list, real_list)]
    image_list.extend(diffs)
  # Reshape image and display.
  summary.image(
      'image_comparison',
      eval_utils.image_reshaper(image_list, num_cols=num_comparisons),
      max_outputs=1)
def add_gan_model_summaries(gan_model):
  """Adds typical GANModel summaries.
  Args:
    gan_model: A GANModel tuple.
  """
  if isinstance(gan_model, namedtuples.CycleGANModel):
    # CycleGAN: recurse into both component GAN models under named scopes.
    with ops.name_scope('cyclegan_x2y_summaries'):
      add_gan_model_summaries(gan_model.model_x2y)
    with ops.name_scope('cyclegan_y2x_summaries'):
      add_gan_model_summaries(gan_model.model_y2x)
    return
  with ops.name_scope('generator_variables'):
    for var in gan_model.generator_variables:
      summary.histogram(var.name, var)
  with ops.name_scope('discriminator_variables'):
    for var in gan_model.discriminator_variables:
      summary.histogram(var.name, var)
def add_regularization_loss_summaries(gan_model):
  """Adds summaries for regularization losses.
  Args:
    gan_model: A GANModel tuple.
  """
  if isinstance(gan_model, namedtuples.CycleGANModel):
    # CycleGAN: recurse into both component GAN models under named scopes.
    with ops.name_scope('cyclegan_x2y_regularization_loss_summaries'):
      add_regularization_loss_summaries(gan_model.model_x2y)
    with ops.name_scope('cyclegan_y2x_regularization_loss_summaries'):
      add_regularization_loss_summaries(gan_model.model_y2x)
    return
  if gan_model.generator_scope:
    summary.scalar(
        'generator_regularization_loss',
        loss_util.get_regularization_loss(gan_model.generator_scope.name))
  if gan_model.discriminator_scope:
    summary.scalar(
        'discriminator_regularization_loss',
        loss_util.get_regularization_loss(gan_model.discriminator_scope.name))
| apache-2.0 | 7,843,686,979,834,491,000 | 36.412556 | 80 | 0.706101 | false |
charlesvdv/servo | tests/wpt/web-platform-tests/tools/six/test_six.py | 418 | 22226 | import operator
import sys
import types
import py
import six
# Sanity checks for six's documentation helpers, type aliases and constants.
def test_add_doc():
    def f():
        """Icky doc"""
        pass
    six._add_doc(f, """New doc""")
    assert f.__doc__ == "New doc"
def test_import_module():
    from logging import handlers
    m = six._import_module("logging.handlers")
    assert m is handlers
def test_integer_types():
    assert isinstance(1, six.integer_types)
    assert isinstance(-1, six.integer_types)
    assert isinstance(six.MAXSIZE + 23, six.integer_types)
    assert not isinstance(.1, six.integer_types)
def test_string_types():
    assert isinstance("hi", six.string_types)
    assert isinstance(six.u("hi"), six.string_types)
    assert issubclass(six.text_type, six.string_types)
def test_class_types():
    # Both old-style (Py2) and new-style classes must count as class types.
    class X:
        pass
    class Y(object):
        pass
    assert isinstance(X, six.class_types)
    assert isinstance(Y, six.class_types)
    assert not isinstance(X(), six.class_types)
def test_text_type():
    assert type(six.u("hi")) is six.text_type
def test_binary_type():
    assert type(six.b("hi")) is six.binary_type
def test_MAXSIZE():
    try:
        # This shouldn't raise an overflow error.
        six.MAXSIZE.__index__()
    except AttributeError:
        # Before Python 2.6.
        pass
    py.test.raises(
        (ValueError, OverflowError),
        operator.mul, [None], six.MAXSIZE + 1)
def test_lazy():
    # Moved modules must not be imported until first attribute access.
    if six.PY3:
        html_name = "html.parser"
    else:
        html_name = "HTMLParser"
    assert html_name not in sys.modules
    mod = six.moves.html_parser
    assert sys.modules[html_name] is mod
    assert "htmlparser" not in six._MovedItems.__dict__
# Probe optional stdlib modules so dependent tests can be skipped cleanly.
try:
    import _tkinter
except ImportError:
    have_tkinter = False
else:
    have_tkinter = True
have_gdbm = True
try:
    import gdbm
except ImportError:
    # Python 3 name for the same module.
    try:
        import dbm.gnu
    except ImportError:
        have_gdbm = False
# Parametrized over every registered move: each moved item must resolve, and
# platform-/version-specific ones are skipped rather than failed.
@py.test.mark.parametrize("item_name",
                          [item.name for item in six._moved_attributes])
def test_move_items(item_name):
    """Ensure that everything loads correctly."""
    try:
        item = getattr(six.moves, item_name)
        if isinstance(item, types.ModuleType):
            __import__("six.moves." + item_name)
    except AttributeError:
        if item_name == "zip_longest" and sys.version_info < (2, 6):
            py.test.skip("zip_longest only available on 2.6+")
    except ImportError:
        if item_name == "winreg" and not sys.platform.startswith("win"):
            py.test.skip("Windows only module")
        if item_name.startswith("tkinter"):
            if not have_tkinter:
                py.test.skip("requires tkinter")
            if item_name == "tkinter_ttk" and sys.version_info[:2] <= (2, 6):
                py.test.skip("ttk only available on 2.7+")
        if item_name.startswith("dbm_gnu") and not have_gdbm:
            py.test.skip("requires gdbm")
        raise
    if sys.version_info[:2] >= (2, 6):
        assert item_name in dir(six.moves)
@py.test.mark.parametrize("item_name",
                          [item.name for item in six._urllib_parse_moved_attributes])
def test_move_items_urllib_parse(item_name):
    """Ensure that everything loads correctly."""
    if item_name == "ParseResult" and sys.version_info < (2, 5):
        py.test.skip("ParseResult is only found on 2.5+")
    if item_name in ("parse_qs", "parse_qsl") and sys.version_info < (2, 6):
        py.test.skip("parse_qs[l] is new in 2.6")
    if sys.version_info[:2] >= (2, 6):
        assert item_name in dir(six.moves.urllib.parse)
    getattr(six.moves.urllib.parse, item_name)
@py.test.mark.parametrize("item_name",
                          [item.name for item in six._urllib_error_moved_attributes])
def test_move_items_urllib_error(item_name):
    """Ensure that everything loads correctly."""
    if sys.version_info[:2] >= (2, 6):
        assert item_name in dir(six.moves.urllib.error)
    getattr(six.moves.urllib.error, item_name)
@py.test.mark.parametrize("item_name",
                          [item.name for item in six._urllib_request_moved_attributes])
def test_move_items_urllib_request(item_name):
    """Ensure that everything loads correctly."""
    if sys.version_info[:2] >= (2, 6):
        assert item_name in dir(six.moves.urllib.request)
    getattr(six.moves.urllib.request, item_name)
@py.test.mark.parametrize("item_name",
                          [item.name for item in six._urllib_response_moved_attributes])
def test_move_items_urllib_response(item_name):
    """Ensure that everything loads correctly."""
    if sys.version_info[:2] >= (2, 6):
        assert item_name in dir(six.moves.urllib.response)
    getattr(six.moves.urllib.response, item_name)
@py.test.mark.parametrize("item_name",
                          [item.name for item in six._urllib_robotparser_moved_attributes])
def test_move_items_urllib_robotparser(item_name):
    """Ensure that everything loads correctly."""
    if sys.version_info[:2] >= (2, 6):
        assert item_name in dir(six.moves.urllib.robotparser)
    getattr(six.moves.urllib.robotparser, item_name)
# Regression tests for six 1.4.1 import-ordering bugs, plus checks that the
# moved iterator builtins behave lazily like their Python 3 counterparts.
def test_import_moves_error_1():
    from six.moves.urllib.parse import urljoin
    from six import moves
    # In 1.4.1: AttributeError: 'Module_six_moves_urllib_parse' object has no attribute 'urljoin'
    assert moves.urllib.parse.urljoin
def test_import_moves_error_2():
    from six import moves
    assert moves.urllib.parse.urljoin
    # In 1.4.1: ImportError: cannot import name urljoin
    from six.moves.urllib.parse import urljoin
def test_import_moves_error_3():
    from six.moves.urllib.parse import urljoin
    # In 1.4.1: ImportError: cannot import name urljoin
    from six.moves.urllib_parse import urljoin
def test_from_imports():
    from six.moves.queue import Queue
    assert isinstance(Queue, six.class_types)
    from six.moves.configparser import ConfigParser
    assert isinstance(ConfigParser, six.class_types)
def test_filter():
    from six.moves import filter
    f = filter(lambda x: x % 2, range(10))
    assert six.advance_iterator(f) == 1
def test_filter_false():
    from six.moves import filterfalse
    f = filterfalse(lambda x: x % 3, range(10))
    assert six.advance_iterator(f) == 0
    assert six.advance_iterator(f) == 3
    assert six.advance_iterator(f) == 6
def test_map():
    from six.moves import map
    assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1
def test_zip():
    from six.moves import zip
    assert six.advance_iterator(zip(range(2), range(2))) == (0, 0)
@py.test.mark.skipif("sys.version_info < (2, 6)")
def test_zip_longest():
    from six.moves import zip_longest
    it = zip_longest(range(2), range(1))
    assert six.advance_iterator(it) == (0, 0)
    assert six.advance_iterator(it) == (1, None)
# Tests for user-defined moves (six.add_move / six.remove_move); teardown
# guarantees the shared `six.moves` registry is left clean between tests.
class TestCustomizedMoves:
    def teardown_method(self, meth):
        try:
            del six._MovedItems.spam
        except AttributeError:
            pass
        try:
            del six.moves.__dict__["spam"]
        except KeyError:
            pass
    def test_moved_attribute(self):
        attr = six.MovedAttribute("spam", "foo", "bar")
        if six.PY3:
            assert attr.mod == "bar"
        else:
            assert attr.mod == "foo"
        assert attr.attr == "spam"
        attr = six.MovedAttribute("spam", "foo", "bar", "lemma")
        assert attr.attr == "lemma"
        attr = six.MovedAttribute("spam", "foo", "bar", "lemma", "theorm")
        if six.PY3:
            assert attr.attr == "theorm"
        else:
            assert attr.attr == "lemma"
    def test_moved_module(self):
        attr = six.MovedModule("spam", "foo")
        if six.PY3:
            assert attr.mod == "spam"
        else:
            assert attr.mod == "foo"
        attr = six.MovedModule("spam", "foo", "bar")
        if six.PY3:
            assert attr.mod == "bar"
        else:
            assert attr.mod == "foo"
    def test_custom_move_module(self):
        attr = six.MovedModule("spam", "six", "six")
        six.add_move(attr)
        six.remove_move("spam")
        assert not hasattr(six.moves, "spam")
        attr = six.MovedModule("spam", "six", "six")
        six.add_move(attr)
        from six.moves import spam
        assert spam is six
        six.remove_move("spam")
        assert not hasattr(six.moves, "spam")
    def test_custom_move_attribute(self):
        attr = six.MovedAttribute("spam", "six", "six", "u", "u")
        six.add_move(attr)
        six.remove_move("spam")
        assert not hasattr(six.moves, "spam")
        attr = six.MovedAttribute("spam", "six", "six", "u", "u")
        six.add_move(attr)
        from six.moves import spam
        assert spam is six.u
        six.remove_move("spam")
        assert not hasattr(six.moves, "spam")
    def test_empty_remove(self):
        py.test.raises(AttributeError, six.remove_move, "eggs")
def test_get_unbound_function():
class X(object):
def m(self):
pass
assert six.get_unbound_function(X.m) is X.__dict__["m"]
def test_get_method_self():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_self(x.m) is x
py.test.raises(AttributeError, six.get_method_self, 42)
def test_get_method_function():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_function(x.m) is X.__dict__["m"]
py.test.raises(AttributeError, six.get_method_function, hasattr)
def test_get_function_closure():
def f():
x = 42
def g():
return x
return g
cell = six.get_function_closure(f())[0]
assert type(cell).__name__ == "cell"
def test_get_function_code():
def f():
pass
assert isinstance(six.get_function_code(f), types.CodeType)
if not hasattr(sys, "pypy_version_info"):
py.test.raises(AttributeError, six.get_function_code, hasattr)
def test_get_function_defaults():
def f(x, y=3, b=4):
pass
assert six.get_function_defaults(f) == (3, 4)
def test_get_function_globals():
def f():
pass
assert six.get_function_globals(f) is globals()
def test_dictionary_iterators(monkeypatch):
def stock_method_name(iterwhat):
"""Given a method suffix like "lists" or "values", return the name
of the dict method that delivers those on the version of Python
we're running in."""
if six.PY3:
return iterwhat
return 'iter' + iterwhat
class MyDict(dict):
if not six.PY3:
def lists(self, **kw):
return [1, 2, 3]
def iterlists(self, **kw):
return iter([1, 2, 3])
f = MyDict.iterlists
del MyDict.iterlists
setattr(MyDict, stock_method_name('lists'), f)
d = MyDict(zip(range(10), reversed(range(10))))
for name in "keys", "values", "items", "lists":
meth = getattr(six, "iter" + name)
it = meth(d)
assert not isinstance(it, list)
assert list(it) == list(getattr(d, name)())
py.test.raises(StopIteration, six.advance_iterator, it)
record = []
def with_kw(*args, **kw):
record.append(kw["kw"])
return old(*args)
old = getattr(MyDict, stock_method_name(name))
monkeypatch.setattr(MyDict, stock_method_name(name), with_kw)
meth(d, kw=42)
assert record == [42]
monkeypatch.undo()
@py.test.mark.skipif(sys.version_info[:2] < (2, 7),
                     reason="view methods on dictionaries only available on 2.7+")
def test_dictionary_views():
    """six.viewkeys/viewvalues/viewitems map to the right dict view methods."""
    def stock_method_name(viewwhat):
        """Given a method suffix like "keys" or "values", return the name
        of the dict method that delivers those on the version of Python
        we're running in."""
        if six.PY3:
            return viewwhat
        return 'view' + viewwhat
    d = dict(zip(range(10), (range(11, 20))))
    for name in "keys", "values", "items":
        meth = getattr(six, "view" + name)
        view = meth(d)
        # Views and the plain accessor must agree on contents.
        assert set(view) == set(getattr(d, name)())
def test_advance_iterator():
    """six.next / advance_iterator step an iterator and raise StopIteration at the end."""
    assert six.next is six.advance_iterator
    l = [1, 2]
    it = iter(l)
    assert six.next(it) == 1
    assert six.next(it) == 2
    py.test.raises(StopIteration, six.next, it)
    # An exhausted iterator keeps raising StopIteration.
    py.test.raises(StopIteration, six.next, it)
def test_iterator():
class myiter(six.Iterator):
def __next__(self):
return 13
assert six.advance_iterator(myiter()) == 13
class myitersub(myiter):
def __next__(self):
return 14
assert six.advance_iterator(myitersub()) == 14
def test_callable():
    """six.callable accepts classes, callable instances, functions, builtins
    and methods, and rejects plain data values."""
    class X:
        def __call__(self):
            pass
        def method(self):
            pass
    assert six.callable(X)
    assert six.callable(X())
    assert six.callable(test_callable)
    assert six.callable(hasattr)
    assert six.callable(X.method)
    assert six.callable(X().method)
    assert not six.callable(4)
    assert not six.callable("string")
def test_create_bound_method():
class X(object):
pass
def f(self):
return self
x = X()
b = six.create_bound_method(f, x)
assert isinstance(b, types.MethodType)
assert b() is x
if six.PY3:
def test_b():
data = six.b("\xff")
assert isinstance(data, bytes)
assert len(data) == 1
assert data == bytes([255])
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, str)
assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
else:
def test_b():
data = six.b("\xff")
assert isinstance(data, str)
assert len(data) == 1
assert data == "\xff"
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, unicode)
assert s == "hi \xd0\xb9 \xd0\xb9 \\ \\\\ \n".decode("utf8")
def test_u_escapes():
s = six.u("\u1234")
assert len(s) == 1
def test_unichr():
assert six.u("\u1234") == six.unichr(0x1234)
assert type(six.u("\u1234")) is type(six.unichr(0x1234))
def test_int2byte():
assert six.int2byte(3) == six.b("\x03")
py.test.raises((OverflowError, ValueError), six.int2byte, 256)
def test_byte2int():
assert six.byte2int(six.b("\x03")) == 3
assert six.byte2int(six.b("\x03\x04")) == 3
py.test.raises(IndexError, six.byte2int, six.b(""))
def test_bytesindex():
assert six.indexbytes(six.b("hello"), 3) == ord("l")
def test_bytesiter():
it = six.iterbytes(six.b("hi"))
assert six.next(it) == ord("h")
assert six.next(it) == ord("i")
py.test.raises(StopIteration, six.next, it)
def test_StringIO():
fp = six.StringIO()
fp.write(six.u("hello"))
assert fp.getvalue() == six.u("hello")
def test_BytesIO():
fp = six.BytesIO()
fp.write(six.b("hello"))
assert fp.getvalue() == six.b("hello")
def test_exec_():
    """six.exec_ supports implicit namespaces, explicit globals, and
    separate globals + locals."""
    def f():
        l = []
        six.exec_("l.append(1)")
        assert l == [1]
    f()
    ns = {}
    six.exec_("x = 42", ns)
    assert ns["x"] == 42
    glob = {}
    loc = {}
    six.exec_("global y; y = 42; x = 12", glob, loc)
    # 'global' binds in the provided globals; plain assignment in locals.
    assert glob["y"] == 42
    assert "x" not in glob
    assert loc["x"] == 12
    assert "y" not in loc
def test_reraise():
    """six.reraise re-raises with the given type/value and optional traceback."""
    def get_next(tb):
        # On Python 3 the re-raise itself adds one extra traceback frame.
        if six.PY3:
            return tb.tb_next.tb_next
        else:
            return tb.tb_next
    e = Exception("blah")
    try:
        raise e
    except Exception:
        tp, val, tb = sys.exc_info()
    try:
        # Re-raise with the original traceback attached.
        six.reraise(tp, val, tb)
    except Exception:
        tp2, value2, tb2 = sys.exc_info()
        assert tp2 is Exception
        assert value2 is e
        assert tb is get_next(tb2)
    try:
        # Without a traceback a fresh one is generated.
        six.reraise(tp, val)
    except Exception:
        tp2, value2, tb2 = sys.exc_info()
        assert tp2 is Exception
        assert value2 is e
        assert tb2 is not tb
    try:
        # An explicitly supplied traceback is chained onto the new one.
        six.reraise(tp, val, tb2)
    except Exception:
        tp2, value2, tb3 = sys.exc_info()
        assert tp2 is Exception
        assert value2 is e
        assert get_next(tb3) is tb2
    try:
        # A None value makes reraise instantiate the exception type itself.
        six.reraise(tp, None, tb)
    except Exception:
        tp2, value2, tb2 = sys.exc_info()
        assert tp2 is Exception
        assert value2 is not val
        assert isinstance(value2, Exception)
        assert tb is get_next(tb2)
def test_raise_from():
try:
try:
raise Exception("blah")
except Exception:
ctx = sys.exc_info()[1]
f = Exception("foo")
six.raise_from(f, None)
except Exception:
tp, val, tb = sys.exc_info()
if sys.version_info[:2] > (3, 0):
# We should have done a raise f from None equivalent.
assert val.__cause__ is None
assert val.__context__ is ctx
if sys.version_info[:2] >= (3, 3):
# And that should suppress the context on the exception.
assert val.__suppress_context__
# For all versions the outer exception should have raised successfully.
assert str(val) == "foo"
def test_print_():
save = sys.stdout
out = sys.stdout = six.moves.StringIO()
try:
six.print_("Hello,", "person!")
finally:
sys.stdout = save
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out)
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, end="")
assert out.getvalue() == "Hello, person!"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, sep="X")
assert out.getvalue() == "Hello,Xperson!\n"
out = six.StringIO()
six.print_(six.u("Hello,"), six.u("person!"), file=out)
result = out.getvalue()
assert isinstance(result, six.text_type)
assert result == six.u("Hello, person!\n")
six.print_("Hello", file=None) # This works.
out = six.StringIO()
six.print_(None, file=out)
assert out.getvalue() == "None\n"
@py.test.mark.skipif("sys.version_info[:2] >= (2, 6)")
def test_print_encoding(monkeypatch):
# Fool the type checking in print_.
monkeypatch.setattr(six, "file", six.BytesIO, raising=False)
out = six.BytesIO()
out.encoding = "utf-8"
out.errors = None
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\xd4\xbc")
out = six.BytesIO()
out.encoding = "ascii"
out.errors = "strict"
py.test.raises(UnicodeEncodeError, six.print_, six.u("\u053c"), file=out)
out.errors = "backslashreplace"
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\\u053c")
def test_print_exceptions():
py.test.raises(TypeError, six.print_, x=3)
py.test.raises(TypeError, six.print_, end=3)
py.test.raises(TypeError, six.print_, sep=42)
def test_with_metaclass():
    """six.with_metaclass applies a metaclass with zero, one or two bases."""
    class Meta(type):
        pass
    class X(six.with_metaclass(Meta)):
        pass
    assert type(X) is Meta
    assert issubclass(X, object)
    class Base(object):
        pass
    class X(six.with_metaclass(Meta, Base)):
        pass
    assert type(X) is Meta
    assert issubclass(X, Base)
    class Base2(object):
        pass
    class X(six.with_metaclass(Meta, Base, Base2)):
        pass
    assert type(X) is Meta
    assert issubclass(X, Base)
    assert issubclass(X, Base2)
    # The temporary helper class used by with_metaclass must not leak
    # into the MRO.
    assert X.__mro__ == (X, Base, Base2, object)
def test_wraps():
def f(g):
@six.wraps(g)
def w():
return 42
return w
def k():
pass
original_k = k
k = f(f(k))
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert k is original_k
assert not hasattr(k, '__wrapped__')
def f(g, assign, update):
def w():
return 42
w.glue = {"foo" : "bar"}
return six.wraps(g, assign, update)(w)
k.glue = {"melon" : "egg"}
k.turnip = 43
k = f(k, ["turnip"], ["glue"])
assert k.__name__ == "w"
assert k.turnip == 43
assert k.glue == {"melon" : "egg", "foo" : "bar"}
def test_add_metaclass():
class Meta(type):
pass
class X:
"success"
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, object)
assert X.__module__ == __name__
assert X.__doc__ == "success"
class Base(object):
pass
class X(Base):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
class Base2(object):
pass
class X(Base, Base2):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
assert issubclass(X, Base2)
# Test a second-generation subclass of a type.
class Meta1(type):
m1 = "m1"
class Meta2(Meta1):
m2 = "m2"
class Base:
b = "b"
Base = six.add_metaclass(Meta1)(Base)
class X(Base):
x = "x"
X = six.add_metaclass(Meta2)(X)
assert type(X) is Meta2
assert issubclass(X, Base)
assert type(Base) is Meta1
assert "__dict__" not in vars(X)
instance = X()
instance.attr = "test"
assert vars(instance) == {"attr": "test"}
assert instance.b == Base.b
assert instance.x == X.x
# Test a class with slots.
class MySlots(object):
__slots__ = ["a", "b"]
MySlots = six.add_metaclass(Meta1)(MySlots)
assert MySlots.__slots__ == ["a", "b"]
instance = MySlots()
instance.a = "foo"
py.test.raises(AttributeError, setattr, instance, "c", "baz")
# Test a class with string for slots.
class MyStringSlots(object):
__slots__ = "ab"
MyStringSlots = six.add_metaclass(Meta1)(MyStringSlots)
assert MyStringSlots.__slots__ == "ab"
instance = MyStringSlots()
instance.ab = "foo"
py.test.raises(AttributeError, setattr, instance, "a", "baz")
py.test.raises(AttributeError, setattr, instance, "b", "baz")
class MySlotsWeakref(object):
__slots__ = "__weakref__",
MySlotsWeakref = six.add_metaclass(Meta)(MySlotsWeakref)
assert type(MySlotsWeakref) is Meta
| mpl-2.0 | 6,219,015,943,173,747,000 | 27.241423 | 97 | 0.58877 | false |
thnee/ansible | lib/ansible/modules/cloud/amazon/aws_ses_rule_set.py | 13 | 8315 | #!/usr/bin/python
# Copyright (c) 2017, Ben Tomasik <ben@tomasik.io>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aws_ses_rule_set
short_description: Manages SES inbound receipt rule sets
description:
- The M(aws_ses_rule_set) module allows you to create, delete, and manage SES receipt rule sets
version_added: 2.8
author:
- "Ben Tomasik (@tomislacker)"
- "Ed Costello (@orthanc)"
requirements: [ boto3, botocore ]
options:
name:
description:
- The name of the receipt rule set.
required: True
type: str
state:
description:
- Whether to create (or update) or destroy the receipt rule set.
required: False
default: present
choices: ["absent", "present"]
type: str
active:
description:
- Whether or not this rule set should be the active rule set. Only has an impact if I(state) is C(present).
- If omitted, the active rule set will not be changed.
- If C(True) then this rule set will be made active and all others inactive.
- if C(False) then this rule set will be deactivated. Be careful with this as you can end up with no active rule set.
type: bool
required: False
force:
description:
- When deleting a rule set, deactivate it first (AWS prevents deletion of the active rule set).
type: bool
required: False
default: False
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
---
- name: Create default rule set and activate it if not already
aws_ses_rule_set:
name: default-rule-set
state: present
active: yes
- name: Create some arbitrary rule set but do not activate it
aws_ses_rule_set:
name: arbitrary-rule-set
state: present
- name: Explicitly deactivate the default rule set leaving no active rule set
aws_ses_rule_set:
name: default-rule-set
state: present
active: no
- name: Remove an arbitrary inactive rule set
aws_ses_rule_set:
name: arbitrary-rule-set
state: absent
- name: Remove an ruleset even if we have to first deactivate it to remove it
aws_ses_rule_set:
name: default-rule-set
state: absent
force: yes
"""
RETURN = """
active:
description: if the SES rule set is active
returned: success if I(state) is C(present)
type: bool
sample: true
rule_sets:
description: The list of SES receipt rule sets that exist after any changes.
returned: success
type: list
sample: [{
"created_timestamp": "2018-02-25T01:20:32.690000+00:00",
"name": "default-rule-set"
}]
"""
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
def list_rule_sets(client, module):
    """Return all SES receipt rule sets visible to the account.

    Fails the Ansible module (does not return) on any boto3/botocore error.
    """
    try:
        response = client.list_receipt_rule_sets(aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't list rule sets.")
    return response['RuleSets']
def rule_set_in(name, rule_sets):
    """Return True if a rule set called *name* exists in *rule_sets*.

    *rule_sets* is a list of dicts as returned by list_receipt_rule_sets,
    each carrying a 'Name' key.
    """
    # A generator expression lets any() short-circuit on the first match
    # instead of materializing an intermediate list as the previous
    # implementation did.
    return any(s['Name'] == name for s in rule_sets)
def ruleset_active(client, module, name):
    """Return True when *name* is the account's currently active rule set."""
    try:
        active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't get the active rule set.")
    if active_rule_set is not None and 'Metadata' in active_rule_set:
        return name == active_rule_set['Metadata']['Name']
    else:
        # Metadata was not set meaning there is no active rule set
        return False
def deactivate_rule_set(client, module):
    """Deactivate whatever rule set is currently active.

    After this call the account has no active receipt rule set.
    """
    try:
        # No ruleset name deactivates all rulesets
        client.set_active_receipt_rule_set(aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't set active rule set to None.")
def update_active_rule_set(client, module, name, desired_active):
    """Reconcile the active flag of rule set *name* with *desired_active*.

    desired_active may be None (leave the active set untouched), True
    (make this rule set the active one) or False (deactivate it).

    Returns a (changed, active) tuple. In check mode the would-be state
    is reported without calling AWS.
    """
    check_mode = module.check_mode
    active = ruleset_active(client, module, name)
    changed = False
    if desired_active is not None:
        if desired_active and not active:
            if not check_mode:
                try:
                    client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name))
            changed = True
            active = True
        elif not desired_active and active:
            if not check_mode:
                deactivate_rule_set(client, module)
            changed = True
            active = False
    return changed, active
def create_or_update_rule_set(client, module):
    """Ensure the named rule set exists and has the requested active state.

    Exits the module reporting the change status, the resulting active
    flag, and the full (snake_cased) list of rule sets.
    """
    name = module.params.get('name')
    check_mode = module.check_mode
    changed = False

    rule_sets = list_rule_sets(client, module)
    if not rule_set_in(name, rule_sets):
        if not check_mode:
            try:
                client.create_receipt_rule_set(RuleSetName=name, aws_retry=True)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name))
        changed = True
        # Reflect the (possibly simulated, in check mode) creation in the
        # reported list without mutating the original.
        rule_sets = list(rule_sets)
        rule_sets.append({
            'Name': name,
        })

    (active_changed, active) = update_active_rule_set(client, module, name, module.params.get('active'))
    changed |= active_changed

    module.exit_json(
        changed=changed,
        active=active,
        rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets],
    )
def remove_rule_set(client, module):
    """Delete the named rule set if present.

    AWS refuses to delete the active rule set; with force=true it is
    deactivated first, otherwise the module fails. Exits the module with
    the change status and the remaining rule sets.
    """
    name = module.params.get('name')
    check_mode = module.check_mode
    changed = False

    rule_sets = list_rule_sets(client, module)
    if rule_set_in(name, rule_sets):
        active = ruleset_active(client, module, name)
        if active and not module.params.get('force'):
            module.fail_json(
                msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format(name),
                error={
                    "code": "CannotDelete",
                    "message": "Cannot delete active rule set: {0}".format(name),
                }
            )
        if not check_mode:
            if active and module.params.get('force'):
                deactivate_rule_set(client, module)
            try:
                client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name))
        changed = True
        rule_sets = [x for x in rule_sets if x['Name'] != name]

    module.exit_json(
        changed=changed,
        rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets],
    )
def main():
    """Ansible module entry point: parse arguments and dispatch on state."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        active=dict(type='bool'),
        force=dict(type='bool', default=False),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    state = module.params.get('state')

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel that's caused throttling
    # failures so apply a jittered backoff to call SES calls.
    client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if state == 'absent':
        remove_rule_set(client, module)
    else:
        create_or_update_rule_set(client, module)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,478,112,122,292,689,000 | 31.73622 | 140 | 0.642814 | false |
noelbk/neutron-juniper | neutron/plugins/nec/db/api.py | 9 | 7966 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import sqlalchemy as sa
from neutron.db import api as db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.nec.common import config # noqa
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec.db import models as nmodels
LOG = logging.getLogger(__name__)
OFP_VLAN_NONE = 0xffff
resource_map = {'ofc_tenant': nmodels.OFCTenantMapping,
'ofc_network': nmodels.OFCNetworkMapping,
'ofc_port': nmodels.OFCPortMapping,
'ofc_router': nmodels.OFCRouterMapping,
'ofc_packet_filter': nmodels.OFCFilterMapping}
old_resource_map = {'ofc_tenant': nmodels.OFCTenant,
'ofc_network': nmodels.OFCNetwork,
'ofc_port': nmodels.OFCPort,
'ofc_packet_filter': nmodels.OFCFilter}
# utitlity methods
def _get_resource_model(resource, old_style):
    """Return the mapping model class registered for *resource*.

    Old-style lookups may return None because some newer resources have
    no legacy mapping model; new-style lookups raise KeyError for an
    unknown resource name.
    """
    if old_style:
        # NOTE: Some new resources are not defined in old_resource_map.
        # In such case None is returned.
        return old_resource_map.get(resource)
    return resource_map[resource]
def initialize():
    """Configure the database engine/session (delegates to neutron.db.api)."""
    db.configure_db()
def clear_db(base=model_base.BASEV2):
    """Drop all tables registered on *base* (primarily a test helper)."""
    db.clear_db(base)
def get_ofc_item(session, resource, neutron_id, old_style=False):
    """Return the OFC mapping row for *neutron_id*, or None when absent.

    Also returns None when old_style is True and the resource has no
    legacy mapping model.
    """
    model = _get_resource_model(resource, old_style)
    if not model:
        return None
    try:
        return session.query(model).filter_by(quantum_id=neutron_id).one()
    except sa.orm.exc.NoResultFound:
        return None
def get_ofc_id(session, resource, neutron_id, old_style=False):
    """Return the OFC-side id mapped to *neutron_id*, or None when no
    mapping row exists."""
    item = get_ofc_item(session, resource, neutron_id, old_style)
    if not item:
        return None
    # Legacy rows keep the OFC identifier in 'id'; new-style rows in 'ofc_id'.
    return item.id if old_style else item.ofc_id
def exists_ofc_item(session, resource, neutron_id, old_style=False):
    """Return True if an OFC mapping exists for the given neutron id."""
    # get_ofc_item returns a mapping row or None, so a single None test is
    # sufficient; the previous if/else returning literal True/False was
    # redundant.
    return get_ofc_item(session, resource, neutron_id, old_style) is not None
def find_ofc_item(session, resource, ofc_id, old_style=False):
    """Return the mapping row whose OFC-side id equals *ofc_id*, or None.

    Legacy tables store the OFC id in the 'id' column, new-style tables
    in 'ofc_id'.
    """
    try:
        model = _get_resource_model(resource, old_style)
        if old_style:
            params = dict(id=ofc_id)
        else:
            params = dict(ofc_id=ofc_id)
        return (session.query(model).filter_by(**params).one())
    except sa.orm.exc.NoResultFound:
        return None
def add_ofc_item(session, resource, neutron_id, ofc_id, old_style=False):
    """Insert a neutron-id <-> OFC-id mapping row and return it.

    Raises NECDBException when the insert fails (e.g. duplicate key).
    """
    try:
        model = _get_resource_model(resource, old_style)
        if old_style:
            params = dict(quantum_id=neutron_id, id=ofc_id)
        else:
            params = dict(quantum_id=neutron_id, ofc_id=ofc_id)
        item = model(**params)
        with session.begin(subtransactions=True):
            session.add(item)
            session.flush()
    except Exception as exc:
        LOG.exception(exc)
        raise nexc.NECDBException(reason=exc.message)
    return item
def del_ofc_item(session, resource, neutron_id, old_style=False,
                 warning=True):
    """Delete the mapping row for *neutron_id*.

    Returns True on success, False when no row exists (optionally
    logging a warning).
    """
    try:
        model = _get_resource_model(resource, old_style)
        with session.begin(subtransactions=True):
            item = session.query(model).filter_by(quantum_id=neutron_id).one()
            session.delete(item)
        return True
    except sa.orm.exc.NoResultFound:
        if warning:
            LOG.warning(_("_del_ofc_item(): NotFound item "
                          "(model=%(model)s, id=%(id)s) "),
                        {'model': model, 'id': neutron_id})
        return False
def get_ofc_id_lookup_both(session, resource, neutron_id):
    """Return the OFC id for *neutron_id*, consulting both mapping styles.

    The new-style table is checked first, then the legacy one; raises
    OFCConsistencyBroken when neither holds a mapping.
    """
    ofc_id = get_ofc_id(session, resource, neutron_id)
    # Lookup old style of OFC mapping table
    if not ofc_id:
        ofc_id = get_ofc_id(session, resource, neutron_id,
                            old_style=True)
        if not ofc_id:
            reason = (_("NotFound %(resource)s for neutron_id=%(id)s.")
                      % {'resource': resource, 'id': neutron_id})
            raise nexc.OFCConsistencyBroken(reason=reason)
    return ofc_id
def exists_ofc_item_lookup_both(session, resource, neutron_id):
    """Return True if a mapping exists in either the new-style or the
    legacy OFC mapping table."""
    # Check the current mapping table first; 'or' short-circuits the
    # legacy lookup, collapsing the previous pair of if/return-True
    # branches into one expression with identical behavior.
    return (exists_ofc_item(session, resource, neutron_id) or
            exists_ofc_item(session, resource, neutron_id, old_style=True))
def del_ofc_item_lookup_both(session, resource, neutron_id):
    """Delete the mapping for *neutron_id* from whichever table holds it.

    Logs a warning (but does not raise) when neither table has a row.
    """
    # Delete the mapping from new style of OFC mapping table
    if del_ofc_item(session, resource, neutron_id,
                    old_style=False, warning=False):
        return
    # Delete old style of OFC mapping table
    if del_ofc_item(session, resource, neutron_id,
                    old_style=True, warning=False):
        return
    # The specified resource not found
    LOG.warning(_("_del_ofc_item(): NotFound item "
                  "(resource=%(resource)s, id=%(id)s) "),
                {'resource': resource, 'id': neutron_id})
def get_portinfo(session, id):
    """Return the PortInfo row for *id*, or None when it does not exist."""
    query = session.query(nmodels.PortInfo).filter_by(id=id)
    try:
        return query.one()
    except sa.orm.exc.NoResultFound:
        return None
def add_portinfo(session, id, datapath_id='', port_no=0,
                 vlan_id=OFP_VLAN_NONE, mac=''):
    """Insert a PortInfo row describing an OpenFlow switch port.

    vlan_id defaults to OFP_VLAN_NONE (no VLAN tag). Raises
    NECDBException when the insert fails.
    """
    try:
        portinfo = nmodels.PortInfo(id=id, datapath_id=datapath_id,
                                    port_no=port_no, vlan_id=vlan_id, mac=mac)
        with session.begin(subtransactions=True):
            session.add(portinfo)
    except Exception as exc:
        LOG.exception(exc)
        raise nexc.NECDBException(reason=exc.message)
    return portinfo
def del_portinfo(session, id):
    """Delete the PortInfo row for *id*, logging a warning when absent."""
    try:
        with session.begin(subtransactions=True):
            portinfo = session.query(nmodels.PortInfo).filter_by(id=id).one()
            session.delete(portinfo)
    except sa.orm.exc.NoResultFound:
        LOG.warning(_("del_portinfo(): NotFound portinfo for "
                      "port_id: %s"), id)
def get_port_from_device(port_id):
    """Get port from database."""
    # Builds a port dict augmented with security-group information.
    # Returns None when the port does not exist.
    LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id)
    session = db.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id

    query = session.query(models_v2.Port,
                          sg_db.SecurityGroupPortBinding.security_group_id)
    # Outer join so ports with no security groups are still returned.
    query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                            models_v2.Port.id == sg_binding_port)
    query = query.filter(models_v2.Port.id == port_id)
    port_and_sgs = query.all()
    if not port_and_sgs:
        return None
    # Every row shares the same Port; rows differ only in security_group_id.
    port = port_and_sgs[0][0]
    plugin = manager.NeutronManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    port_dict[ext_sg.SECURITYGROUPS] = [
        sg_id for port_, sg_id in port_and_sgs if sg_id]
    port_dict['security_group_rules'] = []
    port_dict['security_group_source_groups'] = []
    port_dict['fixed_ips'] = [ip['ip_address']
                              for ip in port['fixed_ips']]
    return port_dict
| apache-2.0 | 7,108,348,914,384,952,000 | 33.188841 | 78 | 0.625659 | false |
F5Networks/f5-common-python | f5/bigip/tm/asm/test/functional/test_signature_statuses.py | 1 | 2730 | # Copyright 2015 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip.tm.asm.signature_statuses import Signature_Status
import pytest
from requests.exceptions import HTTPError
def get_sigstatid(request, mgmt_root):
    """Return the hash id of the first signature-status resource on the device.

    The *request* fixture is accepted for pytest parity but unused here.
    """
    sigcoll = mgmt_root.tm.asm.signature_statuses_s.get_collection()
    # We obtain the ID for the resource to test and return the hashed id
    hashid = str(sigcoll[0].id)
    return hashid
class TestSignatureStatuses(object):
    """Functional tests for the ASM signature-status collection/resource."""

    def test_load_no_object(self, mgmt_root):
        """Loading a non-existent id must raise HTTP 404."""
        with pytest.raises(HTTPError) as err:
            mgmt_root.tm.asm.signature_statuses_s.signature_status.load(
                id='Lx3553-321'
            )
        assert err.response.status_code == 404

    def test_load(self, request, mgmt_root):
        """A resource loaded by id exposes the expected kind, selfLink and flags."""
        hashid = get_sigstatid(request, mgmt_root)
        sigstat = mgmt_root.tm.asm.signature_statuses_s.signature_status.load(id=hashid)
        kind = 'tm:asm:signature-statuses:signature-statusstate'
        baseuri = 'https://localhost/mgmt/tm/asm/signature-statuses/'
        final_uri = baseuri + hashid
        assert sigstat.id == hashid
        assert sigstat.kind == kind
        assert sigstat.selfLink.startswith(final_uri)
        assert sigstat.isUserDefined is False

    def test_refresh(self, request, mgmt_root):
        """refresh() leaves the resource identity attributes unchanged."""
        hashid = get_sigstatid(request, mgmt_root)
        sigstat = mgmt_root.tm.asm.signature_statuses_s.signature_status.load(id=hashid)
        sigstat2 = mgmt_root.tm.asm.signature_statuses_s.signature_status.load(id=hashid)
        kind = 'tm:asm:signature-statuses:signature-statusstate'
        baseuri = 'https://localhost/mgmt/tm/asm/signature-statuses/'
        final_uri = baseuri + hashid
        assert sigstat.id == hashid
        assert sigstat.kind == kind
        assert sigstat.selfLink.startswith(final_uri)
        sigstat2.refresh()
        assert sigstat.id == sigstat2.id
        assert sigstat.kind == sigstat2.kind
        assert sigstat.selfLink == sigstat2.selfLink

    def test_collection(self, mgmt_root):
        """The collection is a non-empty list of Signature_Status resources."""
        sc = mgmt_root.tm.asm.signature_statuses_s.get_collection()
        assert isinstance(sc, list)
        assert len(sc)
        assert isinstance(sc[0], Signature_Status)
| apache-2.0 | -731,689,270,512,665,200 | 40.363636 | 89 | 0.692308 | false |
shaufi10/odoo | openerp/addons/base/ir/ir_exports.py | 338 | 1672 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class ir_exports(osv.osv):
    """Saved export configuration: a named list of fields for a model."""
    _name = "ir.exports"
    _order = 'name'
    _columns = {
        'name': fields.char('Export Name'),
        # Technical name of the model this export applies to.
        'resource': fields.char('Resource', select=True),
        # One line per exported field; duplicated along with the export.
        'export_fields': fields.one2many('ir.exports.line', 'export_id',
                                         'Export ID', copy=True),
    }
class ir_exports_line(osv.osv):
    """A single exported field belonging to a saved export (ir.exports)."""
    _name = 'ir.exports.line'
    _order = 'id'
    _columns = {
        'name': fields.char('Field Name'),
        # Deleting the parent export removes its lines.
        'export_id': fields.many2one('ir.exports', 'Export', select=True, ondelete='cascade'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 725,517,684,823,619,500 | 36.155556 | 94 | 0.586124 | false |
cucs-numpde/class | fdtools.py | 1 | 3922 | import numpy
def cosspace(a, b, n=50):
    """Return n points on [a, b] clustered toward the endpoints
    (Chebyshev-style cosine spacing)."""
    midpoint = (a + b) / 2
    half_width = (b - a) / 2
    angles = numpy.linspace(-numpy.pi, 0, n)
    return midpoint + half_width * numpy.cos(angles)
def vander_chebyshev(x, n=None):
    """Return the (len(x), n) matrix whose columns are the Chebyshev
    polynomials T_0 .. T_{n-1} evaluated at the points x."""
    x = numpy.asarray(x, dtype=float)
    if n is None:
        n = len(x)
    if n == 0:
        return numpy.ones((len(x), 0))
    cols = [numpy.ones(len(x))]
    if n > 1:
        cols.append(x.copy())
    # Three-term recurrence: T_k = 2 x T_{k-1} - T_{k-2}.
    while len(cols) < n:
        cols.append(2 * x * cols[-1] - cols[-2])
    return numpy.column_stack(cols)
def chebeval(z, n=None):
    """Build matrices to evaluate the n-term Chebyshev expansion and its
    first two derivatives at point(s) z.

    Parameters
    ----------
    z : scalar or array of evaluation points
    n : number of Chebyshev terms (defaults to len(z))

    Returns
    -------
    [Tz, dTz, ddTz] : list of (len(z), n) arrays; Tz @ c evaluates the
        expansion with coefficients c, dTz/ddTz its first/second derivative.
    """
    z = numpy.array(z, ndmin=1)
    if n is None:
        n = len(z)
    Tz = vander_chebyshev(z, n)
    dTz = numpy.zeros_like(Tz)
    ddTz = numpy.zeros_like(Tz)
    # Derivative seeds: T_1' = 1, T_2' = 4z, T_2'' = 4. Guard the column
    # assignments so small n (< 3) no longer raises IndexError as before.
    if n > 1:
        dTz[:, 1] = 1
    if n > 2:
        dTz[:, 2] = 4 * z
        ddTz[:, 2] = 4
    # Recurrence for Chebyshev derivatives, k >= 3:
    #   T_k' = k * (2 T_{k-1} + T_{k-2}' / (k-2))
    # (loop index renamed from 'n', which shadowed the parameter).
    for k in range(3, n):
        dTz[:, k] = k * (2 * Tz[:, k-1] + dTz[:, k-2] / (k-2))
        ddTz[:, k] = k * (2 * dTz[:, k-1] + ddTz[:, k-2] / (k-2))
    return [Tz, dTz, ddTz]
def fdstencilV(z, x):
    """Compute finite difference weights using a Vandermonde matrix.

    Row k of the result holds weights w such that w @ u(x) approximates
    the k-th derivative u^(k)(z).
    """
    # numpy.math was an undocumented alias of the math module, removed in
    # numpy 2.0; import the real module instead.
    from math import factorial

    x = numpy.array(x)
    V = numpy.vander(x - z, increasing=True)
    scaling = numpy.array([factorial(i) for i in range(len(x))])
    return (numpy.linalg.inv(V).T * scaling).T
def fdstencil(z, x, nderiv=None):
    """Compute finite difference weights using recurrences for Lagrange polynomials (see Fornberg 1998).

    Args:
        z: point at which derivatives are approximated.
        x: stencil points.
        nderiv: highest derivative order; defaults to len(x).

    Returns:
        c: array of shape (nderiv+1, len(x)); row k holds the weights of the
        k-th derivative approximation at z.
    """
    if nderiv is None:
        nderiv = len(x)
    # Work in coordinates shifted so that z is the origin.
    x = numpy.array(x) - z
    k = numpy.arange(nderiv+1)
    # c starts as an (nderiv+1, len(x)) array of zeros; c[k, j] will become
    # the weight of u(x[j]) in the k-th derivative approximation.
    c = numpy.outer(0.*k, x)
    c[0,0] = 1
    prod = 1
    # Fornberg's recurrence: incorporate one stencil point per iteration,
    # updating the newest column and then correcting all earlier columns.
    # The statement order below is essential -- each line consumes values
    # produced (or not yet overwritten) by its predecessors.
    for j in range(1,len(x)):
        dx = x[j] - x[:j]
        c[1:,j] = x[j-1]*c[1:,j-1] - k[1:]*c[:-1,j-1]
        c[0,j] = x[j-1]*c[0,j-1]
        c[:,j] *= -prod
        prod = numpy.prod(dx)
        c[:,j] /= prod
        c[1:,:j] = (x[j]*c[1:,:j] - k[1:,None]*c[:-1,:j]) / dx
        c[0,:j] = x[j]*c[0,:j] / dx
    return c
def fdcompact(z, x, k):
    """Compute a compact (implicit) differencing scheme
        b @ u^(k)(z) = c @ u(x)
    that maximizes the accuracy of u^(k)(z[0]).

    Args:
        z: points where the k-th derivative appears implicitly; z[0] is the
           point whose accuracy is optimized.
        x: points where function values appear explicitly.
        k: derivative order.

    Returns:
        (b, c): implicit-side and explicit-side weight vectors.
    """
    z = numpy.array(z)
    x = numpy.array(x)
    n = len(x)
    # Shift coordinates so the target point z[0] is the origin.
    x = x - z[0]
    z = z - z[0]
    xmin, xmax = min(x), max(x)
    dx = (xmax - xmin) / (n - 1)
    # Extended point set y: the explicit points plus one padding point per
    # additional implicit point, added outward one grid spacing at a time.
    y = numpy.zeros(n + len(z) - 1)
    y[:n] = x
    for i in range(1, len(z)):
        if (z[i] < 0):
            xmin -= dx
            y[n + i - 1] = xmin
        else:
            xmax += dx
            y[n + i - 1] = xmax
    # Row t of S: exact k-th derivative stencil at z[t] over the extended set.
    S = numpy.array([fdstencil(t, y, k)[k] for t in z])
    b = numpy.ones(len(z))
    T = S[1:,n:].T
    # Choose the implicit weights so the padding-point contributions cancel.
    # rcond=None selects the machine-precision-based cutoff (the modern
    # default) and silences the FutureWarning that older NumPy emits when
    # rcond is left unspecified.
    b[1:] = numpy.linalg.lstsq(T, -S[0,n:], rcond=None)[0]
    c = b.dot(S[:,:n])
    return b, c
def dispersion(z, x, b, c):
    """Plot the modified wavenumber of the compact scheme b, c against theta and sin(theta).

    Side effect only: opens a matplotlib window via pyplot.show().
    """
    from matplotlib import pyplot
    # Skip theta=0 to avoid dividing by a zero symbol below.
    theta = numpy.linspace(0, numpy.pi, 100)[1:]
    # Fourier symbols of the implicit (z) and explicit (x) stencils.
    phiz = numpy.exp(1j*numpy.outer(z, theta))
    phix = numpy.exp(1j*numpy.outer(x, theta))
    pyplot.plot(theta, (c.dot(phix) / b.dot(phiz)).imag, '.')
    pyplot.plot(theta, theta)
    pyplot.plot(theta, numpy.sin(theta))
    pyplot.show()
def rk_butcher_4():
    """Butcher tableau (A, b) of the classical fourth-order Runge-Kutta method."""
    A = numpy.array([[0, 0, 0, 0],
                     [.5, 0, 0, 0],
                     [0, .5, 0, 0],
                     [0, 0, 1, 0]])
    b = numpy.array([1/6, 1/3, 1/3, 1/6])
    return A, b
def rk_butcher_ssp32():
    """Butcher tableau (A, b) of a three-stage SSP Runge-Kutta scheme."""
    A = numpy.array([[0, 0, 0], [1/2, 0, 0], [1/2, 1/2, 0]])
    b = numpy.full(3, 1/3)
    return A, b
def ode_rkexplicit(f, u0, butcher=None, tfinal=1, h=.1):
    """Integrate u' = f(t, u) from t=0 to tfinal with an explicit Runge-Kutta method.

    Args:
        f: right-hand side, called as f(t, u) with u a numpy array.
        u0: initial condition (numpy array); stored as-is in the first entry.
        butcher: optional (A, b) Butcher tableau; defaults to classical RK4.
        tfinal: final time.
        h: nominal step size; the last step is shortened to land on tfinal.

    Returns:
        List of (t, u) pairs, starting with (0, u0).
    """
    if butcher is None:
        A, b = rk_butcher_4()
    else:
        A, b = butcher
    c = numpy.sum(A, axis=1)
    s = len(c)
    u = u0.copy()
    t = 0
    hist = [(t,u0)]
    while t < tfinal:
        if tfinal - t < 1.01*h:
            # Final (possibly shortened) step: land exactly on tfinal.
            h = tfinal - t
            tnext = tfinal
        else:
            tnext = t + h
        # (A redundant `h = min(h, tfinal - t)` was removed here: both
        # branches above already guarantee h <= tfinal - t.)
        fY = numpy.zeros((len(u0), s))
        for i in range(s):
            # Stage value Y_i = u + h * sum_j A[i,j] f(Y_j).
            Yi = u.copy()
            for j in range(i):
                Yi += h * A[i,j] * fY[:,j]
            fY[:,i] = f(t + h*c[i], Yi)
        u += h * fY.dot(b)
        t = tnext
        hist.append((t, u.copy()))
    return hist
| bsd-2-clause | -1,203,509,709,460,108,800 | 27.838235 | 106 | 0.478327 | false |
UrusTeam/android_ndk_toolchain_cross | lib/python2.7/ctypes/test/test_arrays.py | 72 | 4269 | import unittest
from ctypes import *
# struct-module style type codes for the numeric ctypes exercised below.
# This string is immediately shadowed by the tuple assignment that follows,
# so it only serves as documentation.
formats = "bBhHiIlLqQfd"
# NOTE(review): the tuple lists 11 types while the format string above has 12
# codes; c_ulong and c_longlong appear to be absent -- confirm this omission
# is intentional.
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
          c_long, c_ulonglong, c_float, c_double, c_longdouble
class ArrayTestCase(unittest.TestCase):
    """Tests for ctypes array types created with ARRAY() / the * operator (Python 2)."""
    def test_simple(self):
        # create classes holding simple numeric types, and check
        # various properties.
        init = range(15, 25)
        for fmt in formats:
            alen = len(init)
            int_array = ARRAY(fmt, alen)

            ia = int_array(*init)
            # length of instance ok?
            self.assertEqual(len(ia), alen)

            # slot values ok?
            values = [ia[i] for i in range(len(init))]
            self.assertEqual(values, init)

            # change the items
            from operator import setitem
            new_values = range(42, 42+alen)
            [setitem(ia, n, new_values[n]) for n in range(alen)]
            values = [ia[i] for i in range(len(init))]
            self.assertEqual(values, new_values)

            # are the items initialized to 0?
            ia = int_array()
            values = [ia[i] for i in range(len(init))]
            self.assertEqual(values, [0] * len(init))

        # Too many initializers should be caught
        self.assertRaises(IndexError, int_array, *range(alen*2))

        CharArray = ARRAY(c_char, 3)

        ca = CharArray("a", "b", "c")

        # Should this work? It doesn't:
        # CharArray("abc")
        self.assertRaises(TypeError, CharArray, "abc")

        self.assertEqual(ca[0], "a")
        self.assertEqual(ca[1], "b")
        self.assertEqual(ca[2], "c")
        self.assertEqual(ca[-3], "a")
        self.assertEqual(ca[-2], "b")
        self.assertEqual(ca[-1], "c")

        self.assertEqual(len(ca), 3)

        # slicing is now supported, but not extended slicing (3-argument)!
        from operator import getslice, delitem
        self.assertRaises(TypeError, getslice, ca, 0, 1, -1)

        # cannot delete items
        self.assertRaises(TypeError, delitem, ca, 0)

    def test_numeric_arrays(self):

        alen = 5

        numarray = ARRAY(c_int, alen)

        na = numarray()
        values = [na[i] for i in range(alen)]
        self.assertEqual(values, [0] * alen)

        na = numarray(*[c_int()] * alen)
        values = [na[i] for i in range(alen)]
        self.assertEqual(values, [0]*alen)

        na = numarray(1, 2, 3, 4, 5)
        values = [i for i in na]
        self.assertEqual(values, [1, 2, 3, 4, 5])

        na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
        values = [i for i in na]
        self.assertEqual(values, [1, 2, 3, 4, 5])

    def test_classcache(self):
        # Same (type, length) must return the identical cached array type;
        # different lengths must not.
        self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4))
        self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3))

    def test_from_address(self):
        # Failed with 0.9.8, reported by JUrner
        p = create_string_buffer("foo")
        sz = (c_char * 3).from_address(addressof(p))
        self.assertEqual(sz[:], "foo")
        self.assertEqual(sz[::], "foo")
        self.assertEqual(sz[::-1], "oof")
        self.assertEqual(sz[::3], "f")
        self.assertEqual(sz[1:4:2], "o")
        self.assertEqual(sz.value, "foo")

    # The wide-character variant is only defined when this build of ctypes
    # provides create_unicode_buffer (i.e. has wchar support).
    try:
        create_unicode_buffer
    except NameError:
        pass
    else:
        def test_from_addressW(self):
            p = create_unicode_buffer("foo")
            sz = (c_wchar * 3).from_address(addressof(p))
            self.assertEqual(sz[:], "foo")
            self.assertEqual(sz[::], "foo")
            self.assertEqual(sz[::-1], "oof")
            self.assertEqual(sz[::3], "f")
            self.assertEqual(sz[1:4:2], "o")
            self.assertEqual(sz.value, "foo")

    def test_cache(self):
        # Array types are cached internally in the _ctypes extension,
        # in a WeakValueDictionary.  Make sure the array type is
        # removed from the cache when the itemtype goes away.  This
        # test will not fail, but will show a leak in the testsuite.

        # Create a new type:
        class my_int(c_int):
            pass
        # Create a new array type based on it:
        t1 = my_int * 1
        t2 = my_int * 1
        self.assertTrue(t1 is t2)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 | 1,291,114,521,704,471,300 | 30.858209 | 74 | 0.548606 | false |
arpankapoor/pambu | pambu.py | 1 | 9458 | #!/usr/bin/env python3
import copy
import curses
import curses.ascii
from enum import Enum
import locale
import math
import sys
import signal
class Direction(Enum):
    """Compass direction of movement on the screen."""
    north, east, south, west = range(4)

    def is_opp(self, other):
        """Return True when *other* points exactly the opposite way."""
        # Members are ordered N(0), E(1), S(2), W(3): opposite directions
        # differ by exactly 2 modulo 4. Non-Direction values are never
        # opposite, matching the equality-chain behavior this replaces.
        return isinstance(other, Direction) and (self.value - other.value) % 4 == 2
class Point:
    """A screen point stored as a *y* (row) and *x* (column) coordinate."""

    def __init__(self, y, x):
        # Keep y-before-x insertion order: __str__ exposes self.__dict__.
        self.y = y
        self.x = x

    def __str__(self):
        return str(self.__dict__)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def distance_from(self, other):
        """Euclidean distance from this point to *other*."""
        return math.sqrt((other.y - self.y) ** 2 + (other.x - self.x) ** 2)

    def has_same_y(self, other):
        """True when both points share a row."""
        return self.y == other.y

    def has_same_x(self, other):
        """True when both points share a column."""
        return self.x == other.x

    def is_to_the_left_of(self, other):
        """True when this point is at or left of *other* (inclusive)."""
        return self.x <= other.x

    def is_to_the_right_of(self, other):
        """True when this point is at or right of *other* (inclusive)."""
        return self.x >= other.x

    def is_above(self, other):
        """True when this point is at or above *other* (smaller y wins)."""
        return self.y <= other.y

    def is_below(self, other):
        """True when this point is at or below *other* (larger y wins)."""
        return self.y >= other.y

    def move(self, direction):
        """Shift this point 1 unit in *direction* (mutates in place)."""
        # Table lookup replaces the if/elif chain; unknown directions are
        # a no-op, just as they were before.
        dy, dx = {Direction.north: (-1, 0),
                  Direction.south: (1, 0),
                  Direction.west: (0, -1),
                  Direction.east: (0, 1)}.get(direction, (0, 0))
        self.y += dy
        self.x += dx
class LineSegment:
    """A line segment represented by a head and tail point"""

    def __init__(self, head, tail):
        self.head = head
        self.tail = tail

    def __str__(self):
        return str(self.__dict__)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def length(self):
        """Euclidean length of the segment."""
        return self.head.distance_from(self.tail)

    def is_vertical(self):
        return self.head.has_same_x(self.tail)

    def is_horizontal(self):
        return self.head.has_same_y(self.tail)

    def increment(self):
        """Increase the line segment length by 1 from the head"""
        # Grow away from the tail: pick the direction that moves the head
        # further from the tail along the segment's axis.
        if self.is_horizontal():
            if self.head.x < self.tail.x:
                self.head.move(Direction.west)
            else:
                self.head.move(Direction.east)
        elif self.is_vertical():
            if self.head.y < self.tail.y:
                self.head.move(Direction.north)
            else:
                self.head.move(Direction.south)

    def decrement(self):
        """Decrease the line segment length by 1 from the tail"""
        # Shrink toward the head: move the tail one unit closer.
        if self.is_horizontal():
            if self.head.x < self.tail.x:
                self.tail.move(Direction.west)
            else:
                self.tail.move(Direction.east)
        elif self.is_vertical():
            if self.head.y < self.tail.y:
                self.tail.move(Direction.north)
            else:
                self.tail.move(Direction.south)

    def draw(self, window):
        """Draw the line if it is horizontal or vertical"""
        length = math.floor(self.length())
        # curses draws from the top-left end, so anchor at the min corner.
        start_point = Point(min(self.head.y, self.tail.y),
                            min(self.head.x, self.tail.x))
        if self.is_vertical():
            window.vline(start_point.y, start_point.x, 0, length)
        elif self.is_horizontal():
            window.hline(start_point.y, start_point.x, 0, length)

    def lies_on(self, point):
        """Return True if *point* lies on this (axis-aligned) segment.

        Returns None implicitly for segments that are neither horizontal
        nor vertical.
        """
        if self.is_horizontal():
            return point.x <= max(self.head.x, self.tail.x) and point.x >= min(self.head.x, self.tail.x) and point.y == self.head.y
        if self.is_vertical():
            return point.y <= max(self.head.y, self.tail.y) and point.y >= min(self.head.y, self.tail.y) and point.x == self.head.x

    def intersection_point(self, other):
        """Return the shared endpoint of the two segments, or None.

        Only endpoint-to-endpoint intersections are detected, which is
        sufficient for consecutive snake segments.
        """
        if isinstance(other, self.__class__):
            if self.head == other.head or self.head == other.tail:
                return self.head
            elif self.tail == other.head or self.tail == other.tail:
                return self.tail
            else:
                return None

    def join(self, other, window):
        """Draw the appropriate ACS corner glyph where this segment meets *other*."""
        def join_char(hline, vline):
            # Picks the corner character from where the intersection point
            # sits relative to the horizontal and vertical segments.
            # NOTE: closes over `ipoint`, which is bound in the enclosing
            # scope below before this helper is called.
            ch = None
            if (ipoint.is_to_the_left_of(hline.head) and
                    ipoint.is_to_the_left_of(hline.tail)):
                if (ipoint.is_above(vline.head) and
                        ipoint.is_above(vline.tail)):
                    ch = curses.ACS_ULCORNER
                elif (ipoint.is_below(vline.head) and
                        ipoint.is_below(vline.tail)):
                    ch = curses.ACS_LLCORNER
            elif (ipoint.is_to_the_right_of(hline.head) and
                  ipoint.is_to_the_right_of(hline.tail)):
                if (ipoint.is_above(vline.head) and
                        ipoint.is_above(vline.tail)):
                    ch = curses.ACS_URCORNER
                elif (ipoint.is_below(vline.head) and
                        ipoint.is_below(vline.tail)):
                    ch = curses.ACS_LRCORNER
            return ch

        if isinstance(other, self.__class__):
            # Classify the two segments; a corner only exists between one
            # horizontal and one vertical segment.
            hline = None
            vline = None
            if self.is_vertical():
                vline = self
            elif self.is_horizontal():
                hline = self

            if other.is_vertical():
                vline = other
            elif other.is_horizontal():
                hline = other

            if hline is not None and vline is not None and hline != vline:
                ipoint = hline.intersection_point(vline)
                if ipoint is not None:
                    ch = join_char(hline, vline)
                    if ch is not None:
                        window.addch(ipoint.y, ipoint.x, ch)
class Snake:
    """The snake: a polyline of Points; points[0] is the head."""

    def __init__(self, dimensions):
        # dimensions is (maxy, maxx) as returned by window.getmaxyx().
        maxy, maxx = dimensions
        # Start with a single horizontal segment roughly centered on screen,
        # heading east (head is the first point).
        self.points = [Point(math.floor(0.49 * maxy), math.floor(0.59 * maxx)),
                       Point(math.floor(0.49 * maxy), math.floor(0.40 * maxx))]
        self.direction = Direction.east

    def __str__(self):
        return str(self.__dict__)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def draw(self, window):
        """Draw every segment, joining consecutive segments with corner glyphs."""
        prev_seg = None
        for prev_point, curr_point in zip(self.points[:-1], self.points[1:]):
            curr_seg = LineSegment(prev_point, curr_point)
            curr_seg.draw(window)
            curr_seg.join(prev_seg, window)
            prev_seg = curr_seg

    def detect_collision(self):
        """Exit the game if the head lies on any non-adjacent body segment."""
        head = self.points[0]
        # NOTE(review): prev_seg is assigned but never used in this method.
        prev_seg = None
        # Skip the first segment -- the head always lies on it by definition.
        for prev_point, curr_point in zip(self.points[1:-1], self.points[2:]):
            curr_seg = LineSegment(prev_point, curr_point)
            if curr_seg.lies_on(head):
                curses.endwin()
                print("Collision Detected!")
                sys.exit(0)

    def move(self, window, direction=None):
        """Move 1 unit in given direction"""
        first_seg = LineSegment(self.points[0], self.points[1])
        last_seg = LineSegment(self.points[-2], self.points[-1])

        # Continuing straight (or pressing the opposite key, which is
        # ignored) just stretches the head segment; a turn inserts a new
        # head point and starts a new segment.
        if (direction is None or
                direction == self.direction or
                direction.is_opp(self.direction)):
            first_seg.increment()
        else:
            new_head = copy.deepcopy(first_seg.head)
            new_head.move(direction)
            self.points.insert(0, new_head)
            self.direction = direction

        self.detect_collision()

        # Shrink the tail segment by one; drop it entirely once empty.
        last_seg.decrement()
        if last_seg.length() == 0:
            del self.points[-1]
def signal_handler(signal, frame):
    """SIGINT handler: restore the terminal, thank the player, and exit."""
    curses.endwin()
    print("Thanks for playing pambu!")
    # Equivalent to sys.exit(0): sys.exit just raises SystemExit.
    raise SystemExit(0)
def main():
    """Set up curses, run the game loop until ESC, then restore the terminal."""
    signal.signal(signal.SIGINT, signal_handler)
    locale.setlocale(locale.LC_ALL, "")  # Use system's default encoding
    stdscr = curses.initscr()            # Initialize
    curses.cbreak()                      # Enter cbreak mode
    curses.noecho()                      # Don't echo any characters
    curses.curs_set(0)                   # Make cursor invisible
    stdscr.nodelay(1)                    # Make getch() non-blocking
    stdscr.keypad(1)                     # Interpret escape sequences
    snk = Snake(stdscr.getmaxyx())       # Initialize our Snake!!

    ch = None
    while ch != curses.ascii.ESC:
        stdscr.clear()
        # Map arrow keys to directions; any other key (or none) keeps the
        # snake moving in its current direction.
        direction = None
        if ch == curses.KEY_UP:
            direction = Direction.north
        elif ch == curses.KEY_DOWN:
            direction = Direction.south
        elif ch == curses.KEY_LEFT:
            direction = Direction.west
        elif ch == curses.KEY_RIGHT:
            direction = Direction.east
        snk.move(stdscr, direction)
        snk.draw(stdscr)
        stdscr.refresh()
        curses.napms(200)  # frame delay: one step every 200 ms
        ch = stdscr.getch()
        curses.flushinp()  # drop queued keypresses so input doesn't lag
    curses.endwin()
curses.endwin()
# Entry point: start the game when executed as a script.
if __name__ == "__main__":
    main()
| isc | 8,667,717,734,897,335,000 | 31.501718 | 130 | 0.533411 | false |
theflofly/tensorflow | tensorflow/tools/docs/doc_generator_visitor.py | 9 | 10118 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `traverse` visitor for processing documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
class DocGeneratorVisitor(object):
  """A visitor that generates docs for a python object when __call__ed."""

  def __init__(self, root_name=''):
    """Make a visitor.

    As this visitor is starting its traversal at a module or class, it will not
    be told the name of that object during traversal. `root_name` is the name it
    should use for that object, effectively prefixing all names with
    "root_name.".

    Args:
      root_name: The name of the root module/class.
    """
    self.set_root_name(root_name)
    self._index = {}
    self._tree = {}
    self._reverse_index = None
    self._duplicates = None
    self._duplicate_of = None

  def set_root_name(self, root_name):
    """Sets the root name for subsequent __call__s."""
    self._root_name = root_name or ''
    self._prefix = (root_name + '.') if root_name else ''

  @property
  def index(self):
    """A map from fully qualified names to objects to be documented.

    The index is filled when the visitor is passed to `traverse`.

    Returns:
      The index filled by traversal.
    """
    return self._index

  @property
  def tree(self):
    """A map from fully qualified names to all its child names for traversal.

    The full name to member names map is filled when the visitor is passed to
    `traverse`.

    Returns:
      The full name to member name map filled by traversal.
    """
    return self._tree

  @property
  def reverse_index(self):
    """A map from `id(object)` to the preferred fully qualified name.

    This map only contains non-primitive objects (no numbers or strings) present
    in `index` (for primitive objects, `id()` doesn't quite do the right thing).

    It is computed when it, `duplicate_of`, or `duplicates` are first accessed.

    Returns:
      The `id(object)` to full name map.
    """
    self._maybe_find_duplicates()
    return self._reverse_index

  @property
  def duplicate_of(self):
    """A map from duplicate full names to a preferred fully qualified name.

    This map only contains names that are not themself a preferred name.

    It is computed when it, `reverse_index`, or `duplicates` are first accessed.

    Returns:
      The map from duplicate name to preferred name.
    """
    self._maybe_find_duplicates()
    return self._duplicate_of

  @property
  def duplicates(self):
    """A map from preferred full names to a list of all names for this symbol.

    This function returns a map from preferred (master) name for a symbol to a
    lexicographically sorted list of all aliases for that name (incl. the master
    name). Symbols without duplicate names do not appear in this map.

    It is computed when it, `reverse_index`, or `duplicate_of` are first
    accessed.

    Returns:
      The map from master name to list of all duplicate names.
    """
    self._maybe_find_duplicates()
    return self._duplicates

  def _add_prefix(self, name):
    """Adds the root name to a name."""
    return self._prefix + name if name else self._root_name

  def __call__(self, parent_name, parent, children):
    """Visitor interface, see `tensorflow/tools/common:traverse` for details.

    This method is called for each symbol found in a traversal using
    `tensorflow/tools/common:traverse`. It should not be called directly in
    user code.

    Args:
      parent_name: The fully qualified name of a symbol found during traversal.
      parent: The Python object referenced by `parent_name`.
      children: A list of `(name, py_object)` pairs enumerating, in alphabetical
        order, the children (as determined by `tf_inspect.getmembers`) of
          `parent`. `name` is the local name of `py_object` in `parent`.

    Raises:
      RuntimeError: If this visitor is called with a `parent` that is not a
        class or module.
    """
    parent_name = self._add_prefix(parent_name)
    self._index[parent_name] = parent
    self._tree[parent_name] = []

    if not (tf_inspect.ismodule(parent) or tf_inspect.isclass(parent)):
      raise RuntimeError('Unexpected type in visitor -- %s: %r' % (parent_name,
                                                                   parent))

    for name, child in list(children):
      # Don't document __metaclass__. Remove it from `children` by value:
      # the previous enumerate/del-by-index approach deleted from the live
      # list using an index computed over a copy, which deletes the wrong
      # element once any earlier entry has been removed.
      if name in ['__metaclass__']:
        children.remove((name, child))
        continue

      full_name = '.'.join([parent_name, name]) if parent_name else name
      self._index[full_name] = child
      self._tree[parent_name].append(name)

  def _score_name(self, name):
    """Return a tuple of scores indicating how to sort for the best name.

    This function is meant to be used as the `key` to the `sorted` function.

    This sorting in order:
      Prefers names refering to the defining class, over a subclass.
      Prefers names that are not in "contrib".
      prefers submodules to the root namespace.
      Prefers short names `tf.thing` over `tf.a.b.c.thing`
      Sorts lexicographically on name parts.

    Args:
      name: the full name to score, for example `tf.estimator.Estimator`

    Returns:
      A tuple of scores. When sorted the preferred name will have the lowest
      value.
    """
    parts = name.split('.')
    short_name = parts[-1]

    container = self._index['.'.join(parts[:-1])]

    defining_class_score = 1
    if tf_inspect.isclass(container):
      if short_name in container.__dict__:
        # prefer the defining class
        defining_class_score = -1

    contrib_score = -1
    if 'contrib' in parts:
      contrib_score = 1

    while parts:
      container = self._index['.'.join(parts)]
      if tf_inspect.ismodule(container):
        break
      parts.pop()
    module_length = len(parts)
    if len(parts) == 2:
      # `tf.submodule.thing` is better than `tf.thing`
      module_length_score = -1
    else:
      # shorter is better
      module_length_score = module_length

    return (defining_class_score, contrib_score, module_length_score, name)

  def _maybe_find_duplicates(self):
    """Compute data structures containing information about duplicates.

    Find duplicates in `index` and decide on one to be the "master" name.

    Computes a reverse_index mapping each object id to its master name.

    Also computes a map `duplicate_of` from aliases to their master name (the
    master name itself has no entry in this map), and a map `duplicates` from
    master names to a lexicographically sorted list of all aliases for that name
    (incl. the master name).

    All these are computed and set as fields if they haven't already.
    """
    if self._reverse_index is not None:
      return

    # Maps the id of a symbol to its fully qualified name. For symbols that have
    # several aliases, this map contains the first one found.
    # We use id(py_object) to get a hashable value for py_object. Note all
    # objects in _index are in memory at the same time so this is safe.
    reverse_index = {}

    # Make a preliminary duplicates map. For all sets of duplicate names, it
    # maps the first name found to a list of all duplicate names.
    raw_duplicates = {}
    for full_name, py_object in six.iteritems(self._index):
      # We cannot use the duplicate mechanism for some constants, since e.g.,
      # id(c1) == id(c2) with c1=1, c2=1. This is unproblematic since constants
      # have no usable docstring and won't be documented automatically.
      if (py_object is not None and
          not isinstance(py_object, six.integer_types + six.string_types +
                         (six.binary_type, six.text_type, float, complex, bool))
          and py_object is not ()):  # pylint: disable=literal-comparison
        object_id = id(py_object)
        if object_id in reverse_index:
          master_name = reverse_index[object_id]
          if master_name in raw_duplicates:
            raw_duplicates[master_name].append(full_name)
          else:
            raw_duplicates[master_name] = [master_name, full_name]
        else:
          reverse_index[object_id] = full_name

    # Decide on master names, rewire duplicates and make a duplicate_of map
    # mapping all non-master duplicates to the master name. The master symbol
    # does not have an entry in this map.
    duplicate_of = {}

    # Duplicates maps the main symbols to the set of all duplicates of that
    # symbol (incl. itself).
    duplicates = {}

    for names in raw_duplicates.values():
      names = sorted(names)
      master_name = (
          tf_export.get_canonical_name_for_symbol(self._index[names[0]])
          if names else None)
      if master_name:
        master_name = 'tf.%s' % master_name
      else:
        # Choose the master name with a lexical sort on the tuples returned by
        # by _score_name.
        master_name = min(names, key=self._score_name)

      duplicates[master_name] = names
      for name in names:
        if name != master_name:
          duplicate_of[name] = master_name

      # Set the reverse index to the canonical name.
      reverse_index[id(self._index[master_name])] = master_name

    self._duplicate_of = duplicate_of
    self._duplicates = duplicates
    self._reverse_index = reverse_index
| apache-2.0 | 8,502,523,155,235,694,000 | 34.626761 | 80 | 0.658035 | false |
wartman4404/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/standalone.py | 410 | 46785 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Standalone WebSocket server.
Use this file to launch pywebsocket without Apache HTTP Server.
BASIC USAGE
===========
Go to the src directory and run
$ python mod_pywebsocket/standalone.py [-p <ws_port>]
[-w <websock_handlers>]
[-d <document_root>]
<ws_port> is the port number to use for ws:// connection.
<document_root> is the path to the root directory of HTML files.
<websock_handlers> is the path to the root directory of WebSocket handlers.
If not specified, <document_root> will be used. See __init__.py (or
run $ pydoc mod_pywebsocket) for how to write WebSocket handlers.
For more detail and other options, run
$ python mod_pywebsocket/standalone.py --help
or see _build_option_parser method below.
For trouble shooting, adding "--log_level debug" might help you.
TRY DEMO
========
Go to the src directory and run standalone.py with -d option to set the
document root to the directory containing example HTMLs and handlers like this:
$ cd src
$ PYTHONPATH=. python mod_pywebsocket/standalone.py -d example
to launch pywebsocket with the sample handler and html on port 80. Open
http://localhost/console.html, click the connect button, type something into
the text box next to the send button and click the send button. If everything
is working, you'll see the message you typed echoed by the server.
USING TLS
=========
To run the standalone server with TLS support, run it with -t, -k, and -c
options. When TLS is enabled, the standalone server accepts only TLS connection.
Note that when ssl module is used and the key/cert location is incorrect,
TLS connection silently fails while pyOpenSSL fails on startup.
Example:
$ PYTHONPATH=. python mod_pywebsocket/standalone.py \
-d example \
-p 10443 \
-t \
-c ../test/cert/cert.pem \
-k ../test/cert/key.pem \
Note that when passing a relative path to -c and -k option, it will be resolved
using the document root directory as the base.
USING CLIENT AUTHENTICATION
===========================
To run the standalone server with TLS client authentication support, run it with
--tls-client-auth and --tls-client-ca options in addition to ones required for
TLS support.
Example:
$ PYTHONPATH=. python mod_pywebsocket/standalone.py -d example -p 10443 -t \
-c ../test/cert/cert.pem -k ../test/cert/key.pem \
--tls-client-auth \
--tls-client-ca=../test/cert/cacert.pem
Note that when passing a relative path to --tls-client-ca option, it will be
resolved using the document root directory as the base.
CONFIGURATION FILE
==================
You can also write a configuration file and use it by specifying the path to
the configuration file by --config option. Please write a configuration file
following the documentation of the Python ConfigParser library. Name of each
entry must be the long version argument name. E.g. to set log level to debug,
add the following line:
log_level=debug
For options which doesn't take value, please add some fake value. E.g. for
--tls option, add the following line:
tls=True
Note that tls will be enabled even if you write tls=False as the value part is
fake.
When both a command line argument and a configuration file entry are set for
the same configuration item, the command line value will override one in the
configuration file.
THREADING
=========
This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
used for each request.
SECURITY WARNING
================
This uses CGIHTTPServer and CGIHTTPServer is not secure.
It may execute arbitrary Python code or external programs. It should not be
used outside a firewall.
"""
import BaseHTTPServer
import CGIHTTPServer
import SimpleHTTPServer
import SocketServer
import ConfigParser
import base64
import httplib
import logging
import logging.handlers
import optparse
import os
import re
import select
import socket
import sys
import threading
import time
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import http_header_util
from mod_pywebsocket import memorizingfile
from mod_pywebsocket import util
from mod_pywebsocket.xhr_benchmark_handler import XHRBenchmarkHandler
# Defaults for rotating-log configuration (max file size and backup count).
_DEFAULT_LOG_MAX_BYTES = 1024 * 256
_DEFAULT_LOG_BACKUP_COUNT = 5
# Default for the server's request_queue_size (listen backlog).
_DEFAULT_REQUEST_QUEUE_SIZE = 128
# 1024 is practically large enough to contain WebSocket handshake lines.
_MAX_MEMORIZED_LINES = 1024
# Constants for the --tls_module flag.
_TLS_BY_STANDARD_MODULE = 'ssl'
_TLS_BY_PYOPENSSL = 'pyopenssl'
class _StandaloneConnection(object):
    """Mimic mod_python mp_conn."""

    def __init__(self, request_handler):
        """Construct an instance.

        Args:
            request_handler: A WebSocketRequestHandler instance.
        """

        self._request_handler = request_handler

    def get_local_addr(self):
        """Getter to mimic mp_conn.local_addr."""

        return (self._request_handler.server.server_name,
                self._request_handler.server.server_port)
    # Pre-decorator property idiom keeps the getter callable by name too.
    local_addr = property(get_local_addr)

    def get_remote_addr(self):
        """Getter to mimic mp_conn.remote_addr.

        Setting the property in __init__ won't work because the request
        handler is not initialized yet there."""

        return self._request_handler.client_address
    remote_addr = property(get_remote_addr)

    def write(self, data):
        """Mimic mp_conn.write()."""

        return self._request_handler.wfile.write(data)

    def read(self, length):
        """Mimic mp_conn.read()."""

        return self._request_handler.rfile.read(length)

    def get_memorized_lines(self):
        """Get memorized lines."""

        # rfile is a memorizingfile wrapper that records handshake lines.
        return self._request_handler.rfile.get_memorized_lines()
class _StandaloneRequest(object):
    """Mimic mod_python request."""

    def __init__(self, request_handler, use_tls):
        """Construct an instance.

        Args:
            request_handler: A WebSocketRequestHandler instance.
        """

        self._logger = util.get_class_logger(self)

        self._request_handler = request_handler
        self.connection = _StandaloneConnection(request_handler)
        self._use_tls = use_tls
        self.headers_in = request_handler.headers

    def get_uri(self):
        """Getter to mimic request.uri.

        This method returns the raw data at the Request-URI part of the
        Request-Line, while the uri method on the request object of mod_python
        returns the path portion after parsing the raw data. This behavior is
        kept for compatibility.
        """

        return self._request_handler.path
    uri = property(get_uri)

    def get_unparsed_uri(self):
        """Getter to mimic request.unparsed_uri."""

        return self._request_handler.path
    unparsed_uri = property(get_unparsed_uri)

    def get_method(self):
        """Getter to mimic request.method."""

        return self._request_handler.command
    method = property(get_method)

    def get_protocol(self):
        """Getter to mimic request.protocol."""

        return self._request_handler.request_version
    protocol = property(get_protocol)

    def is_https(self):
        """Mimic request.is_https()."""

        return self._use_tls
def _import_ssl():
global ssl
try:
import ssl
return True
except ImportError:
return False
def _import_pyopenssl():
global OpenSSL
try:
import OpenSSL.SSL
return True
except ImportError:
return False
class _StandaloneSSLConnection(object):
    """A wrapper class for OpenSSL.SSL.Connection to
    - provide makefile method which is not supported by the class
    - tweak shutdown method since OpenSSL.SSL.Connection.shutdown doesn't
      accept the "how" argument.
    - convert SysCallError exceptions that its recv method may raise into a
      return value of '', meaning EOF. We cannot overwrite the recv method on
      self._connection since it's immutable.
    """

    # Attributes served by this wrapper itself; everything else is
    # transparently delegated to the wrapped connection below.
    _OVERRIDDEN_ATTRIBUTES = ['_connection', 'makefile', 'shutdown', 'recv']

    def __init__(self, connection):
        self._connection = connection

    def __getattribute__(self, name):
        if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
            return object.__getattribute__(self, name)
        return self._connection.__getattribute__(name)

    def __setattr__(self, name, value):
        if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
            return object.__setattr__(self, name, value)
        return self._connection.__setattr__(name, value)

    def makefile(self, mode='r', bufsize=-1):
        # socket._fileobject is a Python 2 internal; this module targets
        # Python 2.
        return socket._fileobject(self, mode, bufsize)

    def shutdown(self, unused_how):
        # The "how" argument is dropped: pyOpenSSL's shutdown takes none.
        self._connection.shutdown()

    def recv(self, bufsize, flags=0):
        if flags != 0:
            raise ValueError('Non-zero flags not allowed')

        try:
            return self._connection.recv(bufsize)
        except OpenSSL.SSL.SysCallError, (err, message):
            if err == -1:
                # Suppress "unexpected EOF" exception. See the OpenSSL document
                # for SSL_get_error.
                return ''
            raise
def _alias_handlers(dispatcher, websock_handlers_map_file):
"""Set aliases specified in websock_handler_map_file in dispatcher.
Args:
dispatcher: dispatch.Dispatcher instance
websock_handler_map_file: alias map file
"""
fp = open(websock_handlers_map_file)
try:
for line in fp:
if line[0] == '#' or line.isspace():
continue
m = re.match('(\S+)\s+(\S+)', line)
if not m:
logging.warning('Wrong format in map file:' + line)
continue
try:
dispatcher.add_resource_path_alias(
m.group(1), m.group(2))
except dispatch.DispatchException, e:
logging.error(str(e))
finally:
fp.close()
class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """HTTPServer specialized for WebSocket."""

    # Overrides SocketServer.ThreadingMixIn.daemon_threads
    daemon_threads = True
    # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
    allow_reuse_address = True

    def __init__(self, options):
        """Override SocketServer.TCPServer.__init__ to set SSL enabled
        socket object to self.socket before server_bind and server_activate,
        if necessary.
        """

        # Share a Dispatcher among request handlers to save time for
        # instantiation. Dispatcher can be shared because it is thread-safe.
        options.dispatcher = dispatch.Dispatcher(
            options.websock_handlers,
            options.scan_dir,
            options.allow_handlers_outside_root_dir)
        if options.websock_handlers_map_file:
            _alias_handlers(options.dispatcher,
                            options.websock_handlers_map_file)
        warnings = options.dispatcher.source_warnings()
        if warnings:
            for warning in warnings:
                logging.warning('Warning in source loading: %s' % warning)

        self._logger = util.get_class_logger(self)

        self.request_queue_size = options.request_queue_size
        self.__ws_is_shut_down = threading.Event()
        self.__ws_serving = False

        # Deliberately call BaseServer.__init__ (not TCPServer.__init__) so
        # no single listening socket is created here; _create_sockets below
        # builds and manages one socket per address family instead.
        SocketServer.BaseServer.__init__(
            self, (options.server_host, options.port), WebSocketRequestHandler)

        # Expose the options object to allow handler objects access it. We name
        # it with websocket_ prefix to avoid conflict.
        self.websocket_server_options = options

        self._create_sockets()
        self.server_bind()
        self.server_activate()

    def _create_sockets(self):
        """Create (and TLS-wrap, when the ssl module is used) one listening
        socket per address family, storing (socket, addrinfo) pairs in
        self._sockets."""

        self.server_name, self.server_port = self.server_address
        self._sockets = []
        if not self.server_name:
            # On platforms that doesn't support IPv6, the first bind fails.
            # On platforms that supports IPv6
            # - If it binds both IPv4 and IPv6 on call with AF_INET6, the
            #   first bind succeeds and the second fails (we'll see 'Address
            #   already in use' error).
            # - If it binds only IPv6 on call with AF_INET6, both call are
            #   expected to succeed to listen both protocol.
            addrinfo_array = [
                (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
                (socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
        else:
            addrinfo_array = socket.getaddrinfo(self.server_name,
                                                self.server_port,
                                                socket.AF_UNSPEC,
                                                socket.SOCK_STREAM,
                                                socket.IPPROTO_TCP)
        for addrinfo in addrinfo_array:
            self._logger.info('Create socket on: %r', addrinfo)
            family, socktype, proto, canonname, sockaddr = addrinfo
            try:
                socket_ = socket.socket(family, socktype)
            except Exception, e:
                # A family unsupported on this platform is skipped, not fatal.
                self._logger.info('Skip by failure: %r', e)
                continue
            server_options = self.websocket_server_options
            if server_options.use_tls:
                # For the case of _HAS_OPEN_SSL, we do wrapper setup after
                # accept.
                if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
                    if server_options.tls_client_auth:
                        if server_options.tls_client_cert_optional:
                            client_cert_ = ssl.CERT_OPTIONAL
                        else:
                            client_cert_ = ssl.CERT_REQUIRED
                    else:
                        client_cert_ = ssl.CERT_NONE
                    # do_handshake_on_connect=False: the handshake is driven
                    # explicitly in get_request after accept().
                    socket_ = ssl.wrap_socket(socket_,
                        keyfile=server_options.private_key,
                        certfile=server_options.certificate,
                        ssl_version=ssl.PROTOCOL_SSLv23,
                        ca_certs=server_options.tls_client_ca,
                        cert_reqs=client_cert_,
                        do_handshake_on_connect=False)
            self._sockets.append((socket_, addrinfo))

    def server_bind(self):
        """Override SocketServer.TCPServer.server_bind to enable multiple
        sockets bind.
        """

        failed_sockets = []

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.info('Bind on: %r', addrinfo)
            if self.allow_reuse_address:
                socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                socket_.bind(self.server_address)
            except Exception, e:
                self._logger.info('Skip by failure: %r', e)
                socket_.close()
                failed_sockets.append(socketinfo)
            if self.server_address[1] == 0:
                # The operating system assigns the actual port number for port
                # number 0. This case, the second and later sockets should use
                # the same port number. Also self.server_port is rewritten
                # because it is exported, and will be used by external code.
                self.server_address = (
                    self.server_name, socket_.getsockname()[1])
                self.server_port = self.server_address[1]
                self._logger.info('Port %r is assigned', self.server_port)

        for socketinfo in failed_sockets:
            self._sockets.remove(socketinfo)

    def server_activate(self):
        """Override SocketServer.TCPServer.server_activate to enable multiple
        sockets listen.
        """

        failed_sockets = []

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.info('Listen on: %r', addrinfo)
            try:
                socket_.listen(self.request_queue_size)
            except Exception, e:
                self._logger.info('Skip by failure: %r', e)
                socket_.close()
                failed_sockets.append(socketinfo)

        for socketinfo in failed_sockets:
            self._sockets.remove(socketinfo)

        if len(self._sockets) == 0:
            self._logger.critical(
                'No sockets activated. Use info log level to see the reason.')

    def server_close(self):
        """Override SocketServer.TCPServer.server_close to enable multiple
        sockets close.
        """

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.info('Close on: %r', addrinfo)
            socket_.close()

    def fileno(self):
        """Override SocketServer.TCPServer.fileno."""

        # A single file descriptor cannot represent multiple listening
        # sockets; log loudly and return the first socket's fd as a best
        # effort.
        self._logger.critical('Not supported: fileno')
        return self._sockets[0][0].fileno()

    def handle_error(self, request, client_address):
        """Override SocketServer.handle_error."""

        self._logger.error(
            'Exception in processing request from: %r\n%s',
            client_address,
            util.get_stack_trace())
        # Note: client_address is a tuple.

    def get_request(self):
        """Override TCPServer.get_request to wrap OpenSSL.SSL.Connection
        object with _StandaloneSSLConnection to provide makefile method. We
        cannot substitute OpenSSL.SSL.Connection.makefile since it's readonly
        attribute.
        """

        accepted_socket, client_address = self.socket.accept()

        server_options = self.websocket_server_options
        if server_options.use_tls:
            if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
                try:
                    accepted_socket.do_handshake()
                except ssl.SSLError, e:
                    self._logger.debug('%r', e)
                    raise

                # Print cipher in use. Handshake is done on accept.
                self._logger.debug('Cipher: %s', accepted_socket.cipher())
                self._logger.debug('Client cert: %r',
                                   accepted_socket.getpeercert())
            elif server_options.tls_module == _TLS_BY_PYOPENSSL:
                # We cannot print the cipher in use. pyOpenSSL doesn't provide
                # any method to fetch that.

                ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
                ctx.use_privatekey_file(server_options.private_key)
                ctx.use_certificate_file(server_options.certificate)

                def default_callback(conn, cert, errnum, errdepth, ok):
                    return ok == 1

                # See the OpenSSL document for SSL_CTX_set_verify.
                if server_options.tls_client_auth:
                    verify_mode = OpenSSL.SSL.VERIFY_PEER
                    if not server_options.tls_client_cert_optional:
                        verify_mode |= OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT
                    ctx.set_verify(verify_mode, default_callback)
                    ctx.load_verify_locations(server_options.tls_client_ca,
                                              None)
                else:
                    ctx.set_verify(OpenSSL.SSL.VERIFY_NONE, default_callback)
                accepted_socket = OpenSSL.SSL.Connection(ctx, accepted_socket)
                accepted_socket.set_accept_state()

                # Convert SSL related error into socket.error so that
                # SocketServer ignores them and keeps running.
                #
                # TODO(tyoshino): Convert all kinds of errors.
                try:
                    accepted_socket.do_handshake()
                except OpenSSL.SSL.Error, e:
                    # Set errno part to 1 (SSL_ERROR_SSL) like the ssl module
                    # does.
                    self._logger.debug('%r', e)
                    raise socket.error(1, '%r' % e)
                cert = accepted_socket.get_peer_certificate()
                if cert is not None:
                    self._logger.debug('Client cert subject: %r',
                                       cert.get_subject().get_components())
                accepted_socket = _StandaloneSSLConnection(accepted_socket)
            else:
                raise ValueError('No TLS support module is available')

        return accepted_socket, client_address

    def serve_forever(self, poll_interval=0.5):
        """Override SocketServer.BaseServer.serve_forever."""

        self.__ws_serving = True
        self.__ws_is_shut_down.clear()
        handle_request = self.handle_request
        if hasattr(self, '_handle_request_noblock'):
            handle_request = self._handle_request_noblock
        else:
            self._logger.warning('Fallback to blocking request handler')
        try:
            while self.__ws_serving:
                r, w, e = select.select(
                    [socket_[0] for socket_ in self._sockets],
                    [], [], poll_interval)
                for socket_ in r:
                    # Point self.socket at the ready listener so the base
                    # class machinery accepts from the right socket.
                    self.socket = socket_
                    handle_request()
                self.socket = None
        finally:
            # Signal shutdown() waiters that the serve loop has exited.
            self.__ws_is_shut_down.set()

    def shutdown(self):
        """Override SocketServer.BaseServer.shutdown."""

        self.__ws_serving = False
        # Blocks until serve_forever's finally clause sets the event.
        self.__ws_is_shut_down.wait()
class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
    """CGIHTTPRequestHandler specialized for WebSocket."""

    # Use httplib.HTTPMessage instead of mimetools.Message.
    MessageClass = httplib.HTTPMessage

    protocol_version = "HTTP/1.1"

    def setup(self):
        """Override SocketServer.StreamRequestHandler.setup to wrap rfile
        with MemorizingFile.

        This method will be called by BaseRequestHandler's constructor
        before calling BaseHTTPRequestHandler.handle.
        BaseHTTPRequestHandler.handle will call
        BaseHTTPRequestHandler.handle_one_request and it will call
        WebSocketRequestHandler.parse_request.
        """

        # Call superclass's setup to prepare rfile, wfile, etc. See setup
        # definition on the root class SocketServer.StreamRequestHandler to
        # understand what this does.
        CGIHTTPServer.CGIHTTPRequestHandler.setup(self)

        self.rfile = memorizingfile.MemorizingFile(
            self.rfile,
            max_memorized_lines=_MAX_MEMORIZED_LINES)

    def __init__(self, request, client_address, server):
        self._logger = util.get_class_logger(self)

        self._options = server.websocket_server_options

        # Overrides CGIHTTPServerRequestHandler.cgi_directories.
        self.cgi_directories = self._options.cgi_directories
        # Replace CGIHTTPRequestHandler.is_executable method.
        if self._options.is_executable_method is not None:
            self.is_executable = self._options.is_executable_method

        # This actually calls BaseRequestHandler.__init__.
        CGIHTTPServer.CGIHTTPRequestHandler.__init__(
            self, request, client_address, server)

    def parse_request(self):
        """Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request.

        Return True to continue processing for HTTP(S), False otherwise.

        See BaseHTTPRequestHandler.handle_one_request method which calls
        this method to understand how the return value will be handled.
        """

        # We hook parse_request method, but also call the original
        # CGIHTTPRequestHandler.parse_request since when we return False,
        # CGIHTTPRequestHandler.handle_one_request continues processing and
        # it needs variables set by CGIHTTPRequestHandler.parse_request.
        #
        # Variables set by this method will be also used by WebSocket request
        # handling (self.path, self.command, self.requestline, etc. See also
        # how _StandaloneRequest's members are implemented using these
        # attributes).
        if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self):
            return False

        if self.command == "CONNECT":
            # CONNECT is acknowledged and then handled as a raw keep-alive
            # connection rather than an HTTP(S) resource request.
            self.send_response(200, "Connected")
            self.send_header("Connection", "keep-alive")
            self.end_headers()
            return False

        if self._options.use_basic_auth:
            auth = self.headers.getheader('Authorization')
            if auth != self._options.basic_auth_credential:
                self.send_response(401)
                self.send_header('WWW-Authenticate',
                                 'Basic realm="Pywebsocket"')
                self.end_headers()
                self._logger.info('Request basic authentication')
                return False

        host, port, resource = http_header_util.parse_uri(self.path)

        # Special paths for XMLHttpRequest benchmark
        xhr_benchmark_helper_prefix = '/073be001e10950692ccbf3a2ad21c245'
        if resource == (xhr_benchmark_helper_prefix + '_send'):
            xhr_benchmark_handler = XHRBenchmarkHandler(
                self.headers, self.rfile, self.wfile)
            xhr_benchmark_handler.do_send()
            return False
        if resource == (xhr_benchmark_helper_prefix + '_receive'):
            xhr_benchmark_handler = XHRBenchmarkHandler(
                self.headers, self.rfile, self.wfile)
            xhr_benchmark_handler.do_receive()
            return False

        if resource is None:
            self._logger.info('Invalid URI: %r', self.path)
            self._logger.info('Fallback to CGIHTTPRequestHandler')
            return True

        server_options = self.server.websocket_server_options
        if host is not None:
            validation_host = server_options.validation_host
            if validation_host is not None and host != validation_host:
                self._logger.info('Invalid host: %r (expected: %r)',
                                  host,
                                  validation_host)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        if port is not None:
            validation_port = server_options.validation_port
            if validation_port is not None and port != validation_port:
                self._logger.info('Invalid port: %r (expected: %r)',
                                  port,
                                  validation_port)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        # From here on self.path holds only the resource part of the URI.
        self.path = resource

        request = _StandaloneRequest(self, self._options.use_tls)

        try:
            # Fallback to default http handler for request paths for which
            # we don't have request handlers.
            if not self._options.dispatcher.get_handler_suite(self.path):
                self._logger.info('No handler for resource: %r',
                                  self.path)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        except dispatch.DispatchException, e:
            self._logger.info('Dispatch failed for error: %s', e)
            self.send_error(e.status)
            return False

        # If any Exceptions without except clause setup (including
        # DispatchException) is raised below this point, it will be caught
        # and logged by WebSocketServer.

        try:
            try:
                handshake.do_handshake(
                    request,
                    self._options.dispatcher,
                    allowDraft75=self._options.allow_draft75,
                    strict=self._options.strict)
            except handshake.VersionException, e:
                self._logger.info('Handshake failed for version error: %s', e)
                self.send_response(common.HTTP_STATUS_BAD_REQUEST)
                self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
                                 e.supported_versions)
                self.end_headers()
                return False
            except handshake.HandshakeException, e:
                # Handshake for ws(s) failed.
                self._logger.info('Handshake failed for error: %s', e)
                self.send_error(e.status)
                return False

            request._dispatcher = self._options.dispatcher
            self._options.dispatcher.transfer_data(request)
        except handshake.AbortedByUserException, e:
            self._logger.info('Aborted: %s', e)
        # WebSocket requests never continue through normal HTTP processing.
        return False

    def log_request(self, code='-', size='-'):
        """Override BaseHTTPServer.log_request."""

        self._logger.info('"%s" %s %s',
                          self.requestline, str(code), str(size))

    def log_error(self, *args):
        """Override BaseHTTPServer.log_error."""

        # Despite the name, this method is for warnings than for errors.
        # For example, HTTP status code is logged by this method.
        self._logger.warning('%s - %s',
                             self.address_string(),
                             args[0] % args[1:])

    def is_cgi(self):
        """Test whether self.path corresponds to a CGI script.

        Add extra check that self.path doesn't contains ..
        Also check if the file is a executable file or not.
        If the file is not executable, it is handled as static file or dir
        rather than a CGI script.
        """

        if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self):
            if '..' in self.path:
                return False
            # strip query parameter from request path
            resource_name = self.path.split('?', 2)[0]
            # convert resource_name into real path name in filesystem.
            scriptfile = self.translate_path(resource_name)
            if not os.path.isfile(scriptfile):
                return False
            if not self.is_executable(scriptfile):
                return False
            return True
        return False
def _get_logger_from_class(c):
return logging.getLogger('%s.%s' % (c.__module__, c.__name__))
def _configure_logging(options):
    """Configure the root logger and helper-class log levels from options."""

    # Register the custom FINE level used for per-frame tracing.
    logging.addLevelName(common.LOGLEVEL_FINE, 'FINE')

    logger = logging.getLogger()
    logger.setLevel(logging.getLevelName(options.log_level.upper()))
    if options.log_file:
        handler = logging.handlers.RotatingFileHandler(
            options.log_file, 'a', options.log_max, options.log_count)
    else:
        handler = logging.StreamHandler()
    formatter = logging.Formatter(
        '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # The deflate helpers get their own level (--deflate-log-level) so
    # compression logging can be tuned independently of the root logger.
    deflate_log_level_name = logging.getLevelName(
        options.deflate_log_level.upper())
    _get_logger_from_class(util._Deflater).setLevel(
        deflate_log_level_name)
    _get_logger_from_class(util._Inflater).setLevel(
        deflate_log_level_name)
def _build_option_parser():
    """Build the optparse parser for all standalone-server options.

    Returns:
        An optparse.OptionParser with every supported command-line option
        registered. Both dashed and underscored spellings are accepted for
        multi-word options.
    """

    parser = optparse.OptionParser()

    parser.add_option('--config', dest='config_file', type='string',
                      default=None,
                      help=('Path to configuration file. See the file comment '
                            'at the top of this file for the configuration '
                            'file format'))
    parser.add_option('-H', '--server-host', '--server_host',
                      dest='server_host',
                      default='',
                      help='server hostname to listen to')
    parser.add_option('-V', '--validation-host', '--validation_host',
                      dest='validation_host',
                      default=None,
                      help='server hostname to validate in absolute path.')
    parser.add_option('-p', '--port', dest='port', type='int',
                      default=common.DEFAULT_WEB_SOCKET_PORT,
                      help='port to listen to')
    parser.add_option('-P', '--validation-port', '--validation_port',
                      dest='validation_port', type='int',
                      default=None,
                      help='server port to validate in absolute path.')
    parser.add_option('-w', '--websock-handlers', '--websock_handlers',
                      dest='websock_handlers',
                      default='.',
                      help=('The root directory of WebSocket handler files. '
                            'If the path is relative, --document-root is used '
                            'as the base.'))
    parser.add_option('-m', '--websock-handlers-map-file',
                      '--websock_handlers_map_file',
                      dest='websock_handlers_map_file',
                      default=None,
                      help=('WebSocket handlers map file. '
                            'Each line consists of alias_resource_path and '
                            'existing_resource_path, separated by spaces.'))
    parser.add_option('-s', '--scan-dir', '--scan_dir', dest='scan_dir',
                      default=None,
                      help=('Must be a directory under --websock-handlers. '
                            'Only handlers under this directory are scanned '
                            'and registered to the server. '
                            'Useful for saving scan time when the handler '
                            'root directory contains lots of files that are '
                            'not handler file or are handler files but you '
                            'don\'t want them to be registered. '))
    parser.add_option('--allow-handlers-outside-root-dir',
                      '--allow_handlers_outside_root_dir',
                      dest='allow_handlers_outside_root_dir',
                      action='store_true',
                      default=False,
                      help=('Scans WebSocket handlers even if their canonical '
                            'path is not under --websock-handlers.'))
    parser.add_option('-d', '--document-root', '--document_root',
                      dest='document_root', default='.',
                      help='Document root directory.')
    parser.add_option('-x', '--cgi-paths', '--cgi_paths', dest='cgi_paths',
                      default=None,
                      # Space added after "document_root." so the two
                      # concatenated sentences don't run together in --help.
                      help=('CGI paths relative to document_root. '
                            'Comma-separated. (e.g -x /cgi,/htbin) '
                            'Files under document_root/cgi_path are handled '
                            'as CGI programs. Must be executable.'))
    parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
                      default=False, help='use TLS (wss://)')
    parser.add_option('--tls-module', '--tls_module', dest='tls_module',
                      type='choice',
                      choices=[_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
                      help='Use ssl module if "%s" is specified. '
                           'Use pyOpenSSL module if "%s" is specified' %
                           (_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
    parser.add_option('-k', '--private-key', '--private_key',
                      dest='private_key',
                      default='', help='TLS private key file.')
    parser.add_option('-c', '--certificate', dest='certificate',
                      default='', help='TLS certificate file.')
    parser.add_option('--tls-client-auth', dest='tls_client_auth',
                      action='store_true', default=False,
                      help='Requests TLS client auth on every connection.')
    parser.add_option('--tls-client-cert-optional',
                      dest='tls_client_cert_optional',
                      action='store_true', default=False,
                      help=('Makes client certificate optional even though '
                            'TLS client auth is enabled.'))
    parser.add_option('--tls-client-ca', dest='tls_client_ca', default='',
                      help=('Specifies a pem file which contains a set of '
                            'concatenated CA certificates which are used to '
                            'validate certificates passed from clients'))
    parser.add_option('--basic-auth', dest='use_basic_auth',
                      action='store_true', default=False,
                      help='Requires Basic authentication.')
    parser.add_option('--basic-auth-credential',
                      dest='basic_auth_credential', default='test:test',
                      help='Specifies the credential of basic authentication '
                           'by username:password pair (e.g. test:test).')
    parser.add_option('-l', '--log-file', '--log_file', dest='log_file',
                      default='', help='Log file.')
    # Custom log level:
    # - FINE: Prints status of each frame processing step
    parser.add_option('--log-level', '--log_level', type='choice',
                      dest='log_level', default='warn',
                      choices=['fine',
                               'debug', 'info', 'warning', 'warn', 'error',
                               'critical'],
                      help='Log level.')
    parser.add_option('--deflate-log-level', '--deflate_log_level',
                      type='choice',
                      dest='deflate_log_level', default='warn',
                      choices=['debug', 'info', 'warning', 'warn', 'error',
                               'critical'],
                      help='Log level for _Deflater and _Inflater.')
    parser.add_option('--thread-monitor-interval-in-sec',
                      '--thread_monitor_interval_in_sec',
                      dest='thread_monitor_interval_in_sec',
                      type='int', default=-1,
                      help=('If positive integer is specified, run a thread '
                            'monitor to show the status of server threads '
                            'periodically in the specified interval in '
                            'second. If non-positive integer is specified, '
                            'disable the thread monitor.'))
    parser.add_option('--log-max', '--log_max', dest='log_max', type='int',
                      default=_DEFAULT_LOG_MAX_BYTES,
                      help='Log maximum bytes')
    parser.add_option('--log-count', '--log_count', dest='log_count',
                      type='int', default=_DEFAULT_LOG_BACKUP_COUNT,
                      help='Log backup count')
    parser.add_option('--allow-draft75', dest='allow_draft75',
                      action='store_true', default=False,
                      help='Obsolete option. Ignored.')
    parser.add_option('--strict', dest='strict', action='store_true',
                      default=False, help='Obsolete option. Ignored.')
    parser.add_option('-q', '--queue', dest='request_queue_size', type='int',
                      default=_DEFAULT_REQUEST_QUEUE_SIZE,
                      help='request queue size')

    return parser
class ThreadMonitor(threading.Thread):
    """Daemon thread that periodically logs the names of all live threads."""

    daemon = True

    def __init__(self, interval_in_sec):
        threading.Thread.__init__(self, name='ThreadMonitor')

        self._logger = util.get_class_logger(self)
        self._interval_in_sec = interval_in_sec

    def run(self):
        while True:
            names = [thread.name for thread in threading.enumerate()]
            self._logger.info(
                "%d active threads: %s",
                threading.active_count(),
                ', '.join(names))
            time.sleep(self._interval_in_sec)
def _parse_args_and_config(args):
    """Parse command-line args, merging in options from --config if given.

    Config-file options are placed before the original command-line args,
    so for plain 'store' options an explicit command-line value overrides
    the config file.
    """

    parser = _build_option_parser()

    # First, parse options without configuration file.
    temporary_options, temporary_args = parser.parse_args(args=args)
    if temporary_args:
        logging.critical(
            'Unrecognized positional arguments: %r', temporary_args)
        sys.exit(1)

    if temporary_options.config_file:
        try:
            config_fp = open(temporary_options.config_file, 'r')
        except IOError, e:
            logging.critical(
                'Failed to open configuration file %r: %r',
                temporary_options.config_file,
                e)
            sys.exit(1)

        config_parser = ConfigParser.SafeConfigParser()
        config_parser.readfp(config_fp)
        config_fp.close()

        # Turn each [pywebsocket] key/value pair into a '--key value' pair
        # and re-parse everything together.
        args_from_config = []
        for name, value in config_parser.items('pywebsocket'):
            args_from_config.append('--' + name)
            args_from_config.append(value)
        if args is None:
            args = args_from_config
        else:
            args = args_from_config + args
        return parser.parse_args(args=args)
    else:
        return temporary_options, temporary_args
def _main(args=None):
    """You can call this function from your own program, but please note that
    this function has some side-effects that might affect your program. For
    example, util.wrap_popen3_for_win use in this method replaces implementation
    of os.popen3.
    """

    options, args = _parse_args_and_config(args=args)

    # Serve files relative to the document root from here on.
    os.chdir(options.document_root)

    _configure_logging(options)

    if options.allow_draft75:
        logging.warning('--allow_draft75 option is obsolete.')

    if options.strict:
        logging.warning('--strict option is obsolete.')

    # TODO(tyoshino): Clean up initialization of CGI related values. Move some
    # of code here to WebSocketRequestHandler class if it's better.
    options.cgi_directories = []
    options.is_executable_method = None
    if options.cgi_paths:
        options.cgi_directories = options.cgi_paths.split(',')
        if sys.platform in ('cygwin', 'win32'):
            cygwin_path = None

            # For Win32 Python, it is expected that CYGWIN_PATH
            # is set to a directory of cygwin binaries.
            # For example, websocket_server.py in Chromium sets CYGWIN_PATH to
            # full path of third_party/cygwin/bin.
            if 'CYGWIN_PATH' in os.environ:
                cygwin_path = os.environ['CYGWIN_PATH']
            util.wrap_popen3_for_win(cygwin_path)

            def __check_script(scriptpath):
                return util.get_script_interp(scriptpath, cygwin_path)

            options.is_executable_method = __check_script

    # Validate TLS-related option combinations and pick the TLS module.
    if options.use_tls:
        if options.tls_module is None:
            # No module explicitly requested: prefer ssl, then pyOpenSSL.
            if _import_ssl():
                options.tls_module = _TLS_BY_STANDARD_MODULE
                logging.debug('Using ssl module')
            elif _import_pyopenssl():
                options.tls_module = _TLS_BY_PYOPENSSL
                logging.debug('Using pyOpenSSL module')
            else:
                logging.critical(
                    'TLS support requires ssl or pyOpenSSL module.')
                sys.exit(1)
        elif options.tls_module == _TLS_BY_STANDARD_MODULE:
            if not _import_ssl():
                logging.critical('ssl module is not available')
                sys.exit(1)
        elif options.tls_module == _TLS_BY_PYOPENSSL:
            if not _import_pyopenssl():
                logging.critical('pyOpenSSL module is not available')
                sys.exit(1)
        else:
            logging.critical('Invalid --tls-module option: %r',
                             options.tls_module)
            sys.exit(1)

        if not options.private_key or not options.certificate:
            logging.critical(
                'To use TLS, specify private_key and certificate.')
            sys.exit(1)

        if (options.tls_client_cert_optional and
            not options.tls_client_auth):
            logging.critical('Client authentication must be enabled to '
                             'specify tls_client_cert_optional')
            sys.exit(1)
    else:
        if options.tls_module is not None:
            logging.critical('Use --tls-module option only together with '
                             '--use-tls option.')
            sys.exit(1)

        if options.tls_client_auth:
            logging.critical('TLS must be enabled for client authentication.')
            sys.exit(1)

        if options.tls_client_cert_optional:
            logging.critical('TLS must be enabled for client authentication.')
            sys.exit(1)

    if not options.scan_dir:
        options.scan_dir = options.websock_handlers

    if options.use_basic_auth:
        # Pre-encode the credential into the exact Authorization header
        # value expected from clients.
        options.basic_auth_credential = 'Basic ' + base64.b64encode(
            options.basic_auth_credential)

    try:
        if options.thread_monitor_interval_in_sec > 0:
            # Run a thread monitor to show the status of server threads for
            # debugging.
            ThreadMonitor(options.thread_monitor_interval_in_sec).start()

        server = WebSocketServer(options)
        server.serve_forever()
    except Exception, e:
        logging.critical('mod_pywebsocket: %s' % e)
        logging.critical('mod_pywebsocket: %s' % util.get_stack_trace())
        sys.exit(1)
# Script entry point: forward the command-line arguments (without argv[0]).
if __name__ == '__main__':
    _main(sys.argv[1:])
# vi:sts=4 sw=4 et
| mpl-2.0 | -9,203,037,277,841,670,000 | 38.216262 | 80 | 0.589249 | false |
philoniare/horizon | openstack_dashboard/test/integration_tests/tests/test_user_create_delete.py | 16 | 1186 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
class TestUser(helpers.AdminTestCase):
    USER_NAME = helpers.gen_random_resource_name("user")

    def test_create_delete_user(self):
        """Create a user via the admin UI, verify it, then delete it."""
        users_page = self.home_pg.go_to_identity_userspage()
        users_page.create_user(self.USER_NAME,
                               password=self.TEST_PASSWORD,
                               project='admin', role='admin')
        self.assertTrue(users_page.is_user_present(self.USER_NAME))
        users_page.delete_user(self.USER_NAME)
        self.assertFalse(users_page.is_user_present(self.USER_NAME))
| apache-2.0 | 3,659,609,443,971,908,000 | 42.925926 | 78 | 0.700675 | false |
wevote/WeVoteServer | elected_office/urls.py | 1 | 1066 | # elected_office/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import re_path
from . import views_admin
# URL routes for the elected_office admin pages; all handlers live in
# views_admin.
urlpatterns = [
    # views_admin
    # List and bulk operations.
    re_path(r'^$', views_admin.elected_office_list_view, name='elected_office_list', ),
    re_path(r'^delete/$', views_admin.elected_office_delete_process_view, name='elected_office_delete_process'),
    # Create/update flows.
    re_path(r'^edit_process/$', views_admin.elected_office_edit_process_view, name='elected_office_edit_process'),
    re_path(r'^new/$', views_admin.elected_office_new_view, name='elected_office_new'),
    re_path(r'^update/$', views_admin.elected_office_update_view, name='elected_office_update'),
    re_path(r'^real_time_status/$', views_admin.elected_office_update_status, name='elected_office_update_status'),
    # Per-office pages keyed by numeric id.
    re_path(r'^(?P<elected_office_id>[0-9]+)/edit/$', views_admin.elected_office_edit_view, name='elected_office_edit'),
    re_path(r'^(?P<elected_office_id>[0-9]+)/summary/$', views_admin.elected_office_summary_view,
            name='elected_office_summary'),
]
| mit | 5,324,175,033,883,957,000 | 49.761905 | 120 | 0.69137 | false |
vincepandolfo/django | django/contrib/auth/management/commands/changepassword.py | 4 | 2639 | from __future__ import unicode_literals
import getpass
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
class Command(BaseCommand):
    help = "Change a user's password for django.contrib.auth."
    requires_migrations_checks = True
    requires_system_checks = False

    def _get_pass(self, prompt="Password: "):
        """Prompt for a password without echoing; raise on empty input."""
        p = getpass.getpass(prompt=force_str(prompt))
        if not p:
            raise CommandError("aborted")
        return p

    def add_arguments(self, parser):
        parser.add_argument('username', nargs='?',
            help='Username to change password for; by default, it\'s the current username.')
        parser.add_argument('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Specifies the database to use. Default is "default".')

    def handle(self, *args, **options):
        if options['username']:
            username = options['username']
        else:
            # Fall back to the OS-level login name of the invoking user.
            username = getpass.getuser()

        UserModel = get_user_model()

        try:
            u = UserModel._default_manager.using(options['database']).get(**{
                UserModel.USERNAME_FIELD: username
            })
        except UserModel.DoesNotExist:
            raise CommandError("user '%s' does not exist" % username)

        self.stdout.write("Changing password for user '%s'\n" % u)

        # Prompt up to MAX_TRIES times; a try is consumed by either a
        # mismatch or a failed validation.
        MAX_TRIES = 3
        count = 0
        p1, p2 = 1, 2  # To make them initially mismatch.
        password_validated = False
        while (p1 != p2 or not password_validated) and count < MAX_TRIES:
            p1 = self._get_pass()
            p2 = self._get_pass("Password (again): ")
            if p1 != p2:
                self.stdout.write("Passwords do not match. Please try again.\n")
                count += 1
                # Don't validate passwords that don't match.
                continue
            try:
                validate_password(p2, u)
            except ValidationError as err:
                self.stderr.write('\n'.join(err.messages))
                count += 1
            else:
                password_validated = True

        if count == MAX_TRIES:
            raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, count))

        u.set_password(p1)
        u.save()

        return "Password changed successfully for user '%s'" % u
| bsd-3-clause | -2,274,539,071,948,456,200 | 34.662162 | 103 | 0.597954 | false |
tensorflow/lingvo | lingvo/tools/beam_utils.py | 1 | 3573 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for car beam pipelines."""
import apache_beam as beam
def BeamInit():
  """Initialize the beam program.

  Typically first thing to run in main(). This call is needed before FLAGS
  are accessed, for example.
  """
  # Intentionally a no-op here: kept so call sites have a stable
  # initialization hook even when no runner-specific setup is required.
  pass
def GetPipelineRoot(options=None):
  """Return the root of the beam pipeline.

  Typical usage looks like:

    with GetPipelineRoot() as root:
      _ = (root | beam.ParDo() | ...)

  In this example, the pipeline is automatically executed when the context
  is exited, though one can also manually run the pipeline built from the
  root object.

  Args:
    options: A beam.options.pipeline_options.PipelineOptions object.

  Returns:
    A beam.Pipeline root object.
  """
  pipeline = beam.Pipeline(options=options)
  return pipeline
def GetReader(record_format, file_pattern, value_coder, **kwargs):
  """Returns a beam Reader based on record_format and file_pattern.

  Args:
    record_format: String record format; only 'tfrecord' is supported.
    file_pattern: String path describing files to be read.
    value_coder: Coder to use for the values of each record.
    **kwargs: Extra arguments forwarded to the Reader constructor.

  Returns:
    A beam reader object.

  Raises:
    ValueError: If an unsupported record_format is provided.
  """
  # Guard clause: reject unknown formats up front.
  if record_format != "tfrecord":
    raise ValueError("Unsupported record format: {}".format(record_format))
  return beam.io.ReadFromTFRecord(file_pattern, coder=value_coder, **kwargs)
def GetWriter(record_format, file_pattern, value_coder, **kwargs):
  """Returns a beam Writer.

  Args:
    record_format: String record format; only 'tfrecord' is supported.
    file_pattern: String path describing files to be written to.
    value_coder: Coder to use for the values of each written record.
    **kwargs: Extra arguments forwarded to the Writer constructor.

  Returns:
    A beam writer object.

  Raises:
    ValueError: If an unsupported record_format is provided.
  """
  # Guard clause mirrors GetReader for consistency.
  if record_format != "tfrecord":
    raise ValueError("Unsupported record format: {}".format(record_format))
  return beam.io.WriteToTFRecord(file_pattern, coder=value_coder, **kwargs)
def GetEmitterFn(record_format):
  """Returns an Emitter function for the given record_format.

  An Emitter function takes a (key, value) pair and returns a structure
  compatible with the Beam Writer associated with record_format.

  Args:
    record_format: String record format, e.g., 'tfrecord' to write as.

  Returns:
    An emitter function of (key, value) -> Writer's input type.

  Raises:
    ValueError: If an unsupported record_format is provided.
  """
  if record_format != "tfrecord":
    raise ValueError("Unsupported record format: {}".format(record_format))

  def _EmitValueOnly(key, value):
    # TFRecord writers consume bare values; the key is discarded.
    del key
    return [value]

  return _EmitValueOnly
| apache-2.0 | 8,200,211,608,645,151,000 | 30.069565 | 80 | 0.707809 | false |
zofuthan/edx-platform | common/lib/xmodule/xmodule/library_root_xblock.py | 47 | 5037 | """
'library' XBlock (LibraryRoot)
"""
import logging
from xmodule.studio_editable import StudioEditableModule
from xblock.fields import Scope, String, List, Boolean
from xblock.fragment import Fragment
from xblock.core import XBlock
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class LibraryRoot(XBlock):
    """
    The LibraryRoot is the root XBlock of a content library. All other blocks in
    the library are its children. It contains metadata such as the library's
    display_name.
    """
    display_name = String(
        help=_("Enter the name of the library as it should appear in Studio."),
        default="Library",
        display_name=_("Library Display Name"),
        scope=Scope.settings
    )
    advanced_modules = List(
        display_name=_("Advanced Module List"),
        help=_("Enter the names of the advanced components to use in your library."),
        scope=Scope.settings,
        xml_node=True,
    )
    # NOTE(review): the display_name says "Hide children preview" but the flag
    # is used as "show previews" (default True) — confirm the intended wording.
    show_children_previews = Boolean(
        display_name="Hide children preview",
        help="Choose if preview of library contents is shown",
        scope=Scope.user_state,
        default=True
    )
    has_children = True
    has_author_view = True

    def __unicode__(self):
        return u"Library: {}".format(self.display_name)

    def __str__(self):
        return unicode(self).encode('utf-8')

    def author_view(self, context):
        """
        Renders the Studio preview view.
        """
        fragment = Fragment()
        self.render_children(context, fragment, can_reorder=False, can_add=True)
        return fragment

    def render_children(self, context, fragment, can_reorder=False, can_add=False):  # pylint: disable=unused-argument
        """
        Renders the children of the module with HTML appropriate for Studio.
        Reordering is not supported.

        Supports optional paging via context['paging'] (page_number/page_size)
        and forced preview rendering of a single child via
        context['force_render'] (a usage-id string).
        """
        contents = []
        paging = context.get('paging', None)
        children_count = len(self.children)  # pylint: disable=no-member
        # Default window: all children.
        item_start, item_end = 0, children_count
        # TODO sort children
        if paging:
            page_number = paging.get('page_number', 0)
            raw_page_size = paging.get('page_size', None)
            # A missing page_size means "everything on one page".
            page_size = raw_page_size if raw_page_size is not None else children_count
            item_start, item_end = page_size * page_number, page_size * (page_number + 1)
        children_to_show = self.children[item_start:item_end]  # pylint: disable=no-member
        force_render = context.get('force_render', None)
        for child_key in children_to_show:
            # Children must have a separate context from the library itself. Make a copy.
            child_context = context.copy()
            child_context['show_preview'] = self.show_children_previews
            child_context['can_edit_visibility'] = False
            child = self.runtime.get_block(child_key)
            child_view_name = StudioEditableModule.get_preview_view_name(child)
            if unicode(child.location) == force_render:
                child_context['show_preview'] = True
            # Render either a full preview or a lightweight placeholder.
            if child_context['show_preview']:
                rendered_child = self.runtime.render_child(child, child_view_name, child_context)
            else:
                rendered_child = self.runtime.render_child_placeholder(child, child_view_name, child_context)
            fragment.add_frag_resources(rendered_child)
            contents.append({
                'id': unicode(child.location),
                'content': rendered_child.content,
            })
        fragment.add_content(
            self.runtime.render_template("studio_render_paged_children_view.html", {
                'items': contents,
                'xblock_context': context,
                'can_add': can_add,
                'first_displayed': item_start,
                'total_children': children_count,
                'displayed_children': len(children_to_show),
                'previews': self.show_children_previews
            })
        )

    @property
    def display_org_with_default(self):
        """
        Org display names are not implemented. This just provides API compatibility
        with CourseDescriptor. Always returns the raw 'org' field from the key.
        """
        return self.scope_ids.usage_id.course_key.org

    @property
    def display_number_with_default(self):
        """
        Display numbers are not implemented. This just provides API compatibility
        with CourseDescriptor. Always returns the raw 'library' field from the key.
        """
        return self.scope_ids.usage_id.course_key.library

    @XBlock.json_handler
    def trigger_previews(self, request_body, suffix):  # pylint: disable=unused-argument
        """ Enable or disable previews in studio for library children. """
        self.show_children_previews = request_body.get('showChildrenPreviews', self.show_children_previews)
        return {'showChildrenPreviews': self.show_children_previews}
| agpl-3.0 | -1,216,554,820,114,427,400 | 36.311111 | 118 | 0.625174 | false |
mnunberg/couchbase-python-client | examples/search_keywords.py | 1 | 3016 | #!/usr/bin/env python
#
# Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file demonstrates some of the functionalities available with
# view queries. This creates a bunch of key-value pairs where the value is
# a number. It also creates a view to index the key-value pairs by the
# number itself, and finally queries the view to return the ten items with
# the highest values.
from argparse import ArgumentParser
import random
import pprint
from couchbase.bucket import Bucket
# Example script: populate a Couchbase bucket with random "<adjective> <noun>"
# keys holding random integer values, then query a view to list the ten
# entries with the highest values.
ap = ArgumentParser()
ap.add_argument('-D', '--create-design', default=False,
                action='store_true',
                help='whether to create the design')
ap.add_argument('-n', '--number-of-terms', default=10,
                type=int, help="How many terms to generate")
options = ap.parse_args()

c = Bucket('couchbase://localhost/default')

# Design document: a view emitting every numeric document keyed by its value.
DESIGN = {
    '_id': '_design/search_keywords',
    'language': 'javascript',
    'views': {
        'top_keywords': {
            'map':
            """
            function(doc) {
                if (typeof doc === 'number') {
                    emit(doc, null);
                }
            }
            """
        }
    }
}

if options.create_design:
    # syncwait blocks until the view is available (up to 5 seconds).
    bm = c.bucket_manager()
    bm.design_create('search_keywords', DESIGN, use_devmode=False, syncwait=5)

NOUNS = ['cow', 'cat', 'dog', 'computer', 'WMD']
ADJECTIVES = ['happy', 'sad', 'thoughtful', 'extroverted']

# Build the key-value batch. Note: duplicate "<adjective> <noun>" draws
# overwrite each other in the dict, so fewer than number_of_terms keys may
# actually be written.
kv = {}
for x in range(options.number_of_terms):
    n = random.choice(NOUNS)
    a = random.choice(ADJECTIVES)
    kv[" ".join([a, n])] = random.randint(1, 100000)

c.upsert_multi(kv)

# Query the view for the ten highest-valued entries.
vret = c.query('search_keywords',
               'top_keywords',
               limit=10,
               descending=True)

for row in vret:
    pprint.pprint(row, indent=4)

# Sample output:
#[ { u'id': u'WMD sad', u'key': 92772, u'value': None},
#  { u'id': u'WMD thoughtful', u'key': 76222, u'value': None},
#  { u'id': u'cow happy', u'key': 71984, u'value': None},
#  { u'id': u'computer sad', u'key': 68849, u'value': None},
#  { u'id': u'cat thoughtful', u'key': 68417, u'value': None},
#  { u'id': u'computer thoughtful', u'key': 67518, u'value': None},
#  { u'id': u'dog thoughtful', u'key': 67350, u'value': None},
#  { u'id': u'computer extroverted', u'key': 63279, u'value': None},
#  { u'id': u'cow thoughtful', u'key': 60962, u'value': None},
#  { u'id': u'cow sad', u'key': 49510, u'value': None}]
| apache-2.0 | 4,338,166,786,258,678,300 | 30.416667 | 78 | 0.613395 | false |
jackru/pybrain | pybrain/rl/environments/simplerace/simpleracetcp.py | 25 | 4101 | from __future__ import print_function
__author__ = 'Julian Togelius, julian@idsia.ch'
from pybrain.rl.environments import Environment
from math import sqrt
import socket
import string
from scipy import zeros
class SimpleraceEnvironment(Environment):
    """PyBrain RL environment that talks to a simplerace server over TCP.

    NOTE(review): this code is Python 2 era (`string.split`, byte/str mixing
    on the socket) — confirm the target interpreter before reuse.
    """
    # Class-level defaults; per-instance values are (re)set in reset().
    firstCarScore = 0
    secondCarScore = 0
    lastStepCurrentWp = [0, 0]
    lastStepNextWp = [0, 0]
    indim = 2   # action dimensionality (steer/drive axes)
    outdim = 7  # sensor dimensionality

    def __init__(self, host="127.0.0.1", port=6524):
        self.theSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.theSocket.connect((host, port))
        self.step = 0
        print("Connected to a simplerace server")
        self.reset()
        self.serverIsReady = False

    def getSensors(self):
        # Last sensor vector parsed in performAction.
        return self.sensors

    def performAction(self, action):
        # Map the two continuous action axes onto the server's 9 discrete
        # commands (0..8) by thresholding each axis at +/-0.3.
        # there is a nicer way of doing the following, but i'll wait with that until
        # i'm a bit more fluent in Python
        if (action[0] > 0.3):
            if(action[1]) > 0.3:
                command = 8
            elif(action[1]) < -0.3:
                command = 6
            else:
                command = 7
        elif (action[0] < -0.3):
            if(action[1]) > 0.3:
                command = 2
            elif(action[1]) < -0.3:
                command = 0
            else:
                command = 1
        else:
            if(action[1]) > 0.3:
                command = 5
            elif(action[1]) < -0.3:
                command = 3
            else:
                command = 4
        # Skip exactly one send after a server-side reset; only send once the
        # server has announced readiness.
        if self.waitOne:
            print('Waiting one step')
            self.waitOne = False
        elif self.serverIsReady:
            self.theSocket.send (str(command) + "\n")
        else:
            print("not sending")
        # get and process the answer
        data = ""
        while len (data) < 2:
            data = self.theSocket.recv(1000)
        #print("received", data)
        inputs = string.split(str(data), " ")
        if (inputs[0][:5] == "reset"):
            print("Should we reset the scores here?")
            self.reset ()
            self.serverIsReady = True
            self.waitOne = True
        elif (inputs[0] == "data"):
            # Fields 2..19 are numeric; 2..8 are the agent's sensors,
            # 18..19 the current way point position.
            inputs[2:20] = list(map(float, inputs[2:20]))
            self.sensors = inputs[2:9]
            currentWp = [inputs[18], inputs[19]]
            # check that this is not the first step of an episode
            if (self.lastStepCurrentWp[0] != 0):
                # check if a way point position has changed
                if (currentWp[0] != self.lastStepCurrentWp[0]):
                    # check that we don't have a server side change of episode
                    # NOTE(review): lastStepNextWp is only ever set in reset(),
                    # so it stays [0, 0] — verify this comparison is intended.
                    if (currentWp[0] != self.lastStepNextWp[0]):
                        print(("%.3f %.3f %.3f %.3f " % (currentWp[0], currentWp[1], self.lastStepNextWp[0], self.lastStepNextWp[1])))
                        raise Exception("Unexpected episode change")
                    else:
                        # all is fine, increase score. but for who?
                        # Whichever car is closer to the just-passed way point
                        # gets the point.
                        ownPosition = [inputs[9], inputs[10]]
                        otherPosition = [inputs[14], inputs[15]]
                        if (self.euclideanDistance(ownPosition, self.lastStepCurrentWp) < self.euclideanDistance(otherPosition, self.lastStepCurrentWp)):
                            self.firstCarScore += 1
                        else:
                            self.secondCarScore += 1
            # store old way point positions
            self.lastStepCurrentWp = currentWp
            self.step += 1
        elif (len (inputs[0]) < 2):
            print("impossible!")
        else:
            print(("incomprehensible and thus roundly ignored", data))

    def reset(self):
        # Clear per-episode state (scores, step counter, cached way points).
        self.step = 0
        self.firstCarScore = 0
        self.secondCarScore = 0
        self.lastStepCurrentWp = [0, 0]
        self.lastStepNextWp = [0, 0]
        self.sensors = zeros(self.outdim)
        self.waitOne = False

    def euclideanDistance(self, firstPoint, secondPoint):
        # Plain 2-D Euclidean distance between [x, y] points.
        return sqrt ((firstPoint[0] - secondPoint[0]) ** 2 + (firstPoint[1] - secondPoint[1]) ** 2)
| bsd-3-clause | -927,768,900,925,742,200 | 35.616071 | 153 | 0.52158 | false |
arkatebi/DynamicalSystems | toggleSwitch/tSwitch-det-pSet-3.py | 1 | 9567 | #/usr/bin/env python
import auxiliary_functions as aux
import PyDSTool as dst
from PyDSTool import common as cmn
import numpy as np
from matplotlib import pyplot as plt
import sys
#------------------------------------------------------------------------------#
def defineSystem():
    """Build and return the PyDSTool args object describing the toggle-switch ODEs.

    Parameters, equations and auxiliary functions come from the shared
    `auxiliary_functions` module (parameter set 3).
    """
    dsargs = cmn.args(name='Toggle switch of two genes X and Y')
    dsargs.pars = aux.parameter_set_3()   # model parameters
    dsargs.varspecs = aux.equations()     # state variables / right-hand sides
    dsargs.fnspecs = aux.functions()      # auxiliary function definitions
    dsargs.ics = {'X': 10, 'Y': 10}       # initial conditions
    dsargs.xdomain = {'X': [0, 1.0e+4], 'Y': [0, 1.0e+4]}
    dsargs.tdomain = [0, 100]             # integration time range
    return dsargs
#------------------------------------------------------------------------------#
def t_dynamics_X(pts):
    """Plot X(t) from a sampled trajectory Pointset."""
    # NOTE(review): relies on the module-global `ode` for the figure title.
    # PyPlot commands
    plt.plot(pts['t'], pts['X'])
    plt.xlabel('t')  # Axes labels
    plt.ylabel('X')  # ...
    #plt.xlim([0,7000])
    plt.ylim([0,200])  # Range of the y axis
    plt.title(ode.name)  # Figure title from model name
    plt.show()
    plt.figure()

def t_dynamics_Y(pts):
    """Plot Y(t) from a sampled trajectory Pointset."""
    # PyPlot commands
    plt.plot(pts['t'], pts['Y'])
    plt.xlabel('t')  # Axes labels
    plt.ylabel('Y')  # ...
    #plt.xlim([0,7000])
    plt.ylim([0,200])  # Range of the y axis
    plt.title(ode.name)  # Figure title from model name
    plt.show()
    plt.figure()

def t_dynamics_XY(pts):
    """Plot the (X, Y) phase-plane trace of a sampled trajectory."""
    # PyPlot commands
    plt.plot(pts['X'], pts['Y'])
    plt.xlabel('X')  # Axes labels
    plt.ylabel('Y')  # ...
    #plt.xlim([0,7000])
    plt.ylim([0,800])  # Range of the y axis
    plt.title(ode.name)  # Figure title from model name
    plt.show()

# NOTE(review): the next two functions are SHADOWED by the later definitions
# with the same names (below). As written they are dead code; confirm which
# version is intended and remove the other.
def t_dynamics_multi_ICs_X(ode):
    plt.ylim([0,200])
    plt.hold(True)  # Sequences of plot commands will not clear existing figures
    for i, x0 in enumerate(np.linspace(-20,10,30)):
        ode.set(ics = { 'X': x0 } )  # Initial condition
        # Trajectories are called pol0, pol1, ...
        # sample them on the fly to create Pointset tmp
        tmp = ode.compute('pol%3i' % i).sample()
        plt.plot(tmp['t'], tmp['X'])
    plt.xlabel('time')
    plt.ylabel('X')
    plt.title(ode.name + ' multi ICs')
    plt.show()

def t_dynamics_multi_ICs_Y(ode):
    plt.ylim([0,200])
    plt.hold(True)  # Sequences of plot commands will not clear existing figures
    for i, y0 in enumerate(np.linspace(-20,10,30)):
        ode.set(ics = { 'Y': y0 } )  # Initial condition
        # Trajectories are called pol0, pol1, ...
        # sample them on the fly to create Pointset tmp
        tmp = ode.compute('pol%3i' % i).sample()
        plt.plot(tmp['t'], tmp['Y'])
    plt.xlabel('time')
    plt.ylabel('Y')
    plt.title(ode.name + ' multi ICs')
    plt.show()

# Second (effective) definition: sweeps X initial conditions and plots X(t).
def t_dynamics_multi_ICs_X(ode):
    plt.figure()
    plt.ylim([0,900])
    plt.hold(True)  # Sequences of plot commands will not clear existing figures
    for i, x0 in enumerate(np.linspace(-20,10,30)):
        ode.set(ics = { 'X': x0 } )  # Initial condition
        # Trajectories are called pol0, pol1, ...
        # sample them on the fly to create Pointset tmp
        tmp = ode.compute('pol%3i' % i).sample()
        plt.plot(tmp['t'], tmp['X'])
    plt.xlabel('time')
    plt.ylabel('X')
    plt.title(ode.name + ' multi ICs X')
    plt.show()

# Second (effective) definition: sweeps Y initial conditions and plots Y(t).
def t_dynamics_multi_ICs_Y(ode):
    plt.figure()
    plt.ylim([0,900])
    plt.hold(True)  # Sequences of plot commands will not clear existing figures
    for i, y0 in enumerate(np.linspace(-20,10,30)):
        ode.set(ics = { 'Y': y0 } )  # Initial condition
        # Trajectories are called pol0, pol1, ...
        # sample them on the fly to create Pointset tmp
        tmp = ode.compute('pol%3i' % i).sample()
        plt.plot(tmp['t'], tmp['Y'])
    plt.xlabel('time')
    plt.ylabel('Y')
    plt.title(ode.name + ' multi ICs Y')
    plt.show()

def t_dynamics_multi_ICs_XY(ode):
    """Phase-plane traces for a 4x4 grid of (X, Y) initial conditions."""
    plt.figure()
    plt.ylim([0,900])
    # Sequences of plot commands will not clear existing figures:
    plt.hold(True)
    # NOTE(review): the inner loop reuses the index name `i`, shadowing the
    # outer loop's — harmless here since `i` is only used in the label.
    for i, x0 in enumerate(np.linspace(1,1000,4)):
        for i, y0 in enumerate(np.linspace(1,1000,4)):
            # Reset the initial conditions in the Vode_ODEsystem object ode:
            ode.set(ics = { 'X': x0, 'Y': y0 } )
            # Trajectories are called pol0, pol1, ...
            # Sample them on the fly to create tmp, a Pointset object:
            tmp = ode.compute('pol%3i' % i).sample()
            plt.plot(tmp['X'], tmp['Y'])
    plt.xlabel('X')
    plt.ylabel('Y')
    #plt.title(ode.name + ': multi ICs for both')
    plt.show()
    #plt.savefig('./figures/parSet-1_tdynamics.pdf')
#------------------------------------------------------------------------------#
def getBifDiagrams(ode):
    """Plot bifurcation diagrams, one free parameter at a time.

    NOTE(review): the unconditional sys.exit(0) after the gY section means
    the kX/kY and lX/lY sections below are UNREACHABLE — they appear to be
    kept for manual one-at-a-time runs; confirm before relying on them.
    """
    freepar='gX'
    fp=aux.fast_fixedpoint(ode)
    print(fp.values())
    aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
                          LocBifPoints=['LP','B'], bif_startpoint=50,
                          maxstep=1e+1, minstep=0.01, step=0.1,
                          silence=True, fs=[4,4], ics=[fp],
                          xlim=[0,200], ylim=[0,700], fontsize=10)
    freepar='gY'
    fp=aux.fast_fixedpoint(ode)
    aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
                          LocBifPoints=['LP','B'], bif_startpoint=50,
                          maxstep=1e+1, minstep=1e-2, step=1e-1,
                          silence=True, fs=[4,4], ics=[fp],
                          xlim=[0,200], ylim=[0,700], fontsize=10)
    sys.exit(0)
    # --- Everything below is dead code (see NOTE above). ---
    freepar='kX'
    fp=aux.fast_fixedpoint(ode)
    print(fp.values())
    aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
                          LocBifPoints=['LP','B'], bif_startpoint=0.1,
                          maxstep=1e+1, minstep=0.01, step=0.1,
                          silence=True, fs=[4,4], ics=[fp],
                          xlim=[0,1], ylim=[0,700], fontsize=10)
    freepar='kY'
    fp=aux.fast_fixedpoint(ode)
    print(fp.values())
    aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
                          LocBifPoints=['LP','B'], bif_startpoint=0.1,
                          maxstep=1e+1, minstep=0.01, step=0.1,
                          silence=True, fs=[4,4], ics=[fp],
                          xlim=[0,1], ylim=[0,700], fontsize=10)
    sys.exit(0)
    freepar='lX'
    fp=aux.fast_fixedpoint(ode)
    print(fp.values())
    aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
                          LocBifPoints=['LP','B'], bif_startpoint=0.1,
                          maxstep=1e+1, minstep=0.01, step=0.1,
                          silence=True, fs=[4,4], ics=[fp],
                          xlim=[0,1], ylim=[0,700], fontsize=10)
    freepar='lY'
    fp=aux.fast_fixedpoint(ode)
    print(fp.values())
    aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
                          LocBifPoints=['LP','B'], bif_startpoint=0.1,
                          maxstep=1e+1, minstep=0.01, step=0.1,
                          silence=True, fs=[4,4], ics=[fp],
                          xlim=[0,1], ylim=[0,700], fontsize=10)
    sys.exit(0)
#------------------------------------------------------------------------------#
def getNullClines(DSargs, ode):
    """Find the system's fixed points, print their stability, and plot nullclines."""
    # Imported here so the phaseplane toolbox is only loaded when needed.
    from PyDSTool.Toolbox import phaseplane as pp
    vlim = {'X': [1, 700], 'Y': [1, 700]}
    # Locate fixed points, then drop near-duplicates (tolerance: 4 decimals).
    fp = aux.eliminate_redundants(pp.find_fixedpoints(ode, n=2, maxsearch=1e+4,
                                                      eps=1e-12),
                                  4)
    stab = aux.stability(fp, ode)
    for i in range(len(fp)):
        print(stab[i], fp[i])
    nfp=0
    aux.nullclines(['X','Y'], DSargs, stab, fp, nfp=nfp, vlim=vlim,
                   maxpoints=[800,800],
                   xticks=[0, 100, 200, 300, 400, 500, 600, 700],
                   yticks=[0, 100, 200, 300, 400, 500, 600, 700],
                   step=0.01, minstep=0.001, maxstep=10, fs=[3,3],
                   fontsize=8, silence=False)
#------------------------------------------------------------------------------#
if __name__ == '__main__':
    # Build the model description and integrate it once, then run the
    # nullcline analysis. The commented-out calls are optional diagnostics.
    DSargs = defineSystem()
    # Obtain a Vode_ODEsystem object:
    # (similar to VODE from SciPy)
    ode = dst.Generator.Vode_ODEsystem(DSargs)
    # Obtain a Trajectory object (integrate ODE):
    traj = ode.compute('polarization')
    # Collect data points as a Pointset object:
    pts = traj.sample(dt=0.01)
    #t_dynamics_X(pts)
    #t_dynamics_Y(pts)
    #t_dynamics_XY(pts)
    #t_dynamics_multi_ICs_X(ode)
    #t_dynamics_multi_ICs_Y(ode)
    #t_dynamics_multi_ICs_XY(ode)
    #getBifDiagrams(ode)
    getNullClines(DSargs, ode)
| gpl-3.0 | -2,041,364,280,348,762,000 | 37.732794 | 80 | 0.486987 | false |
thomasgilgenast/spqr-nonrel | django/contrib/localflavor/uy/forms.py | 310 | 2083 | # -*- coding: utf-8 -*-
"""
UY-specific form helpers.
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms.fields import Select, RegexField
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.localflavor.uy.util import get_validation_digit
class UYDepartamentSelect(Select):
    """
    A Select widget that uses a list of Uruguayan departaments as its choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the choices list is only loaded when the widget
        # is actually instantiated.
        from uy_departaments import DEPARTAMENT_CHOICES
        super(UYDepartamentSelect, self).__init__(attrs, choices=DEPARTAMENT_CHOICES)
class UYCIField(RegexField):
    """
    A field that validates Uruguayan 'Cedula de identidad' (CI) numbers.
    """
    default_error_messages = {
        'invalid': _("Enter a valid CI number in X.XXX.XXX-X,"
                     "XXXXXXX-X or XXXXXXXX format."),
        'invalid_validation_digit': _("Enter a valid CI number."),
    }

    def __init__(self, *args, **kwargs):
        # 'num' captures 6-8 digits with optional dot grouping; 'val' is the
        # trailing validation digit (dash optional).
        super(UYCIField, self).__init__(r'(?P<num>(\d{6,7}|(\d\.)?\d{3}\.\d{3}))-?(?P<val>\d)',
                                        *args, **kwargs)

    def clean(self, value):
        """
        Validates format and validation digit.

        The official format is [X.]XXX.XXX-X but usually dots and/or slash are
        omitted so, when validating, those characters are ignored if found in
        the correct place. The three typically used formats are supported:
        [X]XXXXXXX, [X]XXXXXX-X and [X.]XXX.XXX-X.
        """
        value = super(UYCIField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        match = self.regex.match(value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        # Strip the dot grouping before computing the checksum.
        number = int(match.group('num').replace('.', ''))
        validation_digit = int(match.group('val'))
        if not validation_digit == get_validation_digit(number):
            raise ValidationError(self.error_messages['invalid_validation_digit'])
        return value
| bsd-3-clause | 4,501,689,286,524,326,000 | 33.716667 | 95 | 0.62554 | false |
catapult-project/catapult | dashboard/dashboard/update_test_suites_test.py | 3 | 13778 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import update_test_suites
from dashboard.common import descriptor
from dashboard.common import namespaced_stored_object
from dashboard.common import stored_object
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import graph_data
class ListTestSuitesTest(testing_common.TestCase):
  """Tests for the /update_test_suites handler and its suite-dict cache."""

  def setUp(self):
    super(ListTestSuitesTest, self).setUp()
    app = webapp2.WSGIApplication([
        ('/update_test_suites', update_test_suites.UpdateTestSuitesHandler)
    ])
    self.testapp = webtest.TestApp(app)
    testing_common.SetIsInternalUser('internal@chromium.org', True)
    # Tests run as an anonymous (external) user unless they opt in.
    self.UnsetCurrentUser()
    stored_object.Set(descriptor.PARTIAL_TEST_SUITES_KEY, [
        'TEST_PARTIAL_TEST_SUITE',
    ])
    stored_object.Set(descriptor.GROUPABLE_TEST_SUITE_PREFIXES_KEY, [
        'TEST_GROUPABLE%',
    ])
    descriptor.Descriptor.ResetMemoizedConfigurationForTesting()

  def testFetchCachedTestSuites_NotEmpty(self):
    # If the cache is set, then whatever's there is returned.
    key = namespaced_stored_object.NamespaceKey(
        update_test_suites._LIST_SUITES_CACHE_KEY)
    stored_object.Set(key, {'foo': 'bar'})
    self.assertEqual({'foo': 'bar'}, update_test_suites.FetchCachedTestSuites())

  def _AddSampleData(self):
    # One master, two bots, three suites (with nested sub-tests and a
    # reference series under 'scrolling').
    testing_common.AddTests(
        ['Chromium'], ['win7', 'mac'], {
            'dromaeo': {
                'dom': {},
                'jslib': {},
            },
            'scrolling': {
                'commit_time': {
                    'www.yahoo.com': {},
                    'www.cnn.com': {},
                },
                'commit_time_ref': {},
            },
            'really': {
                'nested': {
                    'very': {
                        'deeply': {
                            'subtest': {}
                        }
                    },
                    'very_very': {}
                }
            },
        })

  def testPost_ForcesCacheUpdate(self):
    key = namespaced_stored_object.NamespaceKey(
        update_test_suites._LIST_SUITES_CACHE_KEY)
    stored_object.Set(key, {'foo': 'bar'})
    self.assertEqual({'foo': 'bar'}, update_test_suites.FetchCachedTestSuites())
    self._AddSampleData()
    # Because there is something cached, the cache is
    # not automatically updated when new data is added.
    self.assertEqual({'foo': 'bar'}, update_test_suites.FetchCachedTestSuites())
    stored_object.Set(
        namespaced_stored_object.NamespaceKey(
            update_test_suites.TEST_SUITES_2_CACHE_KEY), ['foo'])
    self.assertEqual(['foo'], update_test_suites.FetchCachedTestSuites2())
    # Making a request to /udate_test_suites forces an update.
    self.testapp.post('/update_test_suites')
    self.assertEqual(
        {
            'dromaeo': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'scrolling': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'really': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
        }, update_test_suites.FetchCachedTestSuites())
    self.assertEqual(['dromaeo', 'really', 'scrolling'],
                     update_test_suites.FetchCachedTestSuites2())

  def testPost_InternalOnly(self):
    # Internal users should also see internal-only bots/tests in the cache.
    self.SetCurrentUser('internal@chromium.org')
    self._AddSampleData()
    master_key = ndb.Key('Master', 'Chromium')
    graph_data.Bot(
        id='internal_mac', parent=master_key, internal_only=True).put()
    t = graph_data.TestMetadata(
        id='Chromium/internal_mac/internal_test', internal_only=True)
    t.UpdateSheriff()
    t.put()
    self.testapp.post('/update_test_suites?internal_only=true')
    self.assertEqual(
        {
            'dromaeo': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'internal_test': {'mas': {'Chromium': {'internal_mac': False}}},
            'scrolling': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'really': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
        }, update_test_suites.FetchCachedTestSuites())

  def testFetchCachedTestSuites_Empty_UpdatesWhenFetching(self):
    # If the cache is not set at all, then FetchCachedTestSuites
    # just updates the cache before returning the list.
    self._AddSampleData()
    self.assertEqual(
        {
            'dromaeo': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'scrolling': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'really': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
        }, update_test_suites.FetchCachedTestSuites())

  def testFetchSuites_BasicDescription(self):
    # A description set on the suite-level tests appears under 'des'.
    self._AddSampleData()
    for test_path in ['Chromium/win7/scrolling', 'Chromium/mac/scrolling']:
      test = utils.TestKey(test_path).get()
      test.description = 'Description string.'
      test.UpdateSheriff()
      test.put()
    self.assertEqual(
        {
            'dromaeo': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'scrolling': {
                'mas': {'Chromium': {'mac': False, 'win7': False}},
                'des': 'Description string.'
            },
            'really': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
        }, update_test_suites.FetchCachedTestSuites())

  def testFetchSuites_DifferentMasters(self):
    # If the cache is not set at all, then FetchCachedTestSuites
    # just updates the cache before returning the list.
    self._AddSampleData()
    testing_common.AddTests(['ChromiumFYI'], ['linux'], {
        'sunspider': {
            'Total': {},
        },
    })
    self.assertEqual(
        {
            'dromaeo': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'scrolling': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'really': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'sunspider': {'mas': {'ChromiumFYI': {'linux': False}}},
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites_SingleDeprecatedBot(self):
    self._AddSampleData()
    # Deprecate one suite on a single bot -- only that bot should be marked
    # deprecated in the response dict; the suite overall is not.
    for bot in ['win7']:
      test = utils.TestKey('Chromium/%s/really' % bot).get()
      test.deprecated = True
      test.UpdateSheriff()
      test.put()
    self.assertEqual(
        {
            'dromaeo': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'scrolling': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'really': {'mas': {'Chromium': {'mac': False, 'win7': True}}},
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites_AllDeprecatedBots(self):
    self._AddSampleData()
    # For another test suite, set it as deprecated on both bots -- it should
    # be marked as deprecated in the response dict.
    for bot in ['win7', 'mac']:
      test = utils.TestKey('Chromium/%s/really' % bot).get()
      test.deprecated = True
      test.UpdateSheriff()
      test.put()
    self.assertEqual(
        {
            'dromaeo': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'scrolling': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'really': {
                'dep': True,
                'mas': {'Chromium': {'mac': True, 'win7': True}}
            },
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites_BasicMonitored(self):
    self._AddSampleData()
    self.assertEqual(
        {
            'dromaeo': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'scrolling': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'really': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites_MultipleMonitored(self):
    # A suite present on two masters lists both under 'mas'.
    self._AddSampleData()
    testing_common.AddTests(['ChromiumFYI'], ['linux'], {
        'dromaeo': {
            'foo': {},
        },
    })
    self.assertEqual(
        {
            'dromaeo': {
                'mas': {
                    'Chromium': {'mac': False, 'win7': False},
                    'ChromiumFYI': {'linux': False}
                },
            },
            'scrolling': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
            'really': {'mas': {'Chromium': {'mac': False, 'win7': False}}},
        }, update_test_suites._CreateTestSuiteDict())

  def testFetchSuites(self):
    # _FetchSuites returns suite-level TestMetadata entities, ordered by key.
    self._AddSampleData()
    suites = update_test_suites._FetchSuites()
    suite_keys = [s.key for s in suites]
    self.assertEqual(
        list(
            map(utils.TestKey, [
                'Chromium/mac/dromaeo',
                'Chromium/mac/really',
                'Chromium/mac/scrolling',
                'Chromium/win7/dromaeo',
                'Chromium/win7/really',
                'Chromium/win7/scrolling',
            ])), suite_keys)

  def testGetSubTestPath(self):
    # The sub-test path is everything after master/bot/suite.
    key = utils.TestKey('Chromium/mac/my_suite/foo/bar')
    self.assertEqual('foo/bar', update_test_suites._GetTestSubPath(key))

  def testPartialTestSuites(self):
    # Suites listed in PARTIAL_TEST_SUITES_KEY combine with their first child
    # into a composite 'partial:composite' suite name.
    testing_common.AddTests(['master'], ['bot'], {
        'TEST_PARTIAL_TEST_SUITE': {
            'COMPOSITE': {
                'measurement': {},
            },
        },
    })
    self.testapp.post('/update_test_suites')
    self.assertEqual(['TEST_PARTIAL_TEST_SUITE:COMPOSITE'],
                     update_test_suites.FetchCachedTestSuites2())
| bsd-3-clause | -5,500,318,224,445,049,000 | 28.503212 | 80 | 0.409784 | false |
Odingod/mne-python | mne/gui/tests/test_kit2fiff_gui.py | 13 | 3412 | # Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from nose.tools import assert_true, assert_false, assert_equal
import mne
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.io import Raw
from mne.utils import _TempDir, requires_traits, run_tests_if_main
mrk_pre_path = os.path.join(kit_data_dir, 'test_mrk_pre.sqd')
mrk_post_path = os.path.join(kit_data_dir, 'test_mrk_post.sqd')
sqd_path = os.path.join(kit_data_dir, 'test.sqd')
hsp_path = os.path.join(kit_data_dir, 'test_hsp.txt')
fid_path = os.path.join(kit_data_dir, 'test_elp.txt')
fif_path = os.path.join(kit_data_dir, 'test_bin_raw.fif')
warnings.simplefilter('always')
@requires_traits
def test_kit2fiff_model():
    """Test CombineMarkersModel Traits Model"""
    from mne.gui._kit2fiff_gui import Kit2FiffModel, Kit2FiffPanel
    tempdir = _TempDir()
    tgt_fname = os.path.join(tempdir, 'test-raw.fif')
    model = Kit2FiffModel()
    # With no input files set, the model must refuse to save.
    assert_false(model.can_save)
    model.markers.mrk1.file = mrk_pre_path
    model.markers.mrk2.file = mrk_post_path
    model.sqd_file = sqd_path
    model.hsp_file = hsp_path
    # Fiducials are still missing, so saving stays disabled.
    assert_false(model.can_save)
    model.fid_file = fid_path

    # export raw
    assert_true(model.can_save)
    raw_out = model.get_raw()
    raw_out.save(tgt_fname)
    raw = Raw(tgt_fname)

    # Compare exported raw with the original binary conversion
    raw_bin = Raw(fif_path)
    trans_bin = raw.info['dev_head_t']['trans']
    want_keys = list(raw_bin.info.keys())
    assert_equal(sorted(want_keys), sorted(list(raw.info.keys())))
    trans_transform = raw_bin.info['dev_head_t']['trans']
    assert_allclose(trans_transform, trans_bin, 0.1)

    # Averaging markers
    model.markers.mrk3.method = "Average"
    trans_avg = model.dev_head_trans
    # Averaged transform differs from the per-marker one but stays close
    # to the reference conversion.
    assert_false(np.all(trans_avg == trans_transform))
    assert_allclose(trans_avg, trans_bin, 0.1)

    # Test exclusion of one marker
    model.markers.mrk3.method = "Transform"
    model.use_mrk = [1, 2, 3, 4]
    assert_false(np.all(model.dev_head_trans == trans_transform))
    assert_false(np.all(model.dev_head_trans == trans_avg))
    assert_false(np.all(model.dev_head_trans == np.eye(4)))

    # test setting stim channels
    model.stim_slope = '+'
    events_bin = mne.find_events(raw_bin, stim_channel='STI 014')
    model.stim_chs = '<'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin)
    events_rev = events_bin.copy()
    events_rev[:, 2] = 1
    model.stim_chs = '>'
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_rev)
    # Manual channel list should reproduce the default ('<') event coding.
    model.stim_chs = 'man'
    model.stim_chs_manual = list(range(167, 159, -1))
    raw = model.get_raw()
    events = mne.find_events(raw, stim_channel='STI 014')
    assert_array_equal(events, events_bin)

    # test reset
    model.clear_all()
    assert_equal(model.use_mrk, [0, 1, 2, 3, 4])
    assert_equal(model.sqd_file, "")

    # Instantiate the GUI panel headlessly just to check construction works.
    os.environ['_MNE_GUI_TESTING_MODE'] = 'true'
    try:
        with warnings.catch_warnings(record=True):  # traits warnings
            warnings.simplefilter('always')
            Kit2FiffPanel()
    finally:
        del os.environ['_MNE_GUI_TESTING_MODE']


run_tests_if_main()
| bsd-3-clause | 546,221,746,524,923,460 | 31.188679 | 69 | 0.66823 | false |
t-artistik/browserscope | categories/richtext2/tests/forwarddelete.py | 14 | 15254 |
FORWARDDELETE_TESTS = {
'id': 'FD',
'caption': 'Forward-Delete Tests',
'command': 'forwardDelete',
'checkAttrs': True,
'checkStyle': False,
'Proposed': [
{ 'desc': '',
'tests': [
]
},
{ 'desc': 'forward-delete single characters',
'tests': [
{ 'id': 'CHAR-1_SC',
'desc': 'Delete 1 character',
'pad': 'foo^barbaz',
'expected': 'foo^arbaz' },
{ 'id': 'CHAR-2_SC',
'desc': 'Delete 1 pre-composed character o with diaeresis',
'pad': 'fo^öbarbaz',
'expected': 'fo^barbaz' },
{ 'id': 'CHAR-3_SC',
'desc': 'Delete 1 character with combining diaeresis above',
'pad': 'fo^öbarbaz',
'expected': 'fo^barbaz' },
{ 'id': 'CHAR-4_SC',
'desc': 'Delete 1 character with combining diaeresis below',
'pad': 'fo^o̤barbaz',
'expected': 'fo^barbaz' },
{ 'id': 'CHAR-5_SC',
'desc': 'Delete 1 character with combining diaeresis above and below',
'pad': 'fo^ö̤barbaz',
'expected': 'fo^barbaz' },
{ 'id': 'CHAR-6_SC',
'desc': 'Delete 1 character with enclosing square',
'pad': 'fo^o⃞barbaz',
'expected': 'fo^barbaz' },
{ 'id': 'CHAR-7_SC',
'desc': 'Delete 1 character with combining long solidus overlay',
'pad': 'fo^o̸barbaz',
'expected': 'fo^barbaz' }
]
},
{ 'desc': 'forward-delete text selections',
'tests': [
{ 'id': 'TEXT-1_SI',
'desc': 'Delete text selection',
'pad': 'foo[bar]baz',
'expected': 'foo^baz' },
{ 'id': 'B-1_SE',
'desc': 'Forward-delete at end of span',
'pad': 'foo<b>bar^</b>baz',
'expected': 'foo<b>bar^</b>az' },
{ 'id': 'B-1_SB',
'desc': 'Forward-delete from position before span',
'pad': 'foo^<b>bar</b>baz',
'expected': 'foo^<b>ar</b>baz' },
{ 'id': 'B-1_SW',
'desc': 'Delete selection that wraps the whole span content',
'pad': 'foo<b>[bar]</b>baz',
'expected': 'foo^baz' },
{ 'id': 'B-1_SO',
'desc': 'Delete selection that wraps the whole span',
'pad': 'foo[<b>bar</b>]baz',
'expected': 'foo^baz' },
{ 'id': 'B-1_SL',
'desc': 'Delete oblique selection that starts before span',
'pad': 'foo[bar<b>baz]quoz</b>quuz',
'expected': 'foo^<b>quoz</b>quuz' },
{ 'id': 'B-1_SR',
'desc': 'Delete oblique selection that ends after span',
'pad': 'foo<b>bar[baz</b>quoz]quuz',
'expected': 'foo<b>bar^</b>quuz' },
{ 'id': 'B.I-1_SM',
'desc': 'Delete oblique selection that starts and ends in different spans',
'pad': 'foo<b>bar[baz</b><i>qoz]quuz</i>quuuz',
'expected': 'foo<b>bar^</b><i>quuz</i>quuuz' },
{ 'id': 'GEN-1_SE',
'desc': 'Delete at end of span with generated content',
'pad': 'foo<gen>bar^</gen>baz',
'expected': 'foo<gen>bar^</gen>az' },
{ 'id': 'GEN-1_SB',
'desc': 'Delete from position before span with generated content',
'pad': 'foo^<gen>bar</gen>baz',
'expected': 'foo^<gen>ar</gen>baz' }
]
},
{ 'desc': 'forward-delete paragraphs',
'tests': [
{ 'id': 'P2-1_SE1',
'desc': 'Delete from collapsed selection at end of paragraph - should merge with next',
'pad': '<p>foobar^</p><p>bazqoz</p>',
'expected': '<p>foobar^bazqoz</p>' },
{ 'id': 'P2-1_SI1',
'desc': 'Delete non-collapsed selection at end of paragraph - should not merge with next',
'pad': '<p>foo[bar]</p><p>bazqoz</p>',
'expected': '<p>foo^</p><p>bazqoz</p>' },
{ 'id': 'P2-1_SM',
'desc': 'Delete non-collapsed selection spanning 2 paragraphs - should merge them',
'pad': '<p>foo[bar</p><p>baz]qoz</p>',
'expected': '<p>foo^qoz</p>' }
]
},
{ 'desc': 'forward-delete lists and list items',
'tests': [
{ 'id': 'OL-LI2-1_SO1',
'desc': 'Delete fully wrapped list item',
'pad': 'foo<ol>{<li>bar</li>}<li>baz</li></ol>qoz',
'expected': ['foo<ol>|<li>baz</li></ol>qoz',
'foo<ol><li>^baz</li></ol>qoz'] },
{ 'id': 'OL-LI2-1_SM',
'desc': 'Delete oblique range between list items within same list',
'pad': 'foo<ol><li>ba[r</li><li>b]az</li></ol>qoz',
'expected': 'foo<ol><li>ba^az</li></ol>qoz' },
{ 'id': 'OL-LI-1_SW',
'desc': 'Delete contents of last list item (list should remain)',
'pad': 'foo<ol><li>[foo]</li></ol>qoz',
'expected': ['foo<ol><li>|</li></ol>qoz',
'foo<ol><li>^</li></ol>qoz'] },
{ 'id': 'OL-LI-1_SO',
'desc': 'Delete last list item of list (should remove entire list)',
'pad': 'foo<ol>{<li>foo</li>}</ol>qoz',
'expected': 'foo^qoz' }
]
},
{ 'desc': 'forward-delete with strange selections',
'tests': [
{ 'id': 'HR.BR-1_SM',
'desc': 'Delete selection that starts and ends within nodes that don\'t have children',
'pad': 'foo<hr {>bar<br }>baz',
'expected': 'foo<hr>|<br>baz' }
]
},
{ 'desc': 'forward-delete from immediately before a table',
'tests': [
{ 'id': 'TABLE-1_SB',
'desc': 'Delete from position immediately before table (should have no effect)',
'pad': 'foo^<table><tbody><tr><td>bar</td></tr></tbody></table>baz',
'expected': 'foo^<table><tbody><tr><td>bar</td></tr></tbody></table>baz' }
]
},
{ 'desc': 'forward-delete within table cells',
'tests': [
{ 'id': 'TD-1_SE',
'desc': 'Delete from end of last cell (should have no effect)',
'pad': 'foo<table><tbody><tr><td>bar^</td></tr></tbody></table>baz',
'expected': 'foo<table><tbody><tr><td>bar^</td></tr></tbody></table>baz' },
{ 'id': 'TD2-1_SE1',
'desc': 'Delete from end of inner cell (should have no effect)',
'pad': 'foo<table><tbody><tr><td>bar^</td><td>baz</td></tr></tbody></table>quoz',
'expected': 'foo<table><tbody><tr><td>bar^</td><td>baz</td></tr></tbody></table>quoz' },
{ 'id': 'TD2-1_SM',
'desc': 'Delete with selection spanning 2 cells',
'pad': 'foo<table><tbody><tr><td>ba[r</td><td>b]az</td></tr></tbody></table>quoz',
'expected': 'foo<table><tbody><tr><td>ba^</td><td>az</td></tr></tbody></table>quoz' }
]
},
{ 'desc': 'forward-delete table rows',
'tests': [
{ 'id': 'TR3-1_SO1',
'desc': 'Delete first table row',
'pad': '<table><tbody>{<tr><td>A</td></tr>}<tr><td>B</td></tr><tr><td>C</td></tr></tbody></table>',
'expected': ['<table><tbody>|<tr><td>B</td></tr><tr><td>C</td></tr></tbody></table>',
'<table><tbody><tr><td>^B</td></tr><tr><td>C</td></tr></tbody></table>'] },
{ 'id': 'TR3-1_SO2',
'desc': 'Delete middle table row',
'pad': '<table><tbody><tr><td>A</td></tr>{<tr><td>B</td></tr>}<tr><td>C</td></tr></tbody></table>',
'expected': ['<table><tbody><tr><td>A</td></tr>|<tr><td>C</td></tr></tbody></table>',
'<table><tbody><tr><td>A</td></tr><tr><td>^C</td></tr></tbody></table>'] },
{ 'id': 'TR3-1_SO3',
'desc': 'Delete last table row',
'pad': '<table><tbody><tr><td>A</td></tr><tr><td>B</td></tr>{<tr><td>C</td></tr>}</tbody></table>',
'expected': ['<table><tbody><tr><td>A</td></tr><tr><td>B</td></tr>|</tbody></table>',
'<table><tbody><tr><td>A</td></tr><tr><td>B^</td></tr></tbody></table>'] },
{ 'id': 'TR2rs:2-1_SO1',
'desc': 'Delete first table row where a cell has rowspan 2',
'pad': '<table><tbody>{<tr><td>A</td><td rowspan=2>R</td></tr>}<tr><td>B</td></tr></tbody></table>',
'expected': ['<table><tbody>|<tr><td>B</td><td>R</td></tr></tbody></table>',
'<table><tbody><tr><td>^B</td><td>R</td></tr></tbody></table>'] },
{ 'id': 'TR2rs:2-1_SO2',
'desc': 'Delete second table row where a cell has rowspan 2',
'pad': '<table><tbody><tr><td>A</td><td rowspan=2>R</td></tr>{<tr><td>B</td></tr>}</tbody></table>',
'expected': ['<table><tbody><tr><td>A</td><td>R</td></tr>|</tbody></table>',
'<table><tbody><tr><td>A</td><td>R^</td></tr></tbody></table>'] },
{ 'id': 'TR3rs:3-1_SO1',
'desc': 'Delete first table row where a cell has rowspan 3',
'pad': '<table><tbody>{<tr><td>A</td><td rowspan=3>R</td></tr>}<tr><td>B</td></tr><tr><td>C</td></tr></tbody></table>',
'expected': ['<table><tbody>|<tr><td>A</td><td rowspan="2">R</td></tr><tr><td>C</td></tr></tbody></table>',
'<table><tbody><tr><td>^A</td><td rowspan="2">R</td></tr><tr><td>C</td></tr></tbody></table>'] },
{ 'id': 'TR3rs:3-1_SO2',
'desc': 'Delete middle table row where a cell has rowspan 3',
'pad': '<table><tbody><tr><td>A</td><td rowspan=3>R</td></tr>{<tr><td>B</td></tr>}<tr><td>C</td></tr></tbody></table>',
'expected': ['<table><tbody><tr><td>B</td><td rowspan="2">R</td></tr>|<tr><td>C</td></tr></tbody></table>',
'<table><tbody><tr><td>B</td><td rowspan="2">R</td></tr><tr><td>^C</td></tr></tbody></table>'] },
{ 'id': 'TR3rs:3-1_SO3',
'desc': 'Delete last table row where a cell has rowspan 3',
'pad': '<table><tbody><tr><td>A</td><td rowspan=3>R</td></tr><tr><td>B</td></tr>{<tr><td>C</td></tr>}</tbody></table>',
'expected': ['<table><tbody><tr><td>A</td><td rowspan="2">R</td></tr><tr><td>B</td></tr>|</tbody></table>',
'<table><tbody><tr><td>A</td><td rowspan="2">R</td></tr><tr><td>B^</td></tr></tbody></table>'] }
]
},
{ 'desc': 'delete with non-editable nested content',
'tests': [
{ 'id': 'DIV:ce:false-1_SO',
'desc': 'Delete nested non-editable <div>',
'pad': 'foo[bar<div contenteditable="false">NESTED</div>baz]qoz',
'expected': 'foo^qoz' },
{ 'id': 'DIV:ce:false-1_SB',
'desc': 'Delete from immediately before a nested non-editable <div> (should be no-op)',
'pad': 'foobar^<div contenteditable="false">NESTED</div>bazqoz',
'expected': 'foobar^<div contenteditable="false">NESTED</div>bazqoz' },
{ 'id': 'DIV:ce:false-1_SL',
'desc': 'Delete nested non-editable <div> with oblique selection',
'pad': 'foo[bar<div contenteditable="false">NES]TED</div>bazqoz',
'expected': [ 'foo^<div contenteditable="false">NESTED</div>bazqoz',
'foo<div contenteditable="false">[NES]TED</div>bazqoz' ] },
{ 'id': 'DIV:ce:false-1_SR',
'desc': 'Delete nested non-editable <div> with oblique selection',
'pad': 'foobar<div contenteditable="false">NES[TED</div>baz]qoz',
'expected': [ 'foobar<div contenteditable="false">NESTED</div>^qoz',
'foobar<div contenteditable="false">NES[TED]</div>qoz' ] },
{ 'id': 'DIV:ce:false-1_SI',
'desc': 'Delete inside nested non-editable <div> (should be no-op)',
'pad': 'foobar<div contenteditable="false">NE[ST]ED</div>bazqoz',
'expected': 'foobar<div contenteditable="false">NE[ST]ED</div>bazqoz' }
]
},
{ 'desc': 'Delete with display:inline-block',
'checkStyle': True,
'tests': [
{ 'id': 'SPAN:d:ib-1_SC',
'desc': 'Delete inside an inline-block <span>',
'pad': 'foo<span style="display: inline-block">bar^baz</span>qoz',
'expected': 'foo<span style="display: inline-block">bar^az</span>qoz' },
{ 'id': 'SPAN:d:ib-1_SA',
'desc': 'Delete from immediately before an inline-block <span>',
'pad': 'foo^<span style="display: inline-block">barbaz</span>qoz',
'expected': 'foo^<span style="display: inline-block">arbaz</span>qoz' },
{ 'id': 'SPAN:d:ib-2_SL',
'desc': 'Delete with nested inline-block <span>, oblique selection',
'pad': 'foo[DEL<span style="display: inline-block">ETE]bar</span>baz',
'expected': 'foo^<span style="display: inline-block">bar</span>baz' },
{ 'id': 'SPAN:d:ib-3_SR',
'desc': 'Delete with nested inline-block <span>, oblique selection',
'pad': 'foo<span style="display: inline-block">bar[DEL</span>ETE]baz',
'expected': 'foo<span style="display: inline-block">bar^</span>baz' },
{ 'id': 'SPAN:d:ib-4i_SI',
'desc': 'Delete with nested inline-block <span>, oblique selection',
'pad': 'foo<span style="display: inline-block">bar[DELETE]baz</span>qoz',
'expected': 'foo<span style="display: inline-block">bar^baz</span>qoz' },
{ 'id': 'SPAN:d:ib-4l_SI',
'desc': 'Delete with nested inline-block <span>, oblique selection',
'pad': 'foo<span style="display: inline-block">[DELETE]barbaz</span>qoz',
'expected': 'foo<span style="display: inline-block">^barbaz</span>qoz' },
{ 'id': 'SPAN:d:ib-4r_SI',
'desc': 'Delete with nested inline-block <span>, oblique selection',
'pad': 'foo<span style="display: inline-block">barbaz[DELETE]</span>qoz',
'expected': 'foo<span style="display: inline-block">barbaz^</span>qoz' }
]
}
]
}
| apache-2.0 | 7,594,298,882,866,077,000 | 47.425397 | 137 | 0.456339 | false |
slightstone/SickRage | lib/hachoir_parser/image/tga.py | 90 | 2927 | """
Truevision Targa Graphic (TGA) picture parser.
Author: Victor Stinner
Creation: 18 december 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import FieldSet, UInt8, UInt16, Enum, RawBytes
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.image.common import PaletteRGB
class Line(FieldSet):
    """A single scan line of palette-indexed pixels."""

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Bit size of one line: image width times bits-per-pixel.
        width = self["/width"].value
        bpp = self["/bpp"].value
        self._size = width * bpp

    def createFields(self):
        for _column in xrange(self["/width"].value):
            yield UInt8(self, "pixel[]")
class Pixels(FieldSet):
    """All scan lines of the image, ordered according to the mirror flag."""

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Total bit size: width * height * bits-per-pixel.
        self._size = (self["/width"].value * self["/height"].value
                      * self["/bpp"].value)

    def createFields(self):
        height = self["/height"].value
        if self["/options"].value == 0:
            # options == 0: lines are stored bottom-up, so walk rows in
            # reverse to keep field names in visual order.
            rows = xrange(height - 1, -1, -1)
        else:
            rows = xrange(height)
        for row in rows:
            yield Line(self, "line[%u]" % row)
class TargaFile(Parser):
    # Registration metadata used by hachoir's parser-guessing machinery.
    PARSER_TAGS = {
        "id": "targa",
        "category": "image",
        "file_ext": ("tga",),
        "mime": (u"image/targa", u"image/tga", u"image/x-tga"),
        "min_size": 18*8,  # fixed 18-byte header
        "description": u"Truevision Targa Graphic (TGA)"
    }

    # Human-readable names for the supported 'codec' byte values.
    CODEC_NAME = {
        1: u"8-bit uncompressed",
        2: u"24-bit uncompressed",
        9: u"8-bit RLE",
        10: u"24-bit RLE",
    }
    endian = LITTLE_ENDIAN

    def validate(self):
        """Return True if the header matches a TGA file this parser handles,
        otherwise a string describing the first mismatch."""
        if self["version"].value != 1:
            return "Unknown version"
        if self["codec"].value not in self.CODEC_NAME:
            return "Unknown codec"
        if self["x_min"].value != 0 or self["y_min"].value != 0:
            return "(x_min, y_min) is not (0,0)"
        if self["bpp"].value not in (8, 24):
            return "Unknown bits/pixel value"
        return True

    def createFields(self):
        """Yield the header fields, then palette/pixel data as applicable."""
        yield UInt8(self, "hdr_size", "Header size in bytes")
        yield UInt8(self, "version", "Targa version (always one)")
        yield Enum(UInt8(self, "codec", "Pixels encoding"), self.CODEC_NAME)
        yield UInt16(self, "palette_ofs", "Palette absolute file offset")
        yield UInt16(self, "nb_color", "Number of color")
        yield UInt8(self, "color_map_size", "Color map entry size")
        yield UInt16(self, "x_min")
        yield UInt16(self, "y_min")
        yield UInt16(self, "width")
        yield UInt16(self, "height")
        yield UInt8(self, "bpp", "Bits per pixel")
        yield UInt8(self, "options", "Options (0: vertical mirror)")
        if self["bpp"].value == 8:
            # 8-bit images are palette-indexed: a 256-entry RGB palette follows.
            yield PaletteRGB(self, "palette", 256)
        if self["codec"].value == 1:
            # Uncompressed 8-bit data: parse individual scan lines.
            yield Pixels(self, "pixels")
        else:
            # Other codecs are not decoded; expose the remainder as raw bytes.
            size = (self.size - self.current_size) // 8
            if size:
                yield RawBytes(self, "raw_pixels", size)
| gpl-3.0 | -4,023,663,025,822,488,600 | 33.435294 | 86 | 0.567475 | false |
nacc/autotest | mirror/config-sample.py | 6 | 4369 | """
Sample configuration file for the "mirror" script that will use
rsync://rsync.kernel.org to fetch a kernel file list and schedule jobs on new
kernel releases.
This file has to be valid python code executed by the "mirror" script. The file
may define and do anything but the following "names" are special:
- a global name "source" is expected to implement get_new_files() method which
will be used by "mirror" to fetch the list of new files
- an optional global iterable of regular expression strings named
"filter_exprs" where for each regular expression if there is a match group
named "arg" then the original kernel filename will be replaced with the
contents of that group; if no such match group is defined then all the filename
will be considered (if there is at least one regular expression that matches
the filename, otherwise the filename is just filtered out); if "filter_exprs"
is not defined (or defined to be empty) then no filtering is performed
- an optional "trigger" instance of a trigger class; by default this is
initialized with trigger.trigger() but you can set it to another instance
(of your own site specific trigger class); even if you don't set it you
most certainly want to add a couple of actions to the trigger instance to
be executed for the new kernels (by default the list is empty and nothing
will happen with the new kernels other than being included in the known
kernels database so future lookups will not consider them new again)
"""
from autotest.mirror import database, source as source_module
from autotest.mirror import trigger as trigger_module
# create a database object where to store information about known files
db = database.dict_database('rsync.kernel.org.db')

# create a source object that will be used to fetch the list of new kernel
# files (this example uses rsync_source)
source = source_module.rsync_source(db,
    'rsync://rsync.kernel.org/pub/linux/kernel',
    excludes=('2.6.0-test*/', 'broken-out/', '*.sign', '*.gz'))
# (remote glob, local destination subdirectory) pairs to mirror
source.add_path('v2.6/patch-2.6.*.bz2', 'v2.6')
source.add_path('v2.6/linux-2.6.[0-9].tar.bz2', 'v2.6')
source.add_path('v2.6/linux-2.6.[0-9][0-9].tar.bz2', 'v2.6')
source.add_path('v2.6/testing/patch*.bz2', 'v2.6/testing')
source.add_path('v2.6/snapshots/*.bz2', 'v2.6/snapshots')
source.add_path('people/akpm/patches/2.6/*', 'akpm')

# Given a list of files filter and transform it for entries that look like
# legitimate releases (may be empty in which case no filtering/transformation
# is done). If you want to replace the matched filename to only a part of it
# put the part you want extracted in a match group named "arg".
filter_exprs = (
    # The major tarballs
    r'^(.*/)?linux-(?P<arg>2\.6\.\d+)\.tar\.bz2$',
    # Stable releases
    r'^(.*/)?patch-(?P<arg>2\.6\.\d+\.\d+)\.bz2$',
    # -rc releases
    r'^(.*/)?patch-(?P<arg>2\.6\.\d+-rc\d+)\.bz2$',
    # -git releases
    r'^(.*/)?patch-(?P<arg>2\.6\.\d+(-rc\d+)?-git\d+)\.bz2$',
    # -mm tree
    r'^(.*/)?(?P<arg>2\.6\.\d+(-rc\d+)?-mm\d+)\.bz2$',
    )

# associate kernel versions with kernel config files
# all machines have the same hardware configuration so they will all
# use the same mapping for kernel version -> kernel config file
_common_kernel_config = {
    '2.6.20': '/path/to/2.6.20.config',
    '2.6.25': '~/kernel-2.6.25.config',
    '2.6.29': 'http://somesite/configs/2.6.29.conf',
    }

# a mapping of machine -> machine_info (containing a list of test names as
# they are named in the frontend database and kernel version association to
# kernel config filenames)
_tests_map = {
    'mach1': trigger_module.map_action.machine_info(
        ('test1', 'server test2'), _common_kernel_config),
    'mach2': trigger_module.map_action.machine_info(
        ('test1',), _common_kernel_config),
    'mach3': trigger_module.map_action.machine_info(
        ('test3',), _common_kernel_config),
    'mach4': trigger_module.map_action.machine_info(
        ('test4',), _common_kernel_config),
    }

# Instantiate the default trigger and register some actions, otherwise
# nothing will be done for the new kernel versions.  (Replace with a site
# specific trigger instance here if needed.)
trigger = trigger_module.trigger()
trigger.add_action(trigger_module.map_action(_tests_map, 'kerntest-%s'))
trigger.add_action(trigger_module.email_action('test@test.com'))
| gpl-2.0 | -5,366,178,294,447,292,000 | 45.978495 | 79 | 0.706111 | false |
wenxer/fbone | fbone/settings/views.py | 8 | 2765 | # -*- coding: utf-8 -*-
import os
import hashlib
from datetime import datetime
from flask import Blueprint, render_template, current_app, request, flash
from flask.ext.login import login_required, current_user
from ..extensions import db
from ..user import User
from ..utils import allowed_file, make_dir
from .forms import ProfileForm, PasswordForm
# Blueprint for all /settings/* pages.
settings = Blueprint('settings', __name__, url_prefix='/settings')


@settings.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
    """Show and update the current user's public profile, incl. avatar upload."""
    user = User.query.filter_by(name=current_user.name).first_or_404()
    form = ProfileForm(obj=user.user_detail,
                       email=current_user.email,
                       role_code=current_user.role_code,
                       status_code=current_user.status_code,
                       next=request.args.get('next'))

    if form.validate_on_submit():

        if form.avatar_file.data:
            upload_file = request.files[form.avatar_file.name]
            if upload_file and allowed_file(upload_file.filename):
                # Don't trust any input, we use a random string as filename.
                # or use secure_filename:
                # http://flask.pocoo.org/docs/patterns/fileuploads/
                user_upload_dir = os.path.join(current_app.config['UPLOAD_FOLDER'], "user_%s" % user.id)
                current_app.logger.debug(user_upload_dir)
                make_dir(user_upload_dir)

                root, ext = os.path.splitext(upload_file.filename)
                today = datetime.now().strftime('_%Y-%m-%d')
                # Hash file content as filename.
                # NOTE(review): 'today' already starts with '_', so the final
                # name contains a double underscore -- confirm this is intended.
                hash_filename = hashlib.sha1(upload_file.read()).hexdigest() + "_" + today + ext
                user.avatar = hash_filename

                avatar_ab_path = os.path.join(user_upload_dir, user.avatar)
                # Reset the file cursor since read() above consumed the stream.
                upload_file.seek(0)
                upload_file.save(avatar_ab_path)

        form.populate_obj(user)
        form.populate_obj(user.user_detail)

        db.session.add(user)
        db.session.commit()

        flash('Public profile updated.', 'success')

    return render_template('settings/profile.html', user=user,
                           active="profile", form=form)
@settings.route('/password', methods=['GET', 'POST'])
@login_required
def password():
    """Let the logged-in user change their password."""
    account = User.query.filter_by(name=current_user.name).first_or_404()
    form = PasswordForm(next=request.args.get('next'))

    if form.validate_on_submit():
        # Copy submitted fields, then set the new password explicitly.
        form.populate_obj(account)
        account.password = form.new_password.data

        db.session.add(account)
        db.session.commit()

        flash('Password updated.', 'success')

    return render_template(
        'settings/password.html', user=account, active="password", form=form)
| bsd-3-clause | -8,871,672,834,084,120,000 | 32.719512 | 104 | 0.620253 | false |
newmediamedicine/indivo_server_1_0 | indivo/tests/integration/test_modules/messaging.py | 1 | 2631 | import data
PRD = 'prd'
from utils import *
def test_messaging(IndivoClient):
try:
BODY = 'body'
SUBJECT = 'subject'
MSG_ID = 'message_id'
SEVERITY = 'severity'
admin_client = IndivoClient(data.machine_app_email, data.machine_app_secret)
admin_client.set_app_id(data.app_email)
account_id = admin_client.create_account(data.account03)[PRD]['Account'][0]
admin_client.add_auth_system(account_id=account_id, data={'system':'password', 'username':data.account03['username'], 'password':data.account03['user_pass']})
record_id = admin_client.create_record(data=data.contact).response['prd']['Record'][0]
admin_client.set_record_owner(data=account_id)
admin_client.setup_app(record_id=record_id, app_id=data.app_email)
admin_client.message_record(data={SUBJECT : data.message01[SUBJECT],
BODY : data.message01[BODY],
SEVERITY: data.message01[SEVERITY]},
message_id = data.message01[MSG_ID])
admin_client.message_account(account_id = account_id,
data= { SUBJECT : data.message02[SUBJECT],
BODY : data.message02[BODY],
MSG_ID : data.message02[MSG_ID],
SEVERITY : data.message02[SEVERITY]})
token = admin_client.setup_app( record_id = record_id,
app_id = data.app_email).response[PRD]
user_client = IndivoClient(data.app_email, data.app_secret)
user_client.update_token(token)
user_client.set_app_id(data.app_email)
user_client.get_messages(record_id = record_id)
chrome_client = IndivoClient(data.chrome_consumer_key, data.chrome_consumer_secret)
chrome_client.create_session(data.account03)
#
# check that archival removes one of the messages
#
def num_messages():
messages = xpath(parse_xml(chrome_client.account_inbox(account_id = data.account03['account_id'])), "/Messages/Message")
return len(messages)
num_messages_before = num_messages()
message_id = xpath(parse_xml(chrome_client.account_inbox(account_id = data.account03['account_id'])), "/Messages/Message/@id")[0]
chrome_client.account_message_archive(account_id = data.account03['account_id'], message_id = message_id)
num_messages_after = num_messages()
assert num_messages_before - num_messages_after == 1, "message didn't get archived"
except Exception, e:
return False, e
return True
| gpl-3.0 | -3,509,521,892,431,867,000 | 40.109375 | 162 | 0.617636 | false |
anaruse/chainer | tests/chainer_tests/utils_tests/test_type_check.py | 2 | 11918 | import sys
import unittest
import numpy
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check as T
class TestConstant(unittest.TestCase):
def setUp(self):
self.x = T.Constant(10)
def test_str(self):
self.assertEqual('10', str(self.x))
def test_eval(self):
self.assertEqual(10, self.x.eval())
class TestVariable(unittest.TestCase):
def setUp(self):
self.x = T.Variable(10, 'x')
def test_str(self):
self.assertEqual('x', str(self.x))
def test_eval(self):
self.assertEqual(10, self.x.eval())
class Object(object):
def __init__(self):
self.value = 10
class TestGetAttr(unittest.TestCase):
def setUp(self):
x = Object()
self.value = T.GetAttr(T.Variable(x, 'x'), 'value')
self.value2 = T.GetAttr(T.Variable(x, 'x'), T.Constant('value'))
self.value3 = T.GetAttr(T.Variable(x, 'x'), 3)
def test_str(self):
self.assertEqual('x.value', str(self.value))
self.assertEqual('x.value', str(self.value2))
self.assertEqual('getattr(x, 3)', str(self.value3))
def test_eval(self):
self.assertEqual(10, self.value.eval())
class TestGetItem(unittest.TestCase):
def setUp(self):
x = T.Variable([1, 2, 3], 'x')
y = T.Variable({'a': 1, 'b': 2}, 'y')
self.x = x
self.v1 = T.GetItem(x, 1)
self.v2 = T.GetItem(y, 'a')
def test_str(self):
self.assertEqual('x[1]', str(self.v1))
self.assertEqual("y['a']", str(self.v2))
x = self.x
self.assertEqual('x[:]', str(x[:]))
self.assertEqual('x[:]', str(x[::]))
self.assertEqual('x[1:]', str(x[1:]))
self.assertEqual('x[:2]', str(x[:2]))
self.assertEqual('x[1:2]', str(x[1:2]))
self.assertEqual('x[1::1]', str(x[1::1]))
self.assertEqual('x[:2:1]', str(x[:2:1]))
self.assertEqual('x[1:2:1]', str(x[1:2:1]))
self.assertEqual('x[...]', str(x[...]))
self.assertEqual('x[0, 1]', str(x[0, 1]))
self.assertEqual('x[1:2, ...]', str(x[1:2:, ...]))
def test_eval(self):
self.assertEqual(2, self.v1.eval())
self.assertEqual(1, self.v2.eval())
class TestCall(unittest.TestCase):
    """Call renders like a function call and only evaluates on eval()."""

    def setUp(self):
        fn = T.Variable(sum, 'sum')
        self.explicit_call = T.Call(fn, ([1, 2, 3],))
        self.sugar_call = fn([1, 2, 3])
        self.failing_call = T.Call(fn, (['', 1],))

    def test_str(self):
        self.assertEqual('sum([1, 2, 3])', str(self.explicit_call))
        self.assertEqual('sum([1, 2, 3])', str(self.sugar_call))
        self.assertEqual("sum(['', 1])", str(self.failing_call))

    def test_eval(self):
        self.assertEqual(6, self.explicit_call.eval())
        self.assertEqual(6, self.sugar_call.eval())
        # The bad argument list only raises when eval() actually runs the call.
        with self.assertRaises(TypeError):
            self.assertEqual(6, self.failing_call.eval())
class TestBinaryOperator(unittest.TestCase):
    """Parenthesization of binary operators depends on operand priority."""

    def setUp(self):
        x = T.Variable(1, 'x')
        y = T.Variable(1, 'y')

        def f(x, y):
            # Dummy implementation: just return both operands.
            return x, y

        # Priorities 7/8/9 relative to '+' control which side gets parens;
        # the trailing True flag selects the variant that parenthesizes the
        # other side (see expected strings below).
        self.op1 = T.BinaryOperator(7, x, y, '+', f)
        self.op2 = T.BinaryOperator(8, x, y, '+', f)
        self.op3 = T.BinaryOperator(9, x, y, '+', f)

        self.op4 = T.BinaryOperator(7, x, y, '+', f, True)
        self.op5 = T.BinaryOperator(8, x, y, '+', f, True)
        self.op6 = T.BinaryOperator(9, x, y, '+', f, True)

    def test_str(self):
        self.assertEqual('x + y', str(self.op1))
        self.assertEqual('x + (y)', str(self.op2))
        self.assertEqual('(x) + (y)', str(self.op3))
        self.assertEqual('x + y', str(self.op4))
        self.assertEqual('(x) + y', str(self.op5))
        self.assertEqual('(x) + (y)', str(self.op6))

    def test_eval(self):
        self.assertEqual((1, 1), self.op1.eval())
class TestUnaryOperator(unittest.TestCase):
    """Unary operators parenthesize only lower-priority operands."""

    def setUp(self):
        operand = T.Variable(1, 'x')

        def wrap(value):
            # Dummy implementation returning a 1-tuple.
            return value,

        self.no_parens = T.UnaryOperator(8, operand, '-', wrap)
        self.with_parens = T.UnaryOperator(9, operand, '-', wrap)

    def test_str(self):
        self.assertEqual('-x', str(self.no_parens))
        self.assertEqual('-(x)', str(self.with_parens))

    def test_eval(self):
        self.assertEqual((1, ), self.no_parens.eval())
class TestOperators(unittest.TestCase):
    """Operator overloads on expressions produce correctly formatted strings
    with parentheses driven by operator priority and associativity."""

    def setUp(self):
        self.x = T.Variable(1, 'x')
        self.y = T.Variable(1, 'y')

    def test_str(self):
        x = self.x
        y = self.y
        # Each binary operator, in both expr-op-expr and literal-op-expr form.
        self.assertEqual('x + y', str(x + y))
        self.assertEqual('1 + x', str(1 + x))
        self.assertEqual('x - y', str(x - y))
        self.assertEqual('1 - x', str(1 - x))
        self.assertEqual('x * y', str(x * y))
        self.assertEqual('1 * x', str(1 * x))
        self.assertEqual('x / y', str(x / y))
        self.assertEqual('1 / x', str(1 / x))
        self.assertEqual('x // y', str(x // y))
        self.assertEqual('1 // x', str(1 // x))
        self.assertEqual('x % y', str(x % y))
        self.assertEqual('1 % x', str(1 % x))
        self.assertEqual('x ** y', str(x ** y))
        self.assertEqual('x ** y', str(pow(x, y)))
        self.assertEqual('x << y', str(x << y))
        self.assertEqual('1 << x', str(1 << x))
        self.assertEqual('x >> y', str(x >> y))
        self.assertEqual('1 >> x', str(1 >> x))
        self.assertEqual('x & y', str(x & y))
        self.assertEqual('1 & x', str(1 & x))
        self.assertEqual('x ^ y', str(x ^ y))
        self.assertEqual('1 ^ x', str(1 ^ x))
        self.assertEqual('x | y', str(x | y))
        self.assertEqual('1 | x', str(1 | x))

        self.assertEqual('-x', str(-x))
        self.assertEqual('+x', str(+x))
        self.assertEqual('~x', str(~x))

        # left-associative
        self.assertEqual('x + x - x', str(x + x - x))
        self.assertEqual('x + (x - x)', str(x + (x - x)))
        self.assertEqual('x << (x << x)', str(x << (x << x)))

        # right-associative
        self.assertEqual('x ** x ** x', str(x ** x ** x))
        self.assertEqual('x ** x ** x', str(x ** (x ** x)))
        self.assertEqual('(x ** x) ** x', str((x ** x) ** x))

        self.assertEqual('-(x + x)', str(-(x + x)))
        # pow has higher priority than unary operators
        self.assertEqual('-x ** x', str(-x ** x))
        self.assertEqual('(-x) ** x', str((-x) ** x))

    def test_priority(self):
        # Verify the relative priorities used for parenthesization decisions.
        x = self.x
        y = self.y

        self.assertTrue((x << y).priority == (x >> y).priority)
        self.assertTrue((x + y).priority == (x - y).priority)
        self.assertTrue((x * y).priority ==
                        (x / y).priority ==
                        (x // y).priority ==
                        (x % y).priority)
        self.assertTrue((-x).priority == (+x).priority == (~x).priority)

        self.assertTrue((x | y).priority <
                        (x ^ y).priority <
                        (x & y).priority <
                        (x << y).priority <
                        (x + y).priority <
                        (x * y).priority <
                        (-x).priority <
                        (x ** y).priority <
                        x.priority)
class TestDivOperator(unittest.TestCase):
    """'/' on int operands floors on Python 2 and is true division on 3."""

    def setUp(self):
        self.numerator = T.Variable(1, 'x')
        self.denominator = T.Variable(2, 'y')

    def test_div(self):
        quotient = (self.numerator / self.denominator).eval()
        # Behavior of '/' operator for int depends on the version of Python.
        if sys.version_info < (3, 0, 0):
            self.assertEqual(0, quotient)
        else:
            self.assertEqual(0.5, quotient)
class TestGetType(unittest.TestCase):
    """get_types builds a named TypeInfoTuple from a tuple of arrays."""

    def test_empty(self):
        ts = T.get_types((), 'name', False)
        self.assertIsInstance(ts, T.TypeInfoTuple)
        self.assertEqual(0, len(ts))
        self.assertEqual('name', ts.name)

    def test_simple(self):
        data = (numpy.zeros((1, 2, 3)).astype(numpy.float32),)
        ts = T.get_types(data, 'name', False)
        self.assertIsInstance(ts, T.TypeInfoTuple)
        self.assertEqual(1, len(ts))
        self.assertEqual('name', ts.name)

        # The entry exposes shape/ndim/dtype as lazily evaluated expressions.
        t = ts[0]
        self.assertIsInstance(t, T.Expr)
        self.assertEqual(1, t.shape[0].eval())
        self.assertEqual(2, t.shape[1].eval())
        self.assertEqual(3, t.shape[2].eval())
        self.assertEqual(3, t.ndim.eval())
        self.assertEqual(numpy.float32, t.dtype.eval())

    def test_invalid_arg(self):
        # Non-tuple input is rejected by an internal assertion.
        with self.assertRaises(AssertionError):
            T.get_types(1, 'name', False)
class TestBoolBinaryOperator(unittest.TestCase):
    """Tests for T.BoolBinaryOperator comparison expressions."""

    def setUp(self):
        a = T.Variable(1, 'x')
        b = T.Variable(1, 'y')
        c = T.Variable(2, 'z')

        def compare(lhs, rhs):
            return lhs == rhs

        self.op1 = T.BoolBinaryOperator(a, b, '==', '!=', compare)  # holds
        self.op2 = T.BoolBinaryOperator(a, c, '==', '!=', compare)  # fails

    def test_eval(self):
        self.assertTrue(self.op1.eval())

    def test_expect(self):
        # expect() raises when the comparison does not hold
        with self.assertRaises(T.InvalidType):
            self.op2.expect()

    def test_bool(self):
        # implicit truth-testing of an expression is forbidden
        with self.assertRaises(RuntimeError):
            bool(self.op1)

    def test_bool_operator(self):
        with self.assertRaises(RuntimeError):
            not self.op1
class TestLazyGetItem(unittest.TestCase):
    """Item/attribute access and calls on expressions must be lazy."""

    def setUp(self):
        self.t = T.Constant(0)

    def test_evaluate_size(self):
        # Building the expression never touches the underlying value ...
        self.assertIsInstance(self.t[1], T.Expr)
        self.assertIsInstance(self.t.x, T.Expr)
        self.assertIsInstance(self.t(), T.Expr)
        # ... the failure only surfaces once eval() is called.
        with self.assertRaises(TypeError):
            self.t[1].eval()
        with self.assertRaises(AttributeError):
            self.t.x.eval()
        with self.assertRaises(TypeError):
            self.t().eval()
class TestListItem(unittest.TestCase):
    """Evaluation and printing of expressions nested in lists and tuples."""

    def test_eval_list_items(self):
        self.assertTrue((T.Constant([0]) == [T.Constant(0)]).eval())

    def test_list_str(self):
        self.assertEqual(T._repr([T.Constant(0)]), '[0]')

    def test_eval_tuple_items(self):
        self.assertTrue((T.Constant((0,)) == (T.Constant(0),)).eval())

    def test_tuple_str(self):
        # tuples keep Python's trailing-comma convention for singletons
        self.assertEqual(T._repr(()), '()')
        self.assertEqual(T._repr((T.Constant(0),)), '(0,)')
        self.assertEqual(T._repr((T.Constant(0), T.Constant(0))), '(0, 0)')

    def test_eval_nest_list(self):
        self.assertTrue((T.Constant([[0]]) == [[T.Constant(0)]]).eval())

    def test_nest_list_str(self):
        self.assertEqual(T._repr([[T.Constant(0)]]), '[[0]]')
class TestProd(unittest.TestCase):
    """Tests for the T.prod expression."""

    def test_name(self):
        # the printable form reflects the call that built it
        self.assertEqual('prod([])', str(T.prod([])))

    def test_value(self):
        self.assertEqual(6, T.prod([2, 3]).eval())
class TestSameTypes(unittest.TestCase):
    """T.same_types is true iff all arrays live in the same backend."""

    def test_all_numpy_array(self):
        arrays = (numpy.array([0]), numpy.array([1]), numpy.array([2]))
        self.assertTrue(T.same_types(*arrays))

    def test_all_numpy_subclasses(self):
        # ndarray subclasses (e.g. matrix) still count as NumPy
        arrays = (numpy.array([0]),
                  numpy.array([[1], [2]]),
                  numpy.matrix("3,4; 5,6"))
        self.assertTrue(T.same_types(*arrays))

    @attr.gpu
    def test_all_cupy_array(self):
        arrays = (cuda.cupy.array([0]),
                  cuda.cupy.array([1]),
                  cuda.cupy.array([2]))
        self.assertTrue(T.same_types(*arrays))

    @attr.gpu
    def test_numpy_cupy_mixed_1(self):
        self.assertFalse(T.same_types(
            numpy.array([0]), cuda.cupy.array([1]), numpy.array([2])))

    @attr.gpu
    def test_numpy_cupy_mixed_2(self):
        self.assertFalse(T.same_types(
            cuda.cupy.array([0]), numpy.array([1]), cuda.cupy.array([2])))
# Hook the standard test runner so this module can be executed directly.
testing.run_module(__name__, __file__)
| mit | -6,694,363,271,389,401,000 | 29.09596 | 75 | 0.52685 | false |
JamesTFarrington/flask | flask/debughelpers.py | 318 | 6024 | # -*- coding: utf-8 -*-
"""
flask.debughelpers
~~~~~~~~~~~~~~~~~~
Various helpers to make the development experience better.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from ._compat import implements_to_string, text_type
from .app import Flask
from .blueprints import Blueprint
from .globals import _request_ctx_stack
class UnexpectedUnicodeError(AssertionError, UnicodeError):
    """Raised in places where we want some better error reporting for
    unexpected unicode or binary data (i.e. text where bytes were
    expected, or the other way around).
    """
@implements_to_string
class DebugFilesKeyError(KeyError, AssertionError):
    """Raised from ``request.files`` during debugging.  The idea is that it
    can provide a far more helpful message than a generic
    ``KeyError``/``BadRequest`` when a file upload is missing because the
    form lacked the multipart enctype.
    """

    def __init__(self, request, key):
        matches = request.form.getlist(key)
        message = (
            'You tried to access the file "%s" in the request.files '
            'dictionary but it does not exist. The mimetype for the request '
            'is "%s" instead of "multipart/form-data" which means that no '
            'file contents were transmitted. To fix this error you should '
            'provide enctype="multipart/form-data" in your form.'
            % (key, request.mimetype)
        )
        if matches:
            # The key arrived as an ordinary form field instead of a file.
            names = ', '.join('"%s"' % name for name in matches)
            message += ('\n\nThe browser instead transmitted some file names. '
                        'This was submitted: %s' % names)
        self.msg = message

    def __str__(self):
        return self.msg
class FormDataRoutingRedirect(AssertionError):
    """Raised by Flask in debug mode when the routing system issues a
    redirect for a request whose method is not GET, HEAD or OPTIONS.
    Reasoning: such a redirect would silently drop the submitted form
    data, so we fail loudly instead.
    """

    def __init__(self, request):
        exc = request.routing_exception
        parts = ['A request was sent to this URL (%s) but a redirect was '
                 'issued automatically by the routing system to "%s".'
                 % (request.url, exc.new_url)]

        # Be extra helpful when the redirect merely appends a slash.
        if exc.new_url.split('?')[0] == request.base_url + '/':
            parts.append(' The URL was defined with a trailing slash so '
                         'Flask will automatically redirect to the URL '
                         'with the trailing slash if it was accessed '
                         'without one.')

        parts.append(' Make sure to directly send your %s-request to this URL '
                     'since we can\'t make browsers or HTTP clients redirect '
                     'with form data reliably or without user interaction.' %
                     request.method)
        parts.append('\n\nNote: this exception is only raised in debug mode')
        AssertionError.__init__(self, ''.join(parts).encode('utf-8'))
def attach_enctype_error_multidict(request):
    """Since Flask 0.8 we're monkeypatching the files object in case a
    request is detected that does not use multipart form data but the
    files object is accessed.

    Swaps the class of ``request.files`` for a subclass whose
    ``__getitem__`` raises :class:`DebugFilesKeyError` when the missing
    key was actually submitted as a plain form field.
    """
    base = request.files.__class__

    class patched(base):
        def __getitem__(self, key):
            try:
                return base.__getitem__(self, key)
            except KeyError:
                if key not in request.form:
                    raise
                raise DebugFilesKeyError(request, key)

    # keep introspection (repr, docs) pointing at the original class
    patched.__name__ = base.__name__
    patched.__module__ = base.__module__
    request.files.__class__ = patched
def _dump_loader_info(loader):
yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__)
for key, value in sorted(loader.__dict__.items()):
if key.startswith('_'):
continue
if isinstance(value, (tuple, list)):
if not all(isinstance(x, (str, text_type)) for x in value):
continue
yield '%s:' % key
for item in value:
yield ' - %s' % item
continue
elif not isinstance(value, (str, text_type, int, float, bool)):
continue
yield '%s: %r' % (key, value)
def explain_template_loading_attempts(app, template, attempts):
    """This should help developers understand what failed.

    Logs (at INFO level on ``app.logger``) one report line per attempted
    template loader, including where the loader came from and whether it
    matched, plus hints when zero or multiple loaders matched.
    """
    # Accumulated human readable report; one group of lines per loader.
    info = ['Locating template "%s":' % template]
    total_found = 0
    blueprint = None
    reqctx = _request_ctx_stack.top
    if reqctx is not None and reqctx.request.blueprint is not None:
        blueprint = reqctx.request.blueprint

    for idx, (loader, srcobj, triple) in enumerate(attempts):
        # Describe where the loader came from (app, blueprint or other).
        if isinstance(srcobj, Flask):
            src_info = 'application "%s"' % srcobj.import_name
        elif isinstance(srcobj, Blueprint):
            src_info = 'blueprint "%s" (%s)' % (srcobj.name,
                                                srcobj.import_name)
        else:
            src_info = repr(srcobj)

        info.append('% 5d: trying loader of %s' % (
            idx + 1, src_info))

        for line in _dump_loader_info(loader):
            info.append(' %s' % line)

        # ``triple`` is the loader's (source, filename, uptodate) result,
        # or None when the loader did not match the template.
        if triple is None:
            detail = 'no match'
        else:
            detail = 'found (%r)' % (triple[1] or '<string>')
            total_found += 1
        info.append(' -> %s' % detail)

    seems_fishy = False
    if total_found == 0:
        info.append('Error: the template could not be found.')
        seems_fishy = True
    elif total_found > 1:
        info.append('Warning: multiple loaders returned a match for the template.')
        seems_fishy = True

    # A lookup problem inside a blueprint request is most likely a
    # misplaced template folder; point the developer at the docs.
    if blueprint is not None and seems_fishy:
        info.append(' The template was looked up from an endpoint that '
                    'belongs to the blueprint "%s".' % blueprint)
        info.append(' Maybe you did not place a template in the right folder?')
        info.append(' See http://flask.pocoo.org/docs/blueprints/#templates')

    app.logger.info('\n'.join(info))
| bsd-3-clause | 153,376,277,367,974,400 | 37.864516 | 83 | 0.586819 | false |
PeterDaveHello/eden | modules/templates/CRMT/controllers.py | 20 | 3102 | # -*- coding: utf-8 -*-
from gluon import current
#from gluon.html import *
from gluon.storage import Storage
from s3 import S3CustomController
THEME = "CRMT"
# =============================================================================
class index(S3CustomController):
    """ Custom Home Page """

    def __call__(self):
        """Build the context dict for the home page view.

        Provides:
        - total_activities: count of non-deleted activities
        - latest_activities: the 3 most recent activities with their
          coalition (org group) name and a short formatted date
        - config_id: id of the map configuration for "Know your community"
          (the user's coalition map when logged in, else the site default)
        """
        output = {}

        db = current.db
        s3db = current.s3db

        # Total number of (non-deleted) activities
        atable = s3db.project_activity
        query = (atable.deleted == False)
        output["total_activities"] = db(query).count()

        # Latest 3 activities joined to their coalition (org group)
        ogtable = s3db.org_group
        ltable = s3db.project_activity_group
        query &= (atable.id == ltable.activity_id) & \
                 (ogtable.id == ltable.group_id)
        rows = db(query).select(atable.id,
                                atable.name,
                                atable.date,
                                ogtable.name,
                                limitby = (0, 3),
                                orderby = ~atable.date
                                )
        latest_activities = []
        # Short date format for the homepage cards
        current.deployment_settings.L10n.date_format = "%d %b %y"
        drepresent = atable.date.represent
        for row in rows:
            date = row["project_activity.date"]
            nice_date = drepresent(date) if date else ""
            latest_activities.append(Storage(id = row["project_activity.id"],
                                             name = row["project_activity.name"],
                                             date = nice_date,
                                             date_iso = date or "",
                                             org_group = row["org_group.name"],
                                             ))
        output["latest_activities"] = latest_activities

        # Which Map should we link to in "Know your community"?
        auth = current.auth
        table = s3db.gis_config
        if auth.is_logged_in() and auth.user.org_group_id:
            # Coalition Map
            og = db(ogtable.id == auth.user.org_group_id).select(ogtable.pe_id,
                                                                 limitby=(0, 1)
                                                                 ).first()
            query = (table.pe_id == og.pe_id)
        else:
            # Default Map
            query = (table.uuid == "SITE_DEFAULT")
        config = db(query).select(table.id,
                                  limitby=(0, 1)
                                  ).first()
        # The lookup may legitimately find nothing; the original code hid
        # that (and any other error) behind a bare except.  Be explicit.
        output["config_id"] = config.id if config is not None else None

        self._view(THEME, "index.html")
        return output
# END =========================================================================
| mit | -5,698,911,844,655,157,000 | 35.494118 | 81 | 0.413282 | false |
2014c2g12/c2g12 | c2wp/w2/static/Brython2.0.0-20140209-164925/Lib/ui/slider.py | 111 | 2275 | import widget
from browser import doc,html
class Slider(widget.Widget):
    """A horizontal slider widget styled after jQuery-UI.

    The handle is an anchor element that can be dragged with the mouse;
    its pixel offset within the track is kept in ``self._lastElementLeft``.
    """

    def __init__(self, id=None, label=False):
        self._div_shell = html.DIV(
            Class="ui-slider ui-slider-horizontal ui-widget ui-widget-content ui-corner-all")
        widget.Widget.__init__(self, self._div_shell, 'slider', id)

        self._handle = html.A(
            Class="ui-slider-handle ui-state-default ui-corner-all",
            Href='#', style={'left': '0px'})

        self._value = 0
        self._isMouseDown = False

        def handle_left_px():
            # style.left is a CSS length such as "42px"; extract the int.
            # BUG FIX: the original called the JavaScript-only ``parseInt``,
            # which raises NameError under Brython.
            left = self._handle.style.left
            return int(left.replace('px', '') or 0)

        def startSlide(e):
            self._isMouseDown = True
            # The handle may travel across the track minus its own width.
            self._upperBound = self._div_shell.offsetWidth - self._handle.offsetWidth
            pos = widget.getMousePosition(e)
            self._startMouseX = pos['x']
            self._lastElementLeft = handle_left_px()
            updatePosition(e)

        def updatePosition(e):
            pos = widget.getMousePosition(e)
            # NOTE(review): the delta is taken from the fixed drag start but
            # added to the continuously updated base position, as in the
            # original code — confirm whether drift during long drags is
            # intended before changing the math.
            _newPos = self._lastElementLeft + pos['x'] - self._startMouseX
            # clamp to the track bounds
            _newPos = max(0, min(_newPos, self._upperBound))
            self._handle.style.left = '%spx' % _newPos
            self._lastElementLeft = _newPos

        def moving(e):
            if self._isMouseDown:
                updatePosition(e)

        def dropCallback(e):
            self._isMouseDown = False
            self._handle.unbind('mousemove', moving)

        self._handle.bind('mousemove', moving)
        self._handle.bind('mouseup', dropCallback)
        self._handle.bind('mousedown', startSlide)

        def mouseover(e):
            _class = self._handle.getAttribute('class')
            self._handle.setAttribute('class', '%s %s' % (_class, 'ui-state-hover'))

        def mouseout(e):
            self._isMouseDown = False
            _class = self._handle.getAttribute('class')
            self._handle.setAttribute('class', _class.replace('ui-state-hover', ''))

        self._handle.bind('mouseover', mouseover)
        self._handle.bind('mouseout', mouseout)

        self._div_shell <= self._handle

    def get_value(self):
        """Return the slider's value attribute (not updated by dragging)."""
        return self._value
| gpl-2.0 | 990,390,930,725,214,300 | 30.597222 | 112 | 0.600879 | false |
uclaros/QGIS | tests/src/python/test_selective_masking.py | 22 | 29190 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVirtualLayerDefinition
From build dir, run: ctest -R PyQgsSelectiveMasking -V
QGIS_PREFIX_PATH=/home/hme/src/QGIS/build_ninja/output PYTHONPATH=/home/hme/src/QGIS/build_ninja/output/python/:/home/hme/src/QGIS/build_ninja/output/python/plugins:/home/hme/src/QGIS/tests/src/python python3 ~/src/QGIS/tests/src/python/test_selective_masking.py
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Hugo Mercier / Oslandia'
__date__ = '28/06/2019'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import (
QSize,
QRectF,
QDir
)
from qgis.PyQt.QtGui import (
QColor,
QImage,
QPainter
)
from qgis.testing import unittest, start_app
from utilities import (
unitTestDataPath,
getTempfilePath,
renderMapToImage,
loadTestFonts,
getTestFont,
openInBrowserTab
)
from qgis.core import (
QgsMapSettings,
QgsCoordinateReferenceSystem,
QgsRectangle,
QgsProject,
QgsSymbolLayerReference,
QgsMapRendererParallelJob,
QgsMapRendererSequentialJob,
QgsRenderChecker,
QgsSimpleMarkerSymbolLayer,
QgsSimpleMarkerSymbolLayerBase,
QgsMarkerSymbol,
QgsMaskMarkerSymbolLayer,
QgsSingleSymbolRenderer,
QgsSymbolLayerId,
QgsSymbolLayerUtils,
QgsMapRendererCache,
QgsUnitTypes,
QgsOuterGlowEffect,
QgsPalLayerSettings,
QgsRuleBasedLabeling,
QgsPalLayerSettings,
QgsProperty,
QgsRenderContext,
QgsVectorLayerSimpleLabeling,
QgsLayout,
QgsLayoutItemPage,
QgsLayoutSize,
QgsLayoutItemMap,
QgsLayoutExporter,
QgsWkbTypes,
)
def renderMapToImageWithTime(mapsettings, parallel=False, cache=None):
    """
    Render current map to an image, via multi-threaded renderer
    :param QgsMapSettings mapsettings:
    :param bool parallel: Do parallel or sequential render job
    :param QgsMapRendererCache cache: optional render cache to (re)use
    :return: (QImage, rendering time in milliseconds) tuple
    """
    if parallel:
        job = QgsMapRendererParallelJob(mapsettings)
    else:
        job = QgsMapRendererSequentialJob(mapsettings)
    if cache:
        job.setCache(cache)
    job.start()
    # block until the render job completes
    job.waitForFinished()
    return (job.renderedImage(), job.renderingTime())
class TestSelectiveMasking(unittest.TestCase):
def setUp(self):
self.checker = QgsRenderChecker()
self.checker.setControlPathPrefix("selective_masking")
self.report = "<h1>Python Selective Masking Tests</h1>\n"
self.map_settings = QgsMapSettings()
crs = QgsCoordinateReferenceSystem('epsg:4326')
extent = QgsRectangle(-123.0, 22.7, -76.4, 46.9)
self.map_settings.setBackgroundColor(QColor(152, 219, 249))
self.map_settings.setOutputSize(QSize(420, 280))
self.map_settings.setOutputDpi(72)
self.map_settings.setFlag(QgsMapSettings.Antialiasing, True)
self.map_settings.setFlag(QgsMapSettings.UseAdvancedEffects, False)
self.map_settings.setDestinationCrs(crs)
self.map_settings.setExtent(extent)
# load a predefined QGIS project
self.assertTrue(QgsProject.instance().read(os.path.join(unitTestDataPath(), "selective_masking.qgs")))
self.points_layer = QgsProject.instance().mapLayersByName('points')[0]
self.lines_layer = QgsProject.instance().mapLayersByName('lines')[0]
# line layer with subsymbols
self.lines_layer2 = QgsProject.instance().mapLayersByName('lines2')[0]
# line layer with labels
self.lines_with_labels = QgsProject.instance().mapLayersByName('lines_with_labels')[0]
self.polys_layer = QgsProject.instance().mapLayersByName('polys')[0]
# polygon layer with a rule based labeling
self.polys_layer2 = QgsProject.instance().mapLayersByName('polys2')[0]
# try to fix the font for where labels are defined
# in order to have more stable image comparison tests
for layer in [self.polys_layer, self.lines_with_labels, self.polys_layer2]:
for provider in layer.labeling().subProviders():
settings = layer.labeling().settings(provider)
font = getTestFont()
font.setPointSize(32)
fmt = settings.format()
fmt.setFont(font)
fmt.setNamedStyle('Roman')
fmt.setSize(32)
fmt.setSizeUnit(QgsUnitTypes.RenderPoints)
settings.setFormat(fmt)
if (layer.geometryType == QgsWkbTypes.PolygonGeometry):
settings.placement = QgsPalLayerSettings.OverPoint
layer.labeling().setSettings(settings, provider)
# order layers for rendering
self.map_settings.setLayers([self.points_layer, self.lines_layer, self.polys_layer])
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
    def check_renderings(self, map_settings, control_name):
        """Test a rendering with different configurations:
        - parallel rendering, no cache
        - sequential rendering, no cache
        - parallel rendering, with cache (rendered two times)
        - sequential rendering, with cache (rendered two times)

        NOTE(review): this method renders ``self.map_settings`` rather than
        the ``map_settings`` argument — confirm whether the parameter is
        intentionally unused.
        """
        for do_parallel in [False, True]:
            for use_cache in [False, True]:
                print("=== parallel", do_parallel, "cache", use_cache)
                tmp = getTempfilePath('png')
                cache = None
                if use_cache:
                    cache = QgsMapRendererCache()

                    # render a first time to fill the cache
                    renderMapToImageWithTime(self.map_settings, parallel=do_parallel, cache=cache)

                img, t = renderMapToImageWithTime(self.map_settings, parallel=do_parallel, cache=cache)
                img.save(tmp)
                print("Image rendered in {}".format(tmp))

                self.checker.setControlName(control_name)
                self.checker.setRenderedImage(tmp)
                # the control image name carries the render-mode suffix
                suffix = "_parallel" if do_parallel else "_sequential"
                res = self.checker.compareImages(control_name + suffix)
                self.report += self.checker.report()
                self.assertTrue(res)

                print("=== Rendering took {}s".format(float(t) / 1000.0))
    def test_label_mask(self):
        """A label mask on the polygon labels carves out the road casings
        and the jet point symbols wherever labels are drawn."""
        # modify labeling settings
        label_settings = self.polys_layer.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0)),
            # the black jets
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("B52", 0)),
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("Jet", 0))])
        label_settings.setFormat(fmt)
        self.polys_layer.labeling().setSettings(label_settings)

        # sanity check that the setting round-tripped
        format = self.polys_layer.labeling().settings().format()
        self.assertTrue(format.mask().enabled())

        self.check_renderings(self.map_settings, "label_mask")
    def test_multiple_label_masks_different_sets(self):
        """Two layers each define a label mask targeting different sets of
        symbol layers; both masks must apply independently."""
        # modify labeling settings of the polys layer
        label_settings = self.polys_layer.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_with_labels.id(), QgsSymbolLayerId("", 0)),
            # the black jets
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("B52", 0)),
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("Jet", 0))])
        label_settings.setFormat(fmt)
        self.polys_layer.labeling().setSettings(label_settings)

        format = self.polys_layer.labeling().settings().format()
        self.assertTrue(format.mask().enabled())

        # modify labeling settings of the lines layer
        label_settings = self.lines_with_labels.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # polygons
            QgsSymbolLayerReference(self.polys_layer.id(), QgsSymbolLayerId("", 0)),
        ])
        label_settings.setFormat(fmt)
        self.lines_with_labels.labeling().setSettings(label_settings)

        # new map settings with a line symbology that has labels
        self.map_settings.setLayers([self.points_layer, self.lines_with_labels, self.polys_layer])

        self.check_renderings(self.map_settings, "multiple_label_masks_different_sets")

        # restore map settings
        self.map_settings.setLayers([self.points_layer, self.lines_layer, self.polys_layer])
    def test_multiple_label_masks_same_set(self):
        """Two layers define label masks targeting the same symbol layer;
        both must be combined on that layer."""
        # modify labeling settings of the polys layer
        label_settings = self.polys_layer.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_with_labels.id(), QgsSymbolLayerId("", 0)),
        ])
        label_settings.setFormat(fmt)
        self.polys_layer.labeling().setSettings(label_settings)

        format = self.polys_layer.labeling().settings().format()
        self.assertTrue(format.mask().enabled())

        # modify labeling settings of the lines layer
        label_settings = self.lines_with_labels.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_with_labels.id(), QgsSymbolLayerId("", 0)),
        ])
        label_settings.setFormat(fmt)
        self.lines_with_labels.labeling().setSettings(label_settings)

        # new map settings with a line symbology that has labels
        self.map_settings.setLayers([self.points_layer, self.lines_with_labels, self.polys_layer])

        self.check_renderings(self.map_settings, "multiple_label_masks_same_set")

        # restore map settings
        self.map_settings.setLayers([self.points_layer, self.lines_layer, self.polys_layer])
    def test_label_mask_subsymbol(self):
        """Masking can target an individual sub-symbol layer, addressed by
        a nested symbol layer path, of a composite line symbol."""
        # new map settings with a line symbology that has sub symbols
        self.map_settings.setLayers([self.points_layer, self.lines_layer2, self.polys_layer])

        # modify labeling settings
        label_settings = self.polys_layer.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # mask only vertical segments of "roads"
            QgsSymbolLayerReference(self.lines_layer2.id(), QgsSymbolLayerId("", [1, 0])),
            # the black jets
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("B52", 0)),
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("Jet", 0))])
        label_settings.setFormat(fmt)
        self.polys_layer.labeling().setSettings(label_settings)

        format = self.polys_layer.labeling().settings().format()
        self.assertTrue(format.mask().enabled())

        self.check_renderings(self.map_settings, "label_mask_subsymbol")

        # restore original map settings
        self.map_settings.setLayers([self.points_layer, self.lines_layer, self.polys_layer])
    def test_label_mask_dd(self):
        """ test label mask with data defined properties """
        label_settings = self.polys_layer.labeling().settings()
        fmt = label_settings.format()

        # static settings: mask disabled with placeholder values ...
        fmt.mask().setEnabled(False)
        fmt.mask().setSize(1.0)
        fmt.mask().setOpacity(0.42)

        # mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0)),
            # the black jets
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("B52", 0)),
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("Jet", 0))])

        # overwrite with data-defined properties
        fmt.dataDefinedProperties().setProperty(QgsPalLayerSettings.MaskEnabled, QgsProperty.fromExpression('1'))
        fmt.dataDefinedProperties().setProperty(QgsPalLayerSettings.MaskBufferSize, QgsProperty.fromExpression('4.0'))
        fmt.dataDefinedProperties().setProperty(QgsPalLayerSettings.MaskOpacity, QgsProperty.fromExpression('100.0'))

        context = QgsRenderContext()
        fmt.updateDataDefinedProperties(context)

        # ... the evaluated data-defined properties must win
        self.assertEqual(fmt.mask().enabled(), True)
        self.assertEqual(fmt.mask().size(), 4.0)
        self.assertEqual(fmt.mask().opacity(), 1.0)

        label_settings.setFormat(fmt)
        self.polys_layer.labeling().setSettings(label_settings)

        # same control image as the purely static test_label_mask
        self.check_renderings(self.map_settings, "label_mask")
    def test_label_mask_rule_labeling(self):
        """Label masks defined on individual rules of a rule-based labeling
        are each applied with their own masked symbol layer set."""
        # new map settings with a rule based labeling
        self.map_settings.setLayers([self.points_layer, self.lines_layer, self.polys_layer2])

        # modify labeling settings of one rule
        # (loop + break leaves ``child`` bound to the 'Tadam' rule)
        for child in self.polys_layer2.labeling().rootRule().children():
            if child.description() == 'Tadam':
                break
        label_settings = child.settings()
        label_settings.priority = 3
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0)),
            # the black jets
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("B52", 0)),
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("Jet", 0))])
        label_settings.setFormat(fmt)
        child.setSettings(label_settings)

        # modify labeling settings of another rule (first non-'Tadam' one)
        for child in self.polys_layer2.labeling().rootRule().children():
            if child.description() != 'Tadam':
                break
        label_settings = child.settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the polygons
            QgsSymbolLayerReference(self.polys_layer2.id(), QgsSymbolLayerId("", 0)),
        ])
        label_settings.setFormat(fmt)
        child.setSettings(label_settings)

        self.check_renderings(self.map_settings, "rule_label_mask")

        # restore map settings
        self.map_settings.setLayers([self.points_layer, self.lines_layer, self.polys_layer])
    def test_label_mask_symbol_levels(self):
        """Label masks must still apply when the masked layer renders with
        symbol levels enabled."""
        # modify labeling settings
        label_settings = self.polys_layer.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0)),
            # the black jets
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("B52", 0)),
            QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("Jet", 0))])
        label_settings.setFormat(fmt)
        self.polys_layer.labeling().setSettings(label_settings)

        format = self.polys_layer.labeling().settings().format()
        self.assertTrue(format.mask().enabled())

        # enable symbol levels
        self.lines_layer.renderer().setUsingSymbolLevels(True)

        self.check_renderings(self.map_settings, "label_mask_symbol_levels")
    def test_symbol_layer_mask(self):
        """A QgsMaskMarkerSymbolLayer on the point symbol masks the road
        casing wherever a point is drawn."""
        p = QgsMarkerSymbol.createSimple({'color': '#fdbf6f', 'size': "7"})
        self.points_layer.setRenderer(QgsSingleSymbolRenderer(p))

        # the mask shape is a larger circle around each point
        circle_symbol = QgsMarkerSymbol.createSimple({'size': '10'})
        mask_layer = QgsMaskMarkerSymbolLayer()
        mask_layer.setSubSymbol(circle_symbol)
        mask_layer.setMasks([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0)),
        ])
        # add this mask layer to the point layer
        self.points_layer.renderer().symbol().appendSymbolLayer(mask_layer)

        self.check_renderings(self.map_settings, "sl_mask")
    def test_multiple_masks_same_symbol_layer(self):
        """Test multiple masks that occlude the same symbol layer"""
        #
        # 1. a symbol layer mask
        #
        p = QgsMarkerSymbol.createSimple({'color': '#fdbf6f', 'size': "7"})
        self.points_layer.setRenderer(QgsSingleSymbolRenderer(p))

        circle_symbol = QgsMarkerSymbol.createSimple({'size': '10'})
        mask_layer = QgsMaskMarkerSymbolLayer()
        mask_layer.setSubSymbol(circle_symbol)
        mask_layer.setMasks([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0)),
        ])
        # add this mask layer to the point layer
        self.points_layer.renderer().symbol().appendSymbolLayer(mask_layer)

        #
        # 2. a label mask targeting the same road symbol layer
        #

        # modify labeling settings
        label_settings = self.polys_layer.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0))
        ])
        label_settings.setFormat(fmt)
        self.polys_layer.labeling().setSettings(label_settings)

        self.check_renderings(self.map_settings, "multiple_masks_same_sl")
    def test_multiple_masks_different_symbol_layers_same_layer(self):
        """Test multiple masks that occlude different symbol layers of the same layer.
        The UI should disallow this settings. We test here that only one mask is retained"""
        #
        # 1. a symbol layer mask on the yellow road part
        #
        p = QgsMarkerSymbol.createSimple({'color': '#fdbf6f', 'size': "7"})
        self.points_layer.setRenderer(QgsSingleSymbolRenderer(p))

        circle_symbol = QgsMarkerSymbol.createSimple({'size': '10'})
        mask_layer = QgsMaskMarkerSymbolLayer()
        mask_layer.setSubSymbol(circle_symbol)
        mask_layer.setMasks([
            # the yellow part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 1)),
        ])
        # add this mask layer to the point layer
        self.points_layer.renderer().symbol().appendSymbolLayer(mask_layer)

        #
        # 2. a label mask on the black road part of the same layer
        #

        # modify labeling settings
        label_settings = self.polys_layer.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0))
        ])
        label_settings.setFormat(fmt)
        self.polys_layer.labeling().setSettings(label_settings)

        self.check_renderings(self.map_settings, "multiple_masks_different_sl")
    def test_multiple_masks_different_symbol_layers_same_layer2(self):
        """Test multiple masks that occlude different symbol layers of the same layer - 2nd possible order
        The UI should disallow this settings. We test here that only one mask is retained"""
        #
        # 1. a symbol layer mask on the black road part
        #
        p = QgsMarkerSymbol.createSimple({'color': '#fdbf6f', 'size': "7"})
        self.points_layer.setRenderer(QgsSingleSymbolRenderer(p))

        circle_symbol = QgsMarkerSymbol.createSimple({'size': '10'})
        mask_layer = QgsMaskMarkerSymbolLayer()
        mask_layer.setSubSymbol(circle_symbol)
        mask_layer.setMasks([
            # the black part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0)),
        ])
        # add this mask layer to the point layer
        self.points_layer.renderer().symbol().appendSymbolLayer(mask_layer)

        #
        # 2. a label mask on the yellow road part of the same layer
        #

        # modify labeling settings
        label_settings = self.polys_layer.labeling().settings()
        fmt = label_settings.format()
        # enable a mask
        fmt.mask().setEnabled(True)
        fmt.mask().setSize(4.0)
        # and mask other symbol layers underneath
        fmt.mask().setMaskedSymbolLayers([
            # the yellow part of roads
            QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 1))
        ])
        label_settings.setFormat(fmt)
        self.polys_layer.labeling().setSettings(label_settings)

        self.check_renderings(self.map_settings, "multiple_masks_different_sl2")
    def test_mask_symbollayer_preview(self):
        """Mask symbol layers must be visible in symbol previews."""
        #
        # Masks should be visible in previews
        #
        p = QgsMarkerSymbol.createSimple({'color': '#fdbf6f', 'size': "7"})
        circle_symbol = QgsMarkerSymbol.createSimple({'size': '10'})
        mask_layer = QgsMaskMarkerSymbolLayer()
        mask_layer.setSubSymbol(circle_symbol)
        p.insertSymbolLayer(0, mask_layer)

        # NOTE: the lambdas capture ``tmp`` late; it is (re)assigned just
        # before each render_function() call in the loop body below.
        for control_name, render_function in [
                ("as_image", lambda: p.asImage(QSize(64, 64)).save(tmp)),
                ("as_big_preview", lambda: p.bigSymbolPreviewImage().save(tmp)),
                ("sl_preview", lambda:
                 QgsSymbolLayerUtils.symbolLayerPreviewIcon(mask_layer,
                                                            QgsUnitTypes.RenderPixels,
                                                            QSize(64, 64)).pixmap(QSize(64, 64)).save(tmp))
        ]:
            tmp = getTempfilePath('png')
            render_function()
            self.checker.setControlName(control_name)
            self.checker.setRenderedImage(tmp)
            # allow a small mismatch tolerance (90) for previews
            res = self.checker.compareImages(control_name, 90)
            self.report += self.checker.report()
            self.assertTrue(res)
def test_mask_with_effect(self):
p = QgsMarkerSymbol.createSimple({'color': '#fdbf6f', 'size': "7"})
self.points_layer.setRenderer(QgsSingleSymbolRenderer(p))
circle_symbol = QgsMarkerSymbol.createSimple({'size': '12'})
mask_layer = QgsMaskMarkerSymbolLayer()
mask_layer.setSubSymbol(circle_symbol)
mask_layer.setMasks([
# the yellow part of roads
QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 1)),
])
# add an outer glow effect to the mask layer
blur = QgsOuterGlowEffect.create({"enabled": "1",
"blur_level": "6.445",
"blur_unit": "MM",
"opacity": "1",
"spread": "0.6",
"spread_unit": "MM",
"color1": "0,0,255,255",
"draw_mode": "2"
})
mask_layer.setPaintEffect(blur)
# add this mask layer to the point layer
self.points_layer.renderer().symbol().appendSymbolLayer(mask_layer)
self.check_renderings(self.map_settings, "mask_with_effect")
def test_label_mask_with_effect(self):
# modify labeling settings
label_settings = self.polys_layer.labeling().settings()
fmt = label_settings.format()
# enable a mask
fmt.mask().setEnabled(True)
fmt.mask().setSize(4.0)
# and mask other symbol layers underneath
fmt.mask().setMaskedSymbolLayers([
# the black part of roads
QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0)),
# the black jets
QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("B52", 0)),
QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("Jet", 0))])
# add an outer glow effect to the mask
blur = QgsOuterGlowEffect.create({"enabled": "1",
"blur_level": "6.445",
"blur_unit": "MM",
"opacity": "1",
"spread": "0.6",
"spread_unit": "MM",
"color1": "0,0,255,255",
"draw_mode": "2"
})
fmt.mask().setPaintEffect(blur)
label_settings.setFormat(fmt)
self.polys_layer.labeling().setSettings(label_settings)
format = self.polys_layer.labeling().settings().format()
self.assertTrue(format.mask().enabled())
self.check_renderings(self.map_settings, "label_mask_with_effect")
def test_layout_exports(self):
"""Test mask effects in a layout export at 300 dpi"""
# modify labeling settings
label_settings = self.polys_layer.labeling().settings()
fmt = label_settings.format()
# enable a mask
fmt.mask().setEnabled(True)
fmt.mask().setSize(4.0)
# and mask other symbol layers underneath
fmt.mask().setMaskedSymbolLayers([
# the black part of roads
QgsSymbolLayerReference(self.lines_layer.id(), QgsSymbolLayerId("", 0)),
# the black jets
QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("B52", 0)),
QgsSymbolLayerReference(self.points_layer.id(), QgsSymbolLayerId("Jet", 0))])
# add an outer glow effect to the mask
blur = QgsOuterGlowEffect.create({"enabled": "1",
"blur_level": "6.445",
"blur_unit": "MM",
"opacity": "1",
"spread": "0.6",
"spread_unit": "MM",
"color1": "0,0,255,255",
"draw_mode": "2"
})
fmt.mask().setPaintEffect(blur)
label_settings.setFormat(fmt)
self.polys_layer.labeling().setSettings(label_settings)
layout = QgsLayout(QgsProject.instance())
page = QgsLayoutItemPage(layout)
page.setPageSize(QgsLayoutSize(50, 33))
layout.pageCollection().addPage(page)
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(1, 1, 48, 32))
map.setFrameEnabled(True)
layout.addLayoutItem(map)
map.setExtent(self.lines_layer.extent())
map.setLayers([self.points_layer, self.lines_layer, self.polys_layer])
image = QImage(591, 591, QImage.Format_RGB32)
image.setDotsPerMeterX(300 / 25.3 * 1000)
image.setDotsPerMeterY(300 / 25.3 * 1000)
image.fill(0)
p = QPainter(image)
exporter = QgsLayoutExporter(layout)
exporter.renderPage(p, 0)
p.end()
tmp = getTempfilePath('png')
image.save(tmp)
control_name = "layout_export"
self.checker.setControlName(control_name)
self.checker.setRenderedImage(tmp)
res = self.checker.compareImages(control_name)
self.report += self.checker.report()
self.assertTrue(res)
if __name__ == '__main__':
start_app()
unittest.main()
| gpl-2.0 | 8,355,357,951,410,459,000 | 40.404255 | 262 | 0.614183 | false |
CapOM/ChromiumGStreamerBackend | chrome/test/chromedriver/embed_js_in_cpp.py | 165 | 1485 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Embeds standalone JavaScript snippets in C++ code.
Each argument to the script must be a file containing an associated JavaScript
function (e.g., evaluate_script.js should contain an evaluateScript function).
This is called the exported function of the script. The entire script will be
put into a C-style string in the form of an anonymous function which invokes
the exported function when called.
"""
import optparse
import os
import sys
import cpp_source
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--directory', type='string', default='.',
help='Path to directory where the cc/h js file should be created')
options, args = parser.parse_args()
global_string_map = {}
for js_file in args:
base_name = os.path.basename(js_file)[:-3].title().replace('_', '')
func_name = base_name[0].lower() + base_name[1:]
script_name = 'k%sScript' % base_name
with open(js_file, 'r') as f:
contents = f.read()
script = 'function() { %s; return %s.apply(null, arguments) }' % (
contents, func_name)
global_string_map[script_name] = script
cpp_source.WriteSource('js', 'chrome/test/chromedriver/chrome',
options.directory, global_string_map)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -1,214,148,523,748,064,500 | 32 | 78 | 0.680135 | false |
JoaquimPatriarca/senpy-for-gis | gasp/gdal/manage/general.py | 1 | 3528 | """
General tools for data management
"""
def copy_features(inLyr, outLyr, outDefn, only_geom=True):
"""
Copy the features of one layer to another layer...
If the layers have the same fields, this method could also copy
the tabular data
TODO: See if the input is a layer or not and make arrangements
"""
from osgeo import ogr
for f in inLyr:
geom = f.GetGeometryRef()
new = ogr.Feature(outDefn)
new.SetGeometry(geom)
# Copy tabular data
if not only_geom:
for i in range(0, outDefn.GetFieldCount()):
new.SetField(outDefn.GetFieldDefn(i).GetNameRef(), f.GetField(i))
outLyr.CreateFeature(new)
new.Destroy()
f.Destroy()
def ogr_merge(shp_to_merge, merged_shp, srs=None, fields_to_copy=None):
"""
Merge all listed datasets into a single dataset
"""
import os
from osgeo import ogr
from gasp import goToList
from gasp.oss.info import get_filename
from gasp.gdal import get_driver_name
from gasp.gdal import get_geom_attr
from gasp.gdal.manage.fields import ogr_list_fields_defn
from gasp.gdal.proj import ogr_def_proj
# Create output
o = ogr.GetDriverByName(
get_driver_name(merged_shp)).CreateDataSource(merged_shp)
# Get SRS
if not srs:
from gasp.gdal.proj import get_shp_sref
srsObj = get_shp_sref(shp_to_merge[0])
else:
from gasp.gdal.proj import get_sref_from_epsg
srsObj = get_sref_from_epsg(srs)
olyr = o.CreateLayer(
get_filename(merged_shp, forceLower=True),
srsObj,
geom_type=get_geom_attr(
shp_to_merge[0], name=None, py_cls=True)
)
fields_to_copy = goToList(fields_to_copy)
# Add all fields existing in the inputs
fields_defn = {}
fields_shp = {}
for shp in shp_to_merge:
flds = ogr_list_fields_defn(shp)
fields_shp[shp] = flds.keys()
if not fields_to_copy:
for fld in flds:
if fld not in fields_defn:
fields_defn[fld] = flds[fld].keys()[0]
olyr.CreateField(ogr.FieldDefn(fld, flds[fld].keys()[0]))
else:
for fld in flds:
if fld not in fields_defn and fld in fields_to_copy:
fields_defn[fld] = flds[fld].keys()[0]
olyr.CreateField(ogr.FieldDefn(fld, flds[fld].keys()[0]))
# Join all features together on the same dataset
featDefn = olyr.GetLayerDefn()
for i in range(len(shp_to_merge)):
dt = ogr.GetDriverByName(
get_driver_name(shp_to_merge[i])).Open(shp_to_merge[i], 0)
lyr = dt.GetLayer()
for feat in lyr:
geom = feat.GetGeometryRef()
new = ogr.Feature(featDefn)
new.SetGeometry(geom)
for e in range(0, featDefn.GetFieldCount()):
name = featDefn.GetFieldDefn(e).GetNameRef()
if name in fields_shp[shp_to_merge[i]]:
new.SetField(name, feat.GetField(name))
olyr.CreateFeature(new)
new.Destroy()
feat.Destroy()
dt.Destroy()
o.Destroy()
return merged_shp
| gpl-3.0 | -1,015,224,049,050,324,400 | 28.4 | 81 | 0.541667 | false |
hkariti/ansible | test/runner/lib/sanity/ansible_doc.py | 16 | 3498 | """Sanity test for ansible-doc."""
from __future__ import absolute_import, print_function
import re
from lib.sanity import (
SanityMultipleVersion,
SanityFailure,
SanitySuccess,
SanitySkipped,
SanityMessage,
)
from lib.util import (
SubprocessError,
display,
intercept_command,
)
from lib.ansible_util import (
ansible_environment,
)
from lib.config import (
SanityConfig,
)
class AnsibleDocTest(SanityMultipleVersion):
"""Sanity test for ansible-doc."""
def test(self, args, targets, python_version):
"""
:type args: SanityConfig
:type targets: SanityTargets
:type python_version: str
:rtype: SanityResult
"""
with open('test/sanity/ansible-doc/skip.txt', 'r') as skip_fd:
skip_modules = set(skip_fd.read().splitlines())
modules = sorted(set(m for i in targets.include_external for m in i.modules) -
set(m for i in targets.exclude_external for m in i.modules) -
skip_modules)
if not modules:
return SanitySkipped(self.name, python_version=python_version)
module_paths = dict((t.module, t.path) for t in targets.targets if t.module)
env = ansible_environment(args, color=False)
cmd = ['ansible-doc'] + modules
try:
stdout, stderr = intercept_command(args, cmd, target_name='ansible-doc', env=env, capture=True, python_version=python_version)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
errors = stderr.strip().splitlines()
messages = [self.parse_error(e, module_paths) for e in errors]
if messages and all(messages):
return SanityFailure(self.name, messages=messages, python_version=python_version)
if status:
summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
return SanityFailure(self.name, summary=summary, python_version=python_version)
if stdout:
display.info(stdout.strip(), verbosity=3)
if stderr:
summary = u'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
return SanityFailure(self.name, summary=summary, python_version=python_version)
return SanitySuccess(self.name, python_version=python_version)
@staticmethod
def parse_error(error, module_paths):
"""
:type error: str
:type module_paths: dict[str, str]
:rtype: SanityMessage | None
"""
# example error messages from lib/ansible/cli/doc.py:
# ERROR! module ping missing documentation (or could not parse documentation): expected string or buffer
# [ERROR]: module ping has a documentation error formatting or is missing documentation.
match = re.search(r'^[^ ]*ERROR[^ ]* (?P<type>[^ ]+) (?P<name>[^ ]+) (?P<text>.*)$', error)
if match:
groups = match.groupdict()
error_type = groups['type']
error_name = groups['name']
error_text = groups['text']
if error_type == 'module' and error_name in module_paths:
return SanityMessage(
message=error_text,
path=module_paths[error_name],
)
return None
| gpl-3.0 | -5,135,083,825,371,624,000 | 32 | 138 | 0.602916 | false |
ryfeus/lambda-packs | Tensorflow/source/tensorboard/plugins/distribution/distributions_plugin.py | 5 | 3411 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Distributions (a.k.a. compressed histograms) plugin.
See `http_api.md` in this directory for specifications of the routes for
this plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from werkzeug import wrappers
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard.plugins.distribution import compressor
from tensorboard.plugins.histogram import histograms_plugin
class DistributionsPlugin(base_plugin.TBPlugin):
"""Distributions Plugin for TensorBoard.
This supports both old-style summaries (created with TensorFlow ops
that output directly to the `histo` field of the proto) and new-style
summaries (as created by the `tensorboard.plugins.histogram.summary`
module).
"""
plugin_name = 'distributions'
def __init__(self, context):
"""Instantiates DistributionsPlugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
self._histograms_plugin = histograms_plugin.HistogramsPlugin(context)
self._multiplexer = context.multiplexer
def get_plugin_apps(self):
return {
'/distributions': self.distributions_route,
'/tags': self.tags_route,
}
def is_active(self):
"""This plugin is active iff any run has at least one histogram tag.
(The distributions plugin uses the same data source as the histogram
plugin.)
"""
return self._histograms_plugin.is_active()
def distributions_impl(self, tag, run):
"""Result of the form `(body, mime_type)`, or `ValueError`."""
(histograms, mime_type) = self._histograms_plugin.histograms_impl(
tag, run, downsample_to=None)
return ([self._compress(histogram) for histogram in histograms],
mime_type)
def _compress(self, histogram):
(wall_time, step, buckets) = histogram
converted_buckets = compressor.compress_histogram(buckets)
return [wall_time, step, converted_buckets]
def index_impl(self):
return self._histograms_plugin.index_impl()
@wrappers.Request.application
def tags_route(self, request):
index = self.index_impl()
return http_util.Respond(request, index, 'application/json')
@wrappers.Request.application
def distributions_route(self, request):
"""Given a tag and single run, return an array of compressed histograms."""
tag = request.args.get('tag')
run = request.args.get('run')
try:
(body, mime_type) = self.distributions_impl(tag, run)
code = 200
except ValueError as e:
(body, mime_type) = (str(e), 'text/plain')
code = 400
return http_util.Respond(request, body, mime_type, code=code)
| mit | 5,950,952,465,529,346,000 | 33.806122 | 80 | 0.701847 | false |
telwertowski/QGIS | tests/src/python/test_qgscolorbutton.py | 35 | 3786 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsColorButton.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '25/05/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.gui import QgsColorButton
from qgis.core import QgsApplication, QgsProjectColorScheme
from qgis.testing import start_app, unittest
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtTest import QSignalSpy
start_app()
class TestQgsColorButton(unittest.TestCase):
def testClearingColors(self):
"""
Test setting colors to transparent
"""
# start with a valid color
button = QgsColorButton()
button.setAllowOpacity(True)
button.setColor(QColor(255, 100, 200, 255))
self.assertEqual(button.color(), QColor(255, 100, 200, 255))
# now set to no color
button.setToNoColor()
# ensure that only the alpha channel has changed - not the other color components
self.assertEqual(button.color(), QColor(255, 100, 200, 0))
def testNulling(self):
"""
Test clearing colors to null
"""
# start with a valid color
button = QgsColorButton()
button.setAllowOpacity(True)
button.setColor(QColor(255, 100, 200, 255))
self.assertEqual(button.color(), QColor(255, 100, 200, 255))
spy_changed = QSignalSpy(button.colorChanged)
spy_cleared = QSignalSpy(button.cleared)
button.setColor(QColor(50, 100, 200, 255))
self.assertEqual(button.color(), QColor(50, 100, 200, 255))
self.assertEqual(len(spy_changed), 1)
self.assertEqual(len(spy_cleared), 0)
# now set to null
button.setToNull()
self.assertEqual(button.color(), QColor())
self.assertEqual(len(spy_changed), 2)
self.assertEqual(len(spy_cleared), 1)
button.setToNull()
self.assertEqual(button.color(), QColor())
# should not be refired, the color wasn't changed
self.assertEqual(len(spy_changed), 2)
# SHOULD be refired
self.assertEqual(len(spy_cleared), 2)
def testLinkProjectColor(self):
"""
Test linking to a project color
"""
project_scheme = [s for s in QgsApplication.colorSchemeRegistry().schemes() if isinstance(s, QgsProjectColorScheme)][0]
project_scheme.setColors([[QColor(255, 0, 0), 'col1'], [QColor(0, 255, 0), 'col2']])
button = QgsColorButton()
spy = QSignalSpy(button.unlinked)
button.setColor(QColor(0, 0, 255))
self.assertFalse(button.linkedProjectColorName())
button.linkToProjectColor('col1')
self.assertEqual(button.linkedProjectColorName(), 'col1')
self.assertEqual(button.color().name(), '#ff0000')
self.assertEqual(len(spy), 0)
button.unlink()
self.assertFalse(button.linkedProjectColorName())
self.assertEqual(button.color().name(), '#0000ff')
self.assertEqual(len(spy), 1)
button.linkToProjectColor('col2')
self.assertEqual(button.linkedProjectColorName(), 'col2')
self.assertEqual(button.color().name(), '#00ff00')
self.assertEqual(len(spy), 1)
project_scheme.setColors([[QColor(255, 0, 0), 'xcol1'], [QColor(0, 255, 0), 'xcol2']])
# linked color no longer exists
self.assertFalse(button.linkedProjectColorName())
self.assertEqual(button.color().name(), '#0000ff')
self.assertEqual(len(spy), 2)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -6,647,135,460,310,048,000 | 33.733945 | 127 | 0.644216 | false |
tingletech/solr_api | testindex/grab_random_solr_docs.py | 1 | 2229 | import solr
import random
import json
import datetime
SOLR_NEW = solr.Solr('http://127.0.0.1:8983/solr/dc-collection')
SOLR=solr.SearchHandler(solr.Solr('https://registry.cdlib.org/solr',
post_headers = { 'X-Authentication-Token':'xxxyyyzzz'}), "/query")
def get_collection_urls():
q_collections=SOLR(q="*:*", rows=0, facet_field="collection_url",
facet="true", facet_limit=20000)
facets = q_collections.facet_counts
f_fields = facets['facet_fields']
return f_fields['collection_url']
def get_random_docs(collection_urls):
docs = []
for u in collection_urls:
recs_in_coll = SOLR(q="collection_url:{}".format(u))
num = recs_in_coll.numFound
sample_size = num / 100 if num / 100 else 1
print "NUMBER:{} SAMPLE:{}".format(num, sample_size)
for i in range(sample_size):
rand_index = random.randrange(num)
q_rec = SOLR(q="collection_url:{}".format(u), rows=1, start=rand_index)
#save locally
doc_new = {}
for key, val in q_rec.results[0].items():
if '_ss' in key:
continue
if key in ['score', '_version_', 'timestamp',]:
continue
doc_new[key] = val
docs.append(doc_new)
return docs
def serialize_datetime(obj):
if isinstance(obj, datetime.datetime):
return obj.strftime("%Y%m%d-%H:%M:%S")
return obj
def save_docs_to_file(docs, fname=None):
if not fname:
fname = 'random_docs-{}.json'.format(
datetime.datetime.now().strftime('%Y%m%d-%H%M'))
with open(fname, 'w') as foo:
foo.write(json.dumps(docs, default=serialize_datetime))
def save_to_local_solr(docs):
#put into new index: start a docker index with mapped volume for data and
#upload docs to it
for doc in docs:
print "DOC:{}".format(doc['id'])
solr_new.add(doc)
solr_new.commit()
def create_new_random_test_index():
collection_urls = get_collection_urls()
docs_selected = get_random_docs(collection_urls)
save_docs_to_file(docs_selected)
save_to_local_solr(docs_selected)
if __name__=="__main__":
print 'Generate new test data set'
| bsd-3-clause | 5,988,211,790,530,985,000 | 32.268657 | 83 | 0.605653 | false |
matteo88/gasistafelice | gasistafelice/rest/views/blocks/transactions.py | 2 | 6543 | from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.core import urlresolvers
from django.http import HttpResponse, HttpResponseServerError
from flexi_auth.models import ObjectWithContext
from gasistafelice.rest.views.blocks.base import BlockSSDataTables, ResourceBlockAction, CREATE_CSV
from gasistafelice.consts import VIEW_CONFIDENTIAL, CONFIDENTIAL_VERBOSE_HTML, CASH
from gasistafelice.base.templatetags.accounting_tags import human_readable_account_csv,human_readable_kind, signed_ledger_entry_amount
from django.template.loader import render_to_string
import datetime, csv
import cStringIO as StringIO
#from simple_accounting.models import economic_subject, AccountingDescriptor
#from simple_accounting.models import account_type
#from simple_accounting.exceptions import MalformedTransaction
#from simple_accounting.models import AccountingProxy
#from simple_accounting.utils import register_transaction, register_simple_transaction
#from gasistafelice.base.accounting import PersonAccountingProxy
from gasistafelice.lib.shortcuts import render_to_xml_response, render_to_context_response
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
#OLD: ENCODING = "iso-8859-1"
class Block(BlockSSDataTables):
BLOCK_NAME = "transactions"
BLOCK_DESCRIPTION = _("Economic transactions")
BLOCK_VALID_RESOURCE_TYPES = ["gas", "supplier", "pact"]
COLUMN_INDEX_NAME_MAP = {
0: 'id',
1: 'transaction__date',
2: '',
3: '',
4: 'amount',
5: 'transaction__description',
}
#WAS 2: 'transaction__issuer',
#WAS 3: 'transaction__source',
#WAS 3: 'transaction__kind', --> FIXME: In case of translation the search does not operate correctly
def __init__(self, *args, **kw):
super(Block, self).__init__(*args, **kw)
# Default start closed. Mainly for GAS -> Accounting tab ("Conto")
self.start_open = False
def _check_permission(self, request):
if request.resource.gas:
return request.user.has_perm(
CASH, obj=ObjectWithContext(request.resource.gas)
)
else:
return True
def _get_resource_list(self, request):
#Accounting.LedgerEntry or Transactions
return request.resource.economic_movements
def get_response(self, request, resource_type, resource_id, args):
"""Check for confidential access permission and call superclass if needed"""
if not self._check_permission(request):
return render_to_xml_response(
"blocks/table_html_message.xml",
{ 'msg' : CONFIDENTIAL_VERBOSE_HTML }
)
if args == CREATE_CSV:
return self._create_csv(request)
return super(Block, self).get_response(request, resource_type, resource_id, args)
#TODO: Filter grid by
# Date From --> To
# Kind iof transctions: can be checkbox list multiselect
# Subject: Radio or multiple checkbox onto values [GAS borselino, GASMemmbers, Suppliers]
# def options_response(self, request, resource_type, resource_id):
# """Get options for transaction block.
# WARNING: call to this method doesn't pass through get_response
# so you have to reset self.request and self.resource attribute if you want
# """
# self.request = request
# self.resource = request.resource
# fields = []
# #DATE FROM
# fields.append({
# 'field_type' : 'datetime',
# 'field_label' : 'from date',
# 'field_name' : 'from',
# 'field_values' : [{ 'value' : '22/09/2012', 'selected' : ''}]
# })
# #DATE TO
# fields.append({
# 'field_type' : 'datetime',
# 'field_label' : 'to date',
# 'field_name' : 'to',
# 'field_values' : [{ 'value' : '28/09/2012', 'label' : 'labelvalue', 'selected' : 'sel'}]
# })
# ctx = {
# 'block_name' : self.description,
# 'fields': fields,
# }
# #Can use html template loader
# return render_to_xml_response('eco-options.xml', ctx)
def _get_user_actions(self, request):
user_actions = []
resource_type = request.resource.resource_type
if self._check_permission(request):
user_actions += [
ResourceBlockAction(
block_name = self.BLOCK_NAME,
resource = request.resource,
name=CREATE_CSV, verbose_name=_("Create CSV"),
popup_form=False,
method="OPENURL",
),
]
return user_actions
def _create_csv(self, request):
""" Create CSV of this block transactions
#MATTEO TOREMOVE: lascio la prima implementazione (da levare
ovviamente dall'integrazione) come monito a me stesso -->
kiss, kiss e ancora kiss !!
#NOTA: eliminare nell'integrazione tutte le righe commentate con #OLD:
"""
headers = [_(u'Id'), _(u'Data'), _(u'Account'), _(u'Kind'), _(u'Cash amount'), _(u'Description')]
records = self._get_resource_list(request)
csvfile = StringIO.StringIO()
writer = csv.writer(csvfile, delimiter=';',quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(headers)
for res in self._get_resource_list(request):
writer.writerow([res.pk,
'{0:%a %d %b %Y %H:%M}'.format(res.date),
human_readable_account_csv(res.account),
human_readable_kind(res.transaction.kind),
signed_ledger_entry_amount(res),
res.transaction.description.encode("utf-8", "ignore")
])
csv_data = csvfile.getvalue()
if not csv_data:
rv = HttpResponseServerError(_('Report not generated'))
else:
response = HttpResponse(csv_data, content_type='text/csv')
filename = "%(res)s_%(date)s.csv" % {
'res': request.resource,
'date' : '{0:%Y%m%d_%H%M}'.format(datetime.datetime.now())
}
response['Content-Disposition'] = "attachment; filename=" + filename
rv = response
return rv
| agpl-3.0 | -1,768,851,906,807,601,700 | 35.966102 | 134 | 0.583066 | false |
lostdj/Jaklin-OpenJFX | modules/web/src/main/native/Tools/gtk/gtkdoc.py | 2 | 18084 | # Copyright (C) 2011 Igalia S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import errno
import logging
import os
import os.path
import subprocess
import sys
class GTKDoc(object):
"""Class that controls a gtkdoc run.
Each instance of this class represents one gtkdoc configuration
and set of documentation. The gtkdoc package is a series of tools
run consecutively which converts inline C/C++ documentation into
docbook files and then into HTML. This class is suitable for
generating documentation or simply verifying correctness.
Keyword arguments:
output_dir -- The path where gtkdoc output should be placed. Generation
may overwrite file in this directory. Required.
module_name -- The name of the documentation module. For libraries this
is typically the library name. Required if not library path
is given.
source_dirs -- A list of paths to the source code to be scanned. Required.
ignored_files -- A list of filenames to ignore in the source directory. It is
only necessary to provide the basenames of these files.
Typically it is important to provide an updated list of
ignored files to prevent warnings about undocumented symbols.
namespace -- The library namespace.
decorator -- If a decorator is used to unhide certain symbols in header
files this parameter is required for successful scanning.
(default '')
deprecation_guard -- gtkdoc tries to ensure that symbols marked as deprecated
are encased in this C preprocessor define. This is required
to avoid gtkdoc warnings. (default '')
cflags -- This parameter specifies any preprocessor flags necessary for
building the scanner binary during gtkdoc-scanobj. Typically
this includes all absolute include paths necessary to resolve
all header dependencies. (default '')
ldflags -- This parameter specifies any linker flags necessary for
building the scanner binary during gtkdoc-scanobj. Typically
this includes "-lyourlibraryname". (default '')
library_path -- This parameter specifies the path to the directory where you
library resides used for building the scanner binary during
gtkdoc-scanobj. (default '')
doc_dir -- The path to other documentation files necessary to build
the documentation. This files in this directory as well as
the files in the 'html' subdirectory will be copied
recursively into the output directory. (default '')
main_sgml_file -- The path or name (if a doc_dir is given) of the SGML file
that is the considered the main page of your documentation.
(default: <module_name>-docs.sgml)
version -- The version number of the module. If this is provided,
a version.xml file containing the version will be created
in the output directory during documentation generation.
interactive -- Whether or not errors or warnings should prompt the user
to continue or not. When this value is false, generation
will continue despite warnings. (default False)
virtual_root -- A temporary installation directory which is used as the root
where the actual installation prefix lives; this is mostly
useful for packagers, and should be set to what is given to
make install as DESTDIR.
"""
def __init__(self, args):
# Parameters specific to scanning.
self.module_name = ''
self.source_dirs = []
self.ignored_files = []
self.namespace = ''
self.decorator = ''
self.deprecation_guard = ''
# Parameters specific to gtkdoc-scanobj.
self.cflags = ''
self.ldflags = ''
self.library_path = ''
# Parameters specific to generation.
self.output_dir = ''
self.doc_dir = ''
self.main_sgml_file = ''
# Parameters specific to gtkdoc-fixxref.
self.cross_reference_deps = []
self.interactive = False
self.logger = logging.getLogger('gtkdoc')
for key, value in iter(args.items()):
setattr(self, key, value)
def raise_error_if_not_specified(key):
if not getattr(self, key):
raise Exception('%s not specified.' % key)
raise_error_if_not_specified('output_dir')
raise_error_if_not_specified('source_dirs')
raise_error_if_not_specified('module_name')
# Make all paths absolute in case we were passed relative paths, since
# we change the current working directory when executing subcommands.
self.output_dir = os.path.abspath(self.output_dir)
self.source_dirs = [os.path.abspath(x) for x in self.source_dirs]
if self.library_path:
self.library_path = os.path.abspath(self.library_path)
if not self.main_sgml_file:
self.main_sgml_file = self.module_name + "-docs.sgml"
def generate(self, html=True):
self.saw_warnings = False
self._copy_doc_files_to_output_dir(html)
self._write_version_xml()
self._run_gtkdoc_scan()
self._run_gtkdoc_scangobj()
self._run_gtkdoc_mktmpl()
self._run_gtkdoc_mkdb()
if not html:
return
self._run_gtkdoc_mkhtml()
self._run_gtkdoc_fixxref()
def _delete_file_if_exists(self, path):
if not os.access(path, os.F_OK | os.R_OK):
return
self.logger.debug('deleting %s', path)
os.unlink(path)
def _create_directory_if_nonexistent(self, path):
try:
os.makedirs(path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def _raise_exception_if_file_inaccessible(self, path):
if not os.path.exists(path) or not os.access(path, os.R_OK):
raise Exception("Could not access file at: %s" % path)
def _output_has_warnings(self, outputs):
for output in outputs:
if output and output.find('warning'):
return True
return False
def _ask_yes_or_no_question(self, question):
if not self.interactive:
return True
question += ' [y/N] '
answer = None
while answer != 'y' and answer != 'n' and answer != '':
answer = raw_input(question).lower()
return answer == 'y'
def _run_command(self, args, env=None, cwd=None, print_output=True, ignore_warnings=False):
if print_output:
self.logger.info("Running %s", args[0])
self.logger.debug("Full command args: %s", str(args))
process = subprocess.Popen(args, env=env, cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = [b.decode("utf-8") for b in process.communicate()]
if print_output:
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
if process.returncode != 0:
raise Exception('%s produced a non-zero return code %i'
% (args[0], process.returncode))
if not ignore_warnings and ('warning' in stderr or 'warning' in stdout):
self.saw_warnings = True
if not self._ask_yes_or_no_question('%s produced warnings, '
'try to continue?' % args[0]):
raise Exception('%s step failed' % args[0])
return stdout.strip()
def _copy_doc_files_to_output_dir(self, html=True):
    """Mirror the doc directory into the output directory.

    Plain readable files are (hard-)linked into place; when *html* is true
    the doc directory's html/ subtree is mirrored into output_dir/html too.
    No-op when no doc directory was configured.
    """
    if not self.doc_dir:
        self.logger.info('Not copying any files from doc directory,'
                         ' because no doc directory given.')
        return

    def copy_file_replacing_existing(src, dest):
        # Only mirror plain, readable files; directories are skipped here
        # (the html/ subtree is handled separately below).
        if os.path.isdir(src):
            self.logger.debug('skipped directory %s', src)
            return
        if not os.access(src, os.F_OK | os.R_OK):
            self.logger.debug('skipped unreadable %s', src)
            return

        self._delete_file_if_exists(dest)
        self.logger.debug('created %s', dest)
        # Prefer a hard link; fall back to a symlink when linking fails
        # (e.g. source and destination on different filesystems).
        try:
            os.link(src, dest)
        except OSError:
            os.symlink(src, dest)

    def copy_all_files_in_directory(src, dest):
        for path in os.listdir(src):
            copy_file_replacing_existing(os.path.join(src, path),
                                         os.path.join(dest, path))

    self.logger.info('Copying template files to output directory...')
    self._create_directory_if_nonexistent(self.output_dir)
    copy_all_files_in_directory(self.doc_dir, self.output_dir)

    if not html:
        return

    self.logger.info('Copying HTML files to output directory...')
    html_src_dir = os.path.join(self.doc_dir, 'html')
    html_dest_dir = os.path.join(self.output_dir, 'html')
    self._create_directory_if_nonexistent(html_dest_dir)
    if os.path.exists(html_src_dir):
        copy_all_files_in_directory(html_src_dir, html_dest_dir)
def _write_version_xml(self):
if not self.version:
self.logger.info('No version specified, so not writing version.xml')
return
version_xml_path = os.path.join(self.output_dir, 'version.xml')
src_version_xml_path = os.path.join(self.doc_dir, 'version.xml')
# Don't overwrite version.xml if it was in the doc directory.
if os.path.exists(version_xml_path) and \
os.path.exists(src_version_xml_path):
return
output_file = open(version_xml_path, 'w')
output_file.write(self.version)
output_file.close()
def _ignored_files_basenames(self):
return ' '.join([os.path.basename(x) for x in self.ignored_files])
def _run_gtkdoc_scan(self):
args = ['gtkdoc-scan',
'--module=%s' % self.module_name,
'--rebuild-types']
# Each source directory should be have its own "--source-dir=" prefix.
args.extend(['--source-dir=%s' % path for path in self.source_dirs])
if self.decorator:
args.append('--ignore-decorators=%s' % self.decorator)
if self.deprecation_guard:
args.append('--deprecated-guards=%s' % self.deprecation_guard)
if self.output_dir:
args.append('--output-dir=%s' % self.output_dir)
# gtkdoc-scan wants the basenames of ignored headers, so strip the
# dirname. Different from "--source-dir", the headers should be
# specified as one long string.
ignored_files_basenames = self._ignored_files_basenames()
if ignored_files_basenames:
args.append('--ignore-headers=%s' % ignored_files_basenames)
self._run_command(args)
def _run_gtkdoc_scangobj(self):
    """Run gtkdoc-scangobj, which compiles and runs an introspection program,
    so compiler/linker flags and the runtime library path are passed via the
    environment (CFLAGS, LDFLAGS and the RUN wrapper variable).
    """
    # NOTE(review): this binds the *process-wide* os.environ (no copy), so the
    # CFLAGS/LDFLAGS/RUN changes below leak into every later subprocess call
    # in this process -- confirm that is intended.
    env = os.environ
    ldflags = self.ldflags
    if self.library_path:
        # Let the linker and the introspection binary find the library.
        ldflags = ' "-L%s" ' % self.library_path + ldflags
        current_ld_library_path = env.get('LD_LIBRARY_PATH')
        if current_ld_library_path:
            env['RUN'] = 'LD_LIBRARY_PATH="%s:%s" ' % (self.library_path, current_ld_library_path)
        else:
            env['RUN'] = 'LD_LIBRARY_PATH="%s" ' % self.library_path

    # Caller-supplied flags go first, existing environment flags are appended.
    if ldflags:
        env['LDFLAGS'] = '%s %s' % (ldflags, env.get('LDFLAGS', ''))
    if self.cflags:
        env['CFLAGS'] = '%s %s' % (self.cflags, env.get('CFLAGS', ''))

    if 'CFLAGS' in env:
        self.logger.debug('CFLAGS=%s', env['CFLAGS'])
    if 'LDFLAGS' in env:
        self.logger.debug('LDFLAGS %s', env['LDFLAGS'])
    if 'RUN' in env:
        self.logger.debug('RUN=%s', env['RUN'])
    self._run_command(['gtkdoc-scangobj', '--module=%s' % self.module_name],
                      env=env, cwd=self.output_dir)
def _run_gtkdoc_mktmpl(self):
args = ['gtkdoc-mktmpl', '--module=%s' % self.module_name]
self._run_command(args, cwd=self.output_dir)
def _run_gtkdoc_mkdb(self):
sgml_file = os.path.join(self.output_dir, self.main_sgml_file)
self._raise_exception_if_file_inaccessible(sgml_file)
args = ['gtkdoc-mkdb',
'--module=%s' % self.module_name,
'--main-sgml-file=%s' % sgml_file,
'--source-suffixes=h,c,cpp,cc',
'--output-format=xml',
'--sgml-mode']
if self.namespace:
args.append('--name-space=%s' % self.namespace)
ignored_files_basenames = self._ignored_files_basenames()
if ignored_files_basenames:
args.append('--ignore-files=%s' % ignored_files_basenames)
# Each directory should be have its own "--source-dir=" prefix.
args.extend(['--source-dir=%s' % path for path in self.source_dirs])
self._run_command(args, cwd=self.output_dir)
def _run_gtkdoc_mkhtml(self):
    """Generate the HTML documentation into output_dir/html.

    Raises when the html destination directory is missing or not fully
    accessible (it must be readable, writable and traversable).
    """
    html_dest_dir = os.path.join(self.output_dir, 'html')
    if not os.path.isdir(html_dest_dir):
        raise Exception("%s is not a directory, could not generate HTML"
                        % html_dest_dir)
    elif not os.access(html_dest_dir, os.X_OK | os.R_OK | os.W_OK):
        raise Exception("Could not access %s to generate HTML"
                        % html_dest_dir)

    # gtkdoc-mkhtml expects the SGML path to be absolute.
    sgml_file = os.path.join(os.path.abspath(self.output_dir),
                             self.main_sgml_file)
    self._raise_exception_if_file_inaccessible(sgml_file)
    # Run from the html directory so the generated files land there.
    self._run_command(['gtkdoc-mkhtml', self.module_name, sgml_file],
                      cwd=html_dest_dir)
def _run_gtkdoc_fixxref(self):
args = ['gtkdoc-fixxref',
'--module-dir=html',
'--html-dir=html']
args.extend(['--extra-dir=%s' % extra_dir for extra_dir in self.cross_reference_deps])
self._run_command(args, cwd=self.output_dir, ignore_warnings=True)
def rebase_installed_docs(self):
    """Run gtkdoc-rebase over the installed HTML so cross-document links
    become relative; a no-op when the docs were never installed.

    Raises when called before generate() has produced the output directory.
    """
    if not os.path.isdir(self.output_dir):
        raise Exception("Tried to rebase documentation before generating it.")
    # Installed location: <virtual_root><prefix>/share/gtk-doc/html/<module>.
    html_dir = os.path.join(self.virtual_root + self.prefix, 'share', 'gtk-doc', 'html', self.module_name)
    if not os.path.isdir(html_dir):
        return
    args = ['gtkdoc-rebase',
            '--relative',
            '--html-dir=%s' % html_dir]
    args.extend(['--other-dir=%s' % extra_dir for extra_dir in self.cross_reference_deps])
    if self.virtual_root:
        args.extend(['--dest-dir=%s' % self.virtual_root])
    self._run_command(args, cwd=self.output_dir)
def api_missing_documentation(self):
    """Return the lines of <module>-unused.txt (symbols gtkdoc found no
    documentation for), or an empty list when the file is absent/unreadable.
    """
    unused_doc_file = os.path.join(self.output_dir, self.module_name + "-unused.txt")
    if not os.path.exists(unused_doc_file) or not os.access(unused_doc_file, os.R_OK):
        return []
    # Fix: close the file handle deterministically instead of relying on GC.
    with open(unused_doc_file) as unused_doc:
        return unused_doc.read().splitlines()
class PkgConfigGTKDoc(GTKDoc):
    """Class reads a library's pkgconfig file to guess gtkdoc parameters.

    Some gtkdoc parameters can be guessed by reading a library's pkgconfig
    file, including the cflags, ldflags and version parameters. If you
    provide these parameters as well, they will be appended to the ones
    guessed via the pkgconfig file.

    Keyword arguments:
      pkg_config_path -- Path to the pkgconfig file for the library. Required.
    """

    def __init__(self, pkg_config_path, args):
        super(PkgConfigGTKDoc, self).__init__(args)

        # Honour a caller-provided pkg-config binary, defaulting to the one
        # on PATH.
        pkg_config = os.environ.get('PKG_CONFIG', 'pkg-config')

        if not os.path.exists(pkg_config_path):
            raise Exception('Could not find pkg-config file at: %s'
                            % pkg_config_path)

        # Append pkg-config's flags to whatever the caller already supplied;
        # version and prefix are taken from pkg-config outright.
        self.cflags += " " + self._run_command([pkg_config,
                                                pkg_config_path,
                                                '--cflags'], print_output=False)
        self.ldflags += " " + self._run_command([pkg_config,
                                                 pkg_config_path,
                                                 '--libs'], print_output=False)
        self.version = self._run_command([pkg_config,
                                          pkg_config_path,
                                          '--modversion'], print_output=False)
        self.prefix = self._run_command([pkg_config,
                                         pkg_config_path,
                                         '--variable=prefix'], print_output=False)
| gpl-2.0 | 405,790,641,965,444,400 | 41.650943 | 110 | 0.575592 | false |
jumpstarter-io/cinder | cinder/tests/brick/fake_lvm.py | 9 | 2074 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class FakeBrickLVM(object):
    """Logs and records calls, for unit tests.

    Fix: several methods were declared without ``self``. As instance methods
    that made them uncallable (or, for ``get_all_volumes``, silently bound the
    instance to ``vg_name``); they now take ``self`` with bodies unchanged.
    """

    def __init__(self, vg_name, create, pv_list, vtype, execute=None):
        super(FakeBrickLVM, self).__init__()
        # Canned capacity values reported by this fake volume group.
        self.vg_size = '5.00'
        self.vg_free_space = '5.00'
        self.vg_name = vg_name

    def supports_thin_provisioning(self):
        # The fake VG never advertises thin provisioning.
        return False

    def get_all_volumes(self, vg_name=None):
        if vg_name is not None:
            return [vg_name]
        return ['cinder-volumes', 'fake-vg-1']

    def get_volumes(self):
        return ['fake-volume']

    def get_volume(self, name):
        return ['name']

    def get_all_physical_volumes(self, vg_name=None):
        return []

    def get_physical_volumes(self):
        return []

    def get_all_volume_groups(self, vg_name=None):
        return ['cinder-volumes', 'fake-vg']

    def update_volume_group_info(self):
        pass

    def create_thin_pool(self, name=None, size_str=0):
        pass

    def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
        pass

    def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
        pass

    def delete(self, name):
        pass

    def revert(self, snapshot_name):
        pass

    def lv_has_snapshot(self, name):
        return False

    def activate_lv(self, lv, is_snapshot=False):
        pass

    def rename_volume(self, lv_name, new_name):
        pass
puneetgkaur/backup_sugar_shell_for_cordova | src/jarabe/model/screen.py | 4 | 1532 | # Copyright (C) 2006-2008 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import dbus
# D-Bus coordinates of the OHM key store service, used below to freeze and
# unfreeze the DCON (see set_dcon_freeze).
_HARDWARE_MANAGER_INTERFACE = 'org.freedesktop.ohm.Keystore'
_HARDWARE_MANAGER_SERVICE = 'org.freedesktop.ohm'
_HARDWARE_MANAGER_OBJECT_PATH = '/org/freedesktop/ohm/Keystore'

# Cached D-Bus interface proxy; created lazily by _get_ohm().
_ohm_service = None
def _get_ohm():
    """Return a D-Bus interface to the OHM key store, creating and caching
    the proxy on first use."""
    global _ohm_service
    if _ohm_service is not None:
        return _ohm_service

    bus = dbus.SystemBus()
    proxy = bus.get_object(_HARDWARE_MANAGER_SERVICE,
                           _HARDWARE_MANAGER_OBJECT_PATH,
                           follow_name_owner_changes=True)
    _ohm_service = dbus.Interface(proxy, _HARDWARE_MANAGER_INTERFACE)
    return _ohm_service
return _ohm_service
def set_dcon_freeze(frozen):
try:
_get_ohm().SetKey('display.dcon_freeze', frozen)
except dbus.DBusException:
logging.error('Cannot unfreeze the DCON')
| gpl-2.0 | 2,906,281,008,176,276,500 | 33.044444 | 76 | 0.701044 | false |
onecue/pybbm | test/test_project/test_app/south_migrations/0001_initial.py | 6 | 8667 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from pybb.compat import get_image_field_full_name, get_user_model_path, get_user_frozen_models
AUTH_USER = get_user_model_path()
class Migration(SchemaMigration):
    """Initial South schema migration for the pybbm test application.

    The CustomUser tables are only created/dropped when the project is
    configured with ``test_app.CustomUser`` as its auth user model; the
    CustomProfile table is handled unconditionally.
    """

    def forwards(self, orm):
        if AUTH_USER == 'test_app.CustomUser':
            # Adding model 'CustomUser'
            db.create_table('test_app_customuser', (
                ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
                ('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
                ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
                ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
                ('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
                ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
                ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
                ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
                ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ))
            db.send_create_signal('test_app', ['CustomUser'])

            # Adding M2M table for field groups on 'CustomUser'
            m2m_table_name = db.shorten_name('test_app_customuser_groups')
            db.create_table(m2m_table_name, (
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
                ('customuser', models.ForeignKey(orm['test_app.customuser'], null=False)),
                ('group', models.ForeignKey(orm['auth.group'], null=False))
            ))
            db.create_unique(m2m_table_name, ['customuser_id', 'group_id'])

            # Adding M2M table for field user_permissions on 'CustomUser'
            m2m_table_name = db.shorten_name('test_app_customuser_user_permissions')
            db.create_table(m2m_table_name, (
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
                ('customuser', models.ForeignKey(orm['test_app.customuser'], null=False)),
                ('permission', models.ForeignKey(orm['auth.permission'], null=False))
            ))
            db.create_unique(m2m_table_name, ['customuser_id', 'permission_id'])

        # Adding model 'CustomProfile'
        db.create_table('test_app_customprofile', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('signature', self.gf('django.db.models.fields.TextField')(max_length=1024, blank=True)),
            ('signature_html', self.gf('django.db.models.fields.TextField')(max_length=1054, blank=True)),
            ('time_zone', self.gf('django.db.models.fields.FloatField')(default=3.0)),
            ('language', self.gf('django.db.models.fields.CharField')(default='en-us', max_length=10, blank=True)),
            ('show_signatures', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('post_count', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)),
            ('avatar', self.gf(get_image_field_full_name())(max_length=100, null=True, blank=True)),
            ('autosubscribe', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='pybb_customprofile', unique=True, to=orm[AUTH_USER])),
        ))
        db.send_create_signal('test_app', ['CustomProfile'])

    def backwards(self, orm):
        if AUTH_USER == 'test_app.CustomUser':
            # Deleting model 'CustomUser'
            db.delete_table('test_app_customuser')

            # Removing M2M table for field groups on 'CustomUser'
            db.delete_table(db.shorten_name('test_app_customuser_groups'))

            # Removing M2M table for field user_permissions on 'CustomUser'
            db.delete_table(db.shorten_name('test_app_customuser_user_permissions'))

        # Deleting model 'CustomProfile'
        db.delete_table('test_app_customprofile')

    # Frozen ORM definitions South uses to reconstruct model state at this
    # point in history; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'test_app.customprofile': {
            'Meta': {'object_name': 'CustomProfile'},
            'autosubscribe': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'avatar': (get_image_field_full_name(), [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'en-us'", 'max_length': '10', 'blank': 'True'}),
            'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'show_signatures': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'signature': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
            'signature_html': ('django.db.models.fields.TextField', [], {'max_length': '1054', 'blank': 'True'}),
            'time_zone': ('django.db.models.fields.FloatField', [], {'default': '3.0'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'pybb_customprofile'", 'unique': 'True', 'to': "orm['%s']" % AUTH_USER})
        },
    }

    # The frozen user model depends on the configured AUTH_USER_MODEL.
    if AUTH_USER == 'test_app.CustomUser':
        models['test_app.customuser'] = {
            'Meta': {'object_name': 'CustomUser'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        }
    else:
        models.update(get_user_frozen_models(AUTH_USER))

    complete_apps = ['test_app']
chouseknecht/galaxy | galaxy/wsgi.py | 1 | 1085 | # (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
"""
WSGI config for Galaxy project.
"""
import os
from django.core.wsgi import get_wsgi_application
from galaxy import prepare_env
# For public Galaxy, we need to default /etc/galaxy/settings.py
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'galaxy.settings.custom')

# Prepare the galaxy environment.
prepare_env()

# Return the default Django WSGI application. WSGI servers look up this
# module-level ``application`` callable as the entry point.
application = get_wsgi_application()
| apache-2.0 | 3,226,432,793,933,311,000 | 30.911765 | 73 | 0.76129 | false |
dcalacci/Interactive_estimation | game/interactive/migrations/0001_squashed_0011_interactive_channel.py | 2 | 3024 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-25 06:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Squashed initial migration for the ``interactive`` app.

    Auto-generated by Django 1.10.1; replaces migrations 0001 through 0011.
    """

    # Original migrations this squashed migration stands in for.
    replaces = [('interactive', '0001_initial'), ('interactive', '0002_auto_20160924_2245'), ('interactive', '0003_auto_20160924_2245'), ('interactive', '0004_auto_20160925_0129'), ('interactive', '0005_auto_20160925_0132'), ('interactive', '0006_auto_20160925_0201'), ('interactive', '0007_auto_20160925_0204'), ('interactive', '0008_auto_20160925_0207'), ('interactive', '0009_auto_20160925_0210'), ('interactive', '0010_auto_20160925_0213'), ('interactive', '0011_interactive_channel')]

    initial = True

    dependencies = [
        ('round', '0002_round_round_order'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Interactive',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('end_time', models.DateTimeField(null=True)),
                ('start_time', models.DateTimeField(auto_now_add=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='InteractiveRound',
            fields=[
                ('round_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='round.Round')),
                ('influencers', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
            ],
            bases=('round.round',),
        ),
        migrations.AddField(
            model_name='interactive',
            name='users',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='interactive',
            name='started',
            field=models.BooleanField(default=False),
        ),
        migrations.CreateModel(
            name='Settings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('max_users', models.PositiveSmallIntegerField()),
                ('min_users', models.PositiveSmallIntegerField()),
                ('max_influencers', models.PositiveSmallIntegerField()),
                ('min_influencers', models.PositiveSmallIntegerField()),
            ],
        ),
        migrations.AddField(
            model_name='interactive',
            name='constraints',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='interactive.Settings'),
        ),
        migrations.AddField(
            model_name='interactive',
            name='channel',
            field=models.CharField(default='Helloworld', max_length=100, unique=True),
            preserve_default=False,
        ),
    ]
| mit | 3,200,143,205,543,564,300 | 42.826087 | 489 | 0.60086 | false |
interactomix/iis | iis/tests/test_daemonize.py | 1 | 1182 | import time
from iis.extensions import db
from test_utils.base import BaseTestCase
from iis.util.daemons import daemonize
import iis.models
class TestDaemonize(BaseTestCase):
    """Integration test for iis.util.daemons.daemonize."""

    # Directory the daemonized worker's pid file is written to.
    DAEMON_PID_PATH = "/tmp"

    def test_no_exception_raised_and_returns_pid(self):
        """daemonize() returns a string uid and the worker runs to completion."""
        self.app.logger.debug("Testing daemonize")

        def test_worker(uuid):
            # Runs in the daemonized process: record a finished Computation
            # row keyed by the uid daemonize() handed us.
            self.app.logger.debug("Executing test process")
            comp = iis.models.Computation(process_uid=uuid, input_data="",
                                          output_data="", status="finished",
                                          progress=100)
            db.session.add(comp)
            db.session.commit()
            self.app.logger.debug("Commited Computation model.")

        uid = daemonize(test_worker,
                        pid_base=TestDaemonize.DAEMON_PID_PATH)  # type: str
        self.assertTrue(isinstance(uid, str))
        # Give the detached worker process time to commit its row before we
        # query for it.
        time.sleep(5)
        self.app.logger.debug("Accessing Computation model.")
        comp = iis.models.Computation.query.filter_by(process_uid=uid).first()
        self.assertEqual(comp.status, "finished")
        self.app.logger.debug("Testing daemonize successful")
| agpl-3.0 | 2,391,770,002,420,793,300 | 35.9375 | 78 | 0.614213 | false |
SRabbelier/Melange | app/soc/modules/gci/logic/models/timeline.py | 1 | 1347 | #!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCITimeline (Model) query functions.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>'
]
from soc.logic.models import timeline
from soc.logic.models import sponsor as sponsor_logic
import soc.models.timeline
import soc.modules.gci.models.timeline
class Logic(timeline.Logic):
    """Logic methods for the GCITimeline model.
    """

    def __init__(self, model=soc.modules.gci.models.timeline.GCITimeline,
                 base_model=soc.models.timeline.Timeline,
                 scope_logic=sponsor_logic):
        """Defines the name, key_name and model for this entity.
        """
        super(Logic, self).__init__(model, base_model=base_model,
                                    scope_logic=scope_logic)


# Module-level singleton instance used by importers of this module.
logic = Logic()
| apache-2.0 | 4,513,322,473,889,183,000 | 27.659574 | 74 | 0.701559 | false |
hojel/calibre | src/chardet/escprober.py | 215 | 3029 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from escsm import HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel, ISO2022KRSMModel
from charsetprober import CharSetProber
from codingstatemachine import CodingStateMachine
class EscCharSetProber(CharSetProber):
    """Probes for escape-sequence based encodings by running one coding
    state machine per candidate model (HZ, ISO-2022-CN/JP/KR)."""

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = [ \
            CodingStateMachine(HZSMModel),
            CodingStateMachine(ISO2022CNSMModel),
            CodingStateMachine(ISO2022JPSMModel),
            CodingStateMachine(ISO2022KRSMModel)
        ]
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        for codingSM in self._mCodingSM:
            if not codingSM: continue
            codingSM.active = constants.True
            codingSM.reset()
        # Number of state machines still in the running.
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None

    def get_charset_name(self):
        # None until feed() hit an unambiguous match.
        return self._mDetectedCharset

    def get_confidence(self):
        # Escape sequences are unambiguous, so confidence is near-certain
        # once a charset has been detected, zero otherwise.
        if self._mDetectedCharset:
            return 0.99
        else:
            return 0.00

    def feed(self, aBuf):
        """Feed a buffer of bytes and return the prober state afterwards."""
        for c in aBuf:
            for codingSM in self._mCodingSM:
                if not codingSM: continue
                if not codingSM.active: continue
                codingState = codingSM.next_state(c)
                if codingState == constants.eError:
                    # This machine can no longer match; retire it.
                    codingSM.active = constants.False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        # Every candidate ruled out.
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif codingState == constants.eItsMe:
                    # Unambiguous match: record which charset it was.
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = codingSM.get_coding_state_machine()
                    return self.get_state()
        return self.get_state()
| gpl-3.0 | -1,624,288,395,901,451,500 | 37.341772 | 81 | 0.623638 | false |
Vimos/scikit-learn | sklearn/metrics/__init__.py | 28 | 3604 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import calinski_harabaz_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import mean_squared_log_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause | -9,053,754,419,972,987,000 | 30.068966 | 79 | 0.758879 | false |
nikitasingh981/scikit-learn | sklearn/decomposition/fastica_.py | 9 | 18717 | """
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import moves
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w : ndarray of shape(n)
Array to be orthogonalized
W : ndarray of shape(p, n)
Null space definition
j : int < p
The no of (from the first) rows of Null space W wrt which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
    """Deflationary FastICA using fun approx to neg-entropy function.

    Extracts the independent components one at a time: each new unmixing
    vector is refined with a fixed-point iteration and kept orthogonal to
    the previously extracted ones via Gram-Schmidt.

    Parameters
    ----------
    X : ndarray, shape (n_features, n_samples)
        Whitened data; features are rows (note: transposed w.r.t. the
        public API).
    tol : float
        Convergence threshold on the change of direction of ``w``.
    g : callable
        Non-linearity returning ``(g(wx), E[g'(wx)])`` pairs.
    fun_args : dict
        Extra keyword arguments forwarded to ``g``.
    max_iter : int
        Maximum fixed-point iterations per component.
    w_init : ndarray, shape (n_components, n_components)
        Initial guesses, one unmixing vector per row.

    Returns
    -------
    W : ndarray, shape (n_components, n_components)
        Estimated unmixing matrix.
    n_iter : int
        Largest iteration count used by any single component.

    Used internally by FastICA.
    """
    n_components = w_init.shape[0]
    W = np.zeros((n_components, n_components), dtype=X.dtype)
    n_iter = []

    # j is the index of the extracted component
    for j in range(n_components):
        w = w_init[j, :].copy()
        # Start each search from a unit-norm vector.
        w /= np.sqrt((w ** 2).sum())

        for i in moves.xrange(max_iter):
            gwtx, g_wtx = g(fast_dot(w.T, X), fun_args)
            # Fixed-point update: E[X g(w.T X)] - E[g'(w.T X)] w.
            w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
            # Keep the candidate orthogonal to the components found so far.
            _gs_decorrelation(w1, W, j)
            w1 /= np.sqrt((w1 ** 2).sum())
            # |<w1, w>| approaches 1 when the direction has stabilized
            # (sign flips are irrelevant, hence the outer abs).
            lim = np.abs(np.abs((w1 * w).sum()) - 1)
            w = w1
            if lim < tol:
                break

        n_iter.append(i + 1)
        W[j, :] = w

    return W, max(n_iter)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
    """Parallel FastICA.

    Updates all unmixing vectors simultaneously, applying a symmetric
    decorrelation after every fixed-point step so the rows of ``W`` stay
    orthonormal.

    Used internally by FastICA --main loop.
    """
    W = _sym_decorrelation(w_init)
    del w_init
    # Number of samples, as a float so the division below is true division.
    p_ = float(X.shape[1])
    for ii in moves.xrange(max_iter):
        gwtx, g_wtx = g(fast_dot(W, X), fun_args)
        # Fixed-point update for every row at once, then re-orthonormalize.
        W1 = _sym_decorrelation(fast_dot(gwtx, X.T) / p_
                                - g_wtx[:, np.newaxis] * W)
        del gwtx, g_wtx
        # builtin max, abs are faster than numpy counter parts.
        # Converged when every |<w1_i, w_i>| is close to 1.
        lim = max(abs(abs(np.diag(fast_dot(W1, W.T))) - 1))
        W = W1
        if lim < tol:
            break
    else:
        # for/else: only reached when the loop exhausted max_iter.
        warnings.warn('FastICA did not converge. Consider increasing '
                      'tolerance or the maximum number of iterations.')
    return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
            fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
            random_state=None, return_X_mean=False, compute_sources=True,
            return_n_iter=False):
    """Perform Fast Independent Component Analysis.

    Read more in the :ref:`User Guide <ICA>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    n_components : int, optional
        Number of components to extract. If None no dimension reduction
        is performed.

    algorithm : {'parallel', 'deflation'}, optional
        Apply a parallel or deflational FASTICA algorithm.

    whiten : boolean, optional
        If True perform an initial whitening of the data.
        If False, the data is assumed to have already been
        preprocessed: it should be centered, normed and white.
        Otherwise you will get incorrect results.
        In this case the parameter n_components will be ignored.

    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. Example:

        def my_g(x):
            return x ** 3, 3 * x ** 2

    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If empty or None and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}

    max_iter : int, optional
        Maximum number of iterations to perform.

    tol : float, optional
        A positive scalar giving the tolerance at which the
        un-mixing matrix is considered to have converged.

    w_init : (n_components, n_components) array, optional
        Initial un-mixing array of dimension (n.comp,n.comp).
        If None (default) then an array of normal r.v.'s is used.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    return_X_mean : bool, optional
        If True, X_mean is returned too.

    compute_sources : bool, optional
        If False, sources are not computed, but only the rotation matrix.
        This can save memory when working with big data. Defaults to True.

    return_n_iter : bool, optional
        Whether or not to return the number of iterations.

    Returns
    -------
    K : array, shape (n_components, n_features) | None.
        If whiten is 'True', K is the pre-whitening matrix that projects data
        onto the first n_components principal components. If whiten is 'False',
        K is 'None'.

    W : array, shape (n_components, n_components)
        Estimated un-mixing matrix.
        The mixing matrix can be obtained by::

            w = np.dot(W, K.T)
            A = w.T * (w * w.T).I

    S : array, shape (n_samples, n_components) | None
        Estimated source matrix

    X_mean : array, shape (n_features, )
        The mean over features. Returned only if return_X_mean is True.

    n_iter : int
        If the algorithm is "deflation", n_iter is the
        maximum number of iterations run across all components. Else
        they are just the number of iterations taken to converge. This is
        returned only when return_n_iter is set to `True`.

    Notes
    -----
    The data matrix X is considered to be a linear combination of
    non-Gaussian (independent) components i.e. X = AS where columns of S
    contain the independent components and A is a linear mixing
    matrix. In short ICA attempts to `un-mix' the data by estimating an
    un-mixing matrix W where ``S = W K X.``

    This implementation was originally made for data of shape
    [n_features, n_samples]. Now the input is transposed
    before the algorithm is applied. This makes it slightly
    faster for Fortran-ordered input.

    Implemented using FastICA:
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """
    random_state = check_random_state(random_state)
    fun_args = {} if fun_args is None else fun_args
    # make interface compatible with other decompositions
    # a copy is required only for non whitened data
    X = check_array(X, copy=whiten, dtype=FLOAT_DTYPES).T

    alpha = fun_args.get('alpha', 1.0)
    if not 1 <= alpha <= 2:
        raise ValueError('alpha must be in [1,2]')

    # Resolve the non-linearity: a known name, or a user callable that is
    # wrapped so it receives fun_args as keyword arguments.
    if fun == 'logcosh':
        g = _logcosh
    elif fun == 'exp':
        g = _exp
    elif fun == 'cube':
        g = _cube
    elif callable(fun):
        def g(x, fun_args):
            return fun(x, **fun_args)
    else:
        exc = ValueError if isinstance(fun, six.string_types) else TypeError
        raise exc("Unknown function %r;"
                  " should be one of 'logcosh', 'exp', 'cube' or callable"
                  % fun)

    n, p = X.shape

    if not whiten and n_components is not None:
        n_components = None
        warnings.warn('Ignoring n_components with whiten=False.')

    if n_components is None:
        n_components = min(n, p)
    if n_components > min(n, p):
        n_components = min(n, p)
        warnings.warn('n_components is too large: it will be set to %s'
                      % n_components)

    if whiten:
        # Centering the columns (ie the variables)
        X_mean = X.mean(axis=-1)
        X -= X_mean[:, np.newaxis]

        # Whitening and preprocessing by PCA
        u, d, _ = linalg.svd(X, full_matrices=False)
        del _
        K = (u / d).T[:n_components]  # see (6.33) p.140
        del u, d
        X1 = np.dot(K, X)
        # see (13.6) p.267 Here X1 is white and data
        # in X has been projected onto a subspace by PCA
        X1 *= np.sqrt(p)
    else:
        # X must be casted to floats to avoid typing issues with numpy
        # 2.0 and the line below
        X1 = as_float_array(X, copy=False)  # copy has been taken care of

    if w_init is None:
        w_init = np.asarray(random_state.normal(size=(n_components,
                            n_components)), dtype=X1.dtype)
    else:
        w_init = np.asarray(w_init)
        if w_init.shape != (n_components, n_components):
            raise ValueError('w_init has invalid shape -- should be %(shape)s'
                             % {'shape': (n_components, n_components)})

    kwargs = {'tol': tol,
              'g': g,
              'fun_args': fun_args,
              'max_iter': max_iter,
              'w_init': w_init}

    if algorithm == 'parallel':
        W, n_iter = _ica_par(X1, **kwargs)
    elif algorithm == 'deflation':
        W, n_iter = _ica_def(X1, **kwargs)
    else:
        raise ValueError('Invalid algorithm: must be either `parallel` or'
                         ' `deflation`.')
    del X1

    # Assemble the outputs once instead of one return statement per flag
    # combination (the original had eight nested returns).
    if whiten:
        S = fast_dot(fast_dot(W, K), X).T if compute_sources else None
    else:
        S = fast_dot(W, X).T if compute_sources else None
        K = None
        X_mean = None

    returned = [K, W, S]
    if return_X_mean:
        returned.append(X_mean)
    if return_n_iter:
        returned.append(n_iter)
    return tuple(returned)
class FastICA(BaseEstimator, TransformerMixin):
    """FastICA: a fast algorithm for Independent Component Analysis.

    Read more in the :ref:`User Guide <ICA>`.

    Parameters
    ----------
    n_components : int, optional
        Number of components to use. If none is passed, all are used.

    algorithm : {'parallel', 'deflation'}
        Apply parallel or deflational algorithm for FastICA.

    whiten : boolean, optional
        If whiten is false, the data is already considered to be
        whitened, and no whitening is performed.

    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. Example:

        def my_g(x):
            return x ** 3, 3 * x ** 2

    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If empty and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}.

    max_iter : int, optional
        Maximum number of iterations during fit.

    tol : float, optional
        Tolerance on update at each iteration.

    w_init : None of an (n_components, n_components) ndarray
        The mixing matrix to be used to initialize the algorithm.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    components_ : 2D array, shape (n_components, n_features)
        The unmixing matrix.

    mixing_ : array, shape (n_features, n_components)
        The mixing matrix.

    n_iter_ : int
        If the algorithm is "deflation", n_iter is the
        maximum number of iterations run across all components. Else
        they are just the number of iterations taken to converge.

    Notes
    -----
    Implementation based on
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """

    def __init__(self, n_components=None, algorithm='parallel', whiten=True,
                 fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
                 w_init=None, random_state=None):
        # Per sklearn convention, __init__ only stores parameters; all
        # validation happens in fit.
        super(FastICA, self).__init__()
        self.n_components = n_components
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.fun_args = fun_args
        self.max_iter = max_iter
        self.tol = tol
        self.w_init = w_init
        self.random_state = random_state

    def _fit(self, X, compute_sources=False):
        """Fit the model

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        compute_sources : bool
            If False, sources are not computes but only the rotation matrix.
            This can save memory when working with big data. Defaults to False.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            The estimated sources (only meaningful when compute_sources
            is True; None propagates through otherwise).
        """
        fun_args = {} if self.fun_args is None else self.fun_args
        # Delegate the actual estimation to the functional API.
        whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
            X=X, n_components=self.n_components, algorithm=self.algorithm,
            whiten=self.whiten, fun=self.fun, fun_args=fun_args,
            max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
            random_state=self.random_state, return_X_mean=True,
            compute_sources=compute_sources, return_n_iter=True)

        if self.whiten:
            # Fold the whitening step into the stored unmixing matrix so
            # transform() can apply it in a single matrix product.
            self.components_ = np.dot(unmixing, whitening)
            self.mean_ = X_mean
            self.whitening_ = whitening
        else:
            self.components_ = unmixing

        # Pseudo-inverse: components_ is generally rectangular.
        self.mixing_ = linalg.pinv(self.components_)

        if compute_sources:
            self.__sources = sources  # name-mangled cache of the sources

        return sources

    def fit_transform(self, X, y=None):
        """Fit the model and recover the sources from X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        return self._fit(X, compute_sources=True)

    def fit(self, X, y=None):
        """Fit the model to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        self
        """
        self._fit(X, compute_sources=False)
        return self

    def transform(self, X, y=None, copy=True):
        """Recover the sources from X (apply the unmixing matrix).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Data to transform, where n_samples is the number of samples
            and n_features is the number of features.

        copy : bool (optional)
            If False, data passed to fit are overwritten. Defaults to True.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'mixing_')

        X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
        if self.whiten:
            # Re-center with the training mean before unmixing.
            X -= self.mean_

        return fast_dot(X, self.components_.T)

    def inverse_transform(self, X, copy=True):
        """Transform the sources back to the mixed data (apply mixing matrix).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            Sources, where n_samples is the number of samples
            and n_components is the number of components.
        copy : bool (optional)
            If False, data passed to fit are overwritten. Defaults to True.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_features)
        """
        check_is_fitted(self, 'mixing_')

        # A copy is only needed when whitening, since the += below mutates.
        X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)
        X = fast_dot(X, self.mixing_.T)
        if self.whiten:
            X += self.mean_

        return X
| bsd-3-clause | -5,049,595,735,058,600,000 | 31.382353 | 87 | 0.587594 | false |
sysalexis/kbengine | kbe/src/lib/python/Lib/tokenize.py | 78 | 24876 | """Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
import builtins
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import re
import sys
from token import *
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
"NL", "untokenize", "ENCODING", "TokenInfo"]
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
'(': LPAR,
')': RPAR,
'[': LSQB,
']': RSQB,
':': COLON,
',': COMMA,
';': SEMI,
'+': PLUS,
'-': MINUS,
'*': STAR,
'/': SLASH,
'|': VBAR,
'&': AMPER,
'<': LESS,
'>': GREATER,
'=': EQUAL,
'.': DOT,
'%': PERCENT,
'{': LBRACE,
'}': RBRACE,
'==': EQEQUAL,
'!=': NOTEQUAL,
'<=': LESSEQUAL,
'>=': GREATEREQUAL,
'~': TILDE,
'^': CIRCUMFLEX,
'<<': LEFTSHIFT,
'>>': RIGHTSHIFT,
'**': DOUBLESTAR,
'+=': PLUSEQUAL,
'-=': MINEQUAL,
'*=': STAREQUAL,
'/=': SLASHEQUAL,
'%=': PERCENTEQUAL,
'&=': AMPEREQUAL,
'|=': VBAREQUAL,
'^=': CIRCUMFLEXEQUAL,
'<<=': LEFTSHIFTEQUAL,
'>>=': RIGHTSHIFTEQUAL,
'**=': DOUBLESTAREQUAL,
'//': DOUBLESLASH,
'//=': DOUBLESLASHEQUAL,
'@': AT
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    """A single token: its type, text, (row, col) span and source line."""

    def __repr__(self):
        # Show the numeric type alongside its symbolic name, e.g. "1 (NAME)".
        described_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)'
                % self._replace(type=described_type))

    @property
    def exact_type(self):
        """The most specific type: OP tokens map to their per-operator
        constant; every other token keeps its generic type."""
        if self.type != OP or self.string not in EXACT_TOKEN_TYPES:
            return self.type
        return EXACT_TOKEN_TYPES[self.string]
def group(*choices):
    """Join regex alternatives into one group: group('a', 'b') -> '(a|b)'."""
    return '(' + '|'.join(choices) + ')'


def any(*choices):  # NOTE: deliberately shadows builtins.any in this module.
    """A group repeated zero or more times."""
    return group(*choices) + '*'


def maybe(*choices):
    """An optional group: zero or one occurrence."""
    return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Map an opening quote (with optional string prefix) to the pattern that
# matches the remainder of the literal.  Bare prefix characters map to
# None so the tokenizer can recognise a prefix with no quote yet.
endpats = {"'": Single, '"': Double,
           "'''": Single3, '"""': Double3,
           "r'''": Single3, 'r"""': Double3,
           "b'''": Single3, 'b"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "B'''": Single3, 'B"""': Double3,
           "br'''": Single3, 'br"""': Double3,
           "bR'''": Single3, 'bR"""': Double3,
           "Br'''": Single3, 'Br"""': Double3,
           "BR'''": Single3, 'BR"""': Double3,
           "rb'''": Single3, 'rb"""': Double3,
           "Rb'''": Single3, 'Rb"""': Double3,
           "rB'''": Single3, 'rB"""': Double3,
           "RB'''": Single3, 'RB"""': Double3,
           "u'''": Single3, 'u"""': Double3,
           # A duplicated "R'''"/'R"""' entry was removed here; dict
           # literals silently keep only the last occurrence anyway.
           "U'''": Single3, 'U"""': Double3,
           'r': None, 'R': None, 'b': None, 'B': None,
           'u': None, 'U': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"b'''", 'b"""', "B'''", 'B"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',
"rb'''", 'rb"""', "rB'''", 'rB"""',
"Rb'''", 'Rb"""', "RB'''", 'RB"""',
"u'''", 'u"""', "U'''", 'U"""',
):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"b'", 'b"', "B'", 'B"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"' ,
"rb'", 'rb"', "rB'", 'rB"',
"Rb'", 'Rb"', "RB'", 'RB"' ,
"u'", 'u"', "U'", 'U"',
):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception):
    """Raised when EOF is reached inside an unterminated multi-line
    string or statement; args are (message, (row, col))."""


class StopTokenizing(Exception):
    """Internal signal used to abort tokenization early."""
class Untokenizer:
    """Rebuild source text from a stream of tokens.

    While full 5-tuples are supplied, reconstruction is position-faithful
    (whitespace is re-synthesized from the recorded start/end positions).
    As soon as a bare 2-tuple (type, string) is seen, processing falls
    back to ``compat``, which only guarantees a round-trippable -- not
    byte-identical -- result.
    """

    def __init__(self):
        self.tokens = []        # accumulated output fragments
        self.prev_row = 1       # row where the previous token ended
        self.prev_col = 0       # column where the previous token ended
        self.encoding = None    # captured from an ENCODING token, if any

    def add_whitespace(self, start):
        """Emit filler so the next token lands at position ``start``."""
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            # Bridge skipped rows with explicit line continuations.
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        """Consume tokens and return the reconstructed source string."""
        it = iter(iterable)
        for t in it:
            if len(t) == 2:
                # No position info available: switch to the lossy path.
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        """Lossy reconstruction from (type, string) pairs only.

        Indentation is replayed from INDENT/DEDENT tokens; single spaces
        are inserted after names/numbers and between adjacent strings so
        the output re-tokenizes to the same stream.
        """
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        # Re-process the first token together with the rest of the stream.
        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                # First token of an indented logical line: emit the indent.
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Returns a bytes object, encoded using the ENCODING token (the first
    token sequence produced by tokenize), or a str if no ENCODING token
    was seen.

    Each element of ``iterable`` must be a token sequence with at least
    two members, a token number and token value.  If only 2-tuples are
    passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    converter = Untokenizer()
    source = converter.untokenize(iterable)
    if converter.encoding is None:
        return source
    return source.encode(converter.encoding)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    # Best-effort filename for error messages: works when readline is a
    # bound method of a file object, otherwise stays None.
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'

    def read_or_stop():
        # Treat an exhausted readline iterator like EOF.
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        """Return the encoding named by a PEP 263 cookie on this line,
        or None if the line carries no cookie."""
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)

        if bom_found:
            # A BOM pins the encoding to utf-8; a disagreeing cookie is an
            # error, an agreeing one is reported as 'utf-8-sig'.
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        # A cookie is only honoured on the first two lines, and the first
        # line must be blank/comment for the second to count.
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = builtins.open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        # BUG FIX: don't leak the binary file object when detection or
        # wrapping fails (e.g. SyntaxError from a bad coding cookie).
        # Bare except is deliberate so even BaseException can't leak it.
        buffer.close()
        raise
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    # Replay the lines detect_encoding() already read, then the rest of
    # the stream, then endless b"" so _tokenize sees a clean EOF.
    remaining = iter(readline, b"")
    source_lines = chain(consumed, remaining, repeat(b""))
    return _tokenize(source_lines.__next__, encoding)
def _tokenize(readline, encoding):
    """Shared implementation behind tokenize() and generate_tokens().

    ``readline`` yields one line per call (bytes when ``encoding`` is not
    None, str otherwise).  Yields TokenInfo 5-tuples, starting with an
    ENCODING token when an encoding is supplied.
    """
    # Tokenizer state:
    #   lnum      -- current line number
    #   parenlev  -- nesting depth of (), [], {} (newlines inside are NL)
    #   continued -- previous line ended with a backslash continuation
    #   contstr/contline/needcont -- in-progress multi-line string state
    #   indents   -- stack of active indentation columns
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:             # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)  # NOTE: 'max' shadows the builtin here.

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string continued without a trailing
                # backslash: report it as an error token.
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    # Tabs advance to the next multiple of tabsize.
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            # Match the next token (or pseudo-token) starting at pos.
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                 # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Inside brackets a newline is non-logical (NL).
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                # Nothing matched: emit the single character as an error
                # token and move on.
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    # Close any still-open indentation levels at end of input.
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    """Tokenize a readline that yields str (not bytes) lines.

    An undocumented, backwards compatible API for the parts of the
    standard library that tokenize from strings; no ENCODING token is
    emitted.
    """
    return _tokenize(readline, None)
def main():
    """Command-line entry point: tokenize a file (or stdin) and print
    one formatted line per token."""
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        # Print a compiler-style diagnostic and exit with status 1.
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with builtins.open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            # stdin is already text, so use the str-based tokenizer.
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        # err.args[1] is ("<tokenize>", lnum, pos, line).
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise
if __name__ == "__main__":
main()
| lgpl-3.0 | 4,470,033,518,050,312,000 | 35 | 83 | 0.514994 | false |
pattisdr/osf.io | osf/migrations/0086_pre_migrate_collections.py | 17 | 4590 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-05 16:30
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import osf.models.validators
import osf.utils.fields
class Migration(migrations.Migration):
    """Auto-generated Django migration (2018-03-05).

    Replaces the old ``Collection`` model with a new schema: deletes the
    original model, then recreates ``Collection`` plus the new
    ``CollectedGuidMetadata`` through-model and a ``CollectionProvider``
    proxy over ``AbstractProvider``.
    """
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('osf', '0085_merge_20180316_1625'),
    ]
    operations = [
        migrations.CreateModel(
            name='CollectedGuidMetadata',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('collected_type', models.CharField(blank=True, max_length=31)),
                ('status', models.CharField(blank=True, max_length=31)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('guid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='osf.Guid')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Drop the legacy Collection model; a replacement with the new
        # schema is created further below in this same migration.
        migrations.DeleteModel(
            name='Collection',
        ),
        migrations.CreateModel(
            name='CollectionProvider',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
            },
            bases=('osf.abstractprovider',),
        ),
        migrations.AlterField(
            model_name='abstractprovider',
            name='type',
            field=models.CharField(choices=[('osf.collectionprovider', 'collection provider'), ('osf.preprintprovider', 'preprint provider')], db_index=True, max_length=255),
        ),
        migrations.CreateModel(
            name='Collection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('content_type_pk', models.PositiveIntegerField(blank=True, null=True)),
                ('title', models.CharField(max_length=200, validators=[osf.models.validators.validate_title])),
                ('collected_type_choices', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=31), blank=True, default=list, size=None)),
                ('status_choices', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=31), blank=True, default=list, size=None)),
                ('is_public', models.BooleanField(db_index=True, default=False)),
                ('is_promoted', models.BooleanField(db_index=True, default=False)),
                ('is_bookmark_collection', models.BooleanField(db_index=True, default=False)),
                ('collected_types', models.ManyToManyField(related_name='_collection_collected_types_+', to='contenttypes.ContentType')),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('guid_links', models.ManyToManyField(related_name='collections', through='osf.CollectedGuidMetadata', to='osf.Guid')),
                ('provider', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='osf.AbstractProvider')),
                ('deleted', osf.utils.fields.NonNaiveDateTimeField(blank=True, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Wire up the through-model now that Collection exists again.
        migrations.AddField(
            model_name='collectedguidmetadata',
            name='collection',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='osf.Collection'),
        ),
        migrations.AlterOrderWithRespectTo(
            name='collectedguidmetadata',
            order_with_respect_to='collection',
        ),
        migrations.AlterUniqueTogether(
            name='collectedguidmetadata',
            unique_together=set([('collection', 'guid')]),
        ),
    ]
| apache-2.0 | -7,832,653,434,278,711,000 | 49.43956 | 174 | 0.61329 | false |
DarkSand/Sasila | sasila/system_normal/downloader/http/selenium_response.py | 1 | 1112 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
from posixpath import normpath
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf-8')
from urlparse import urljoin, urlparse, urlunparse
else:
from urllib.parse import urljoin, urlparse, urlunparse
class SeleniumResponse(object):
    """Wrapper pairing a selenium-fetched page body with its originating request."""

    def __init__(self, m_response=None, request=None):
        # m_response: the underlying response object (must expose .content),
        # or None when the fetch failed.
        self.request = request
        self.m_response = m_response

    def __str__(self):
        if self.m_response:
            return "<SeleniumResponse [%s] [%.2f KB]>" % (
                self.request.url, float(len(self.m_response.content)) / 1000)
        else:
            return "<SeleniumResponse failed: %s>" % self.request.url

    def nice_join(self, url):
        """Resolve *url* against the request URL and normalize the path."""
        joined = urljoin(self.request.url, url)
        parts = urlparse(joined)
        path = normpath(parts[2])
        return urlunparse((parts.scheme, parts.netloc, path, parts.params,
                           parts.query, parts.fragment))

    def is_url(self, url):
        """Return True if *url* looks like an absolute http(s) URL."""
        # Idiom fix: return the match test directly instead of
        # "if match: return True / else: return False".
        return re.match(r'^https?:/{2}\w.+$', url) is not None

    __repr__ = __str__
| apache-2.0 | -6,278,777,995,057,327,000 | 28.263158 | 121 | 0.603417 | false |
apixandru/intellij-community | python/lib/Lib/site-packages/django/db/backends/sqlite3/base.py | 93 | 12206 | """
SQLite3 backend for django.
Python 2.4 requires pysqlite2 (http://pysqlite.org/).
Python 2.5 and later can use a pysqlite2 module or the sqlite3 module in the
standard library.
"""
import re
import sys
import datetime
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.utils.safestring import SafeString
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError, e1:
from sqlite3 import dbapi2 as Database
except ImportError, exc:
import sys
from django.core.exceptions import ImproperlyConfigured
if sys.version_info < (2, 5, 0):
module = 'pysqlite2 module'
exc = e1
else:
module = 'either pysqlite2 or sqlite3 modules (tried in that order)'
raise ImproperlyConfigured("Error loading %s: %s" % (module, exc))
# Re-export the driver's exception classes under the names Django expects.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Converters map SQLite's stored text values back to Python objects when
# rows are fetched (keyed on the declared column type).
Database.register_converter("bool", lambda s: str(s) == '1')
Database.register_converter("time", util.typecast_time)
Database.register_converter("date", util.typecast_date)
Database.register_converter("datetime", util.typecast_timestamp)
Database.register_converter("timestamp", util.typecast_timestamp)
Database.register_converter("TIMESTAMP", util.typecast_timestamp)
Database.register_converter("decimal", util.typecast_decimal)
# Adapters convert Python values into a SQLite-storable form on the way in.
Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
if Database.version_info >= (2,4,1):
    # Starting in 2.4.1, the str type is not accepted anymore, therefore,
    # we convert all str objects to Unicode
    # As registering a adapter for a primitive type causes a small
    # slow-down, this adapter is only registered for sqlite3 versions
    # needing it.
    Database.register_adapter(str, lambda s:s.decode('utf-8'))
    Database.register_adapter(SafeString, lambda s:s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags describing what the SQLite backend supports."""
    # SQLite cannot handle us only partially reading from a cursor's result set
    # and then writing the same rows to the database in another cursor. This
    # setting ensures we always read result sets fully into memory all in one
    # go.
    can_use_chunked_reads = False
    # The test runner must not open multiple simultaneous connections to the
    # same SQLite test database.
    test_db_allows_multiple_connections = False
    supports_unspecified_pk = True
    # SQLite's default bound-parameter limit is 999 (< 1000).
    supports_1000_query_parameters = False
    supports_mixed_date_datetime_comparisons = False
    def _supports_stddev(self):
        """Confirm support for STDDEV and related stats functions
        SQLite supports STDDEV as an extension package; so
        connection.ops.check_aggregate_support() can't unilaterally
        rule out support for STDDEV. We need to manually check
        whether the call works.
        """
        cursor = self.connection.cursor()
        cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
        try:
            cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
            has_support = True
        except utils.DatabaseError:
            has_support = False
        # Clean up the probe table regardless of the outcome.
        cursor.execute('DROP TABLE STDDEV_TEST')
        return has_support
class DatabaseOperations(BaseDatabaseOperations):
    """SQL-generation helpers specific to SQLite.

    Several date operations have no native SQLite SQL equivalent, so they
    are delegated to Python user-defined functions (django_extract,
    django_date_trunc, django_format_dtdelta) registered on the connection
    in DatabaseWrapper._cursor().
    """
    def date_extract_sql(self, lookup_type, field_name):
        # sqlite doesn't support extract, so we fake it with the user-defined
        # function django_extract that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_extract('%s', %s)" % (lookup_type.lower(), field_name)
    def date_interval_sql(self, sql, connector, timedelta):
        # It would be more straightforward if we could use the sqlite strftime
        # function, but it does not allow for keeping six digits of fractional
        # second information, nor does it allow for formatting date and datetime
        # values differently. So instead we register our own function that
        # formats the datetime combined with the delta in a manner suitable
        # for comparisons.
        return u'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
            connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
    def date_trunc_sql(self, lookup_type, field_name):
        # sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
        # function django_date_trunc that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
    def drop_foreignkey_sql(self):
        # No-op: this backend does not enforce FK constraints (see sql_flush).
        return ""
    def pk_default_value(self):
        # Inserting NULL into an INTEGER PRIMARY KEY auto-assigns the rowid.
        return 'NULL'
    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name # Quoting once is enough.
        return '"%s"' % name
    def no_limit_value(self):
        # SQLite treats "LIMIT -1" as "no limit".
        return -1
    def sql_flush(self, style, tables, sequences):
        # NB: The generated SQL below is specific to SQLite
        # Note: The DELETE FROM... SQL generated below works for SQLite databases
        # because constraints don't exist
        sql = ['%s %s %s;' % \
                (style.SQL_KEYWORD('DELETE'),
                 style.SQL_KEYWORD('FROM'),
                 style.SQL_FIELD(self.quote_name(table))
                 ) for table in tables]
        # Note: No requirement for reset of auto-incremented indices (cf. other
        # sql_flush() implementations). Just return SQL at this point
        return sql
    def year_lookup_bounds(self, value):
        # Inclusive [Jan 1, Dec 31 23:59:59.999999] bounds for __year lookups.
        first = '%s-01-01'
        second = '%s-12-31 23:59:59.999999'
        return [first % value, second % value]
    def convert_values(self, value, field):
        """SQLite returns floats when it should be returning decimals,
        and gets dates and datetimes wrong.
        For consistency with other backends, coerce when required.
        """
        internal_type = field.get_internal_type()
        if internal_type == 'DecimalField':
            return util.typecast_decimal(field.format_number(value))
        elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
            return int(value)
        elif internal_type == 'DateField':
            return util.typecast_date(value)
        elif internal_type == 'DateTimeField':
            return util.typecast_timestamp(value)
        elif internal_type == 'TimeField':
            return util.typecast_time(value)
        # No field, or the field isn't known to be a decimal or integer
        return value
class DatabaseWrapper(BaseDatabaseWrapper):
    """Connection wrapper for SQLite; connects lazily in _cursor()."""
    vendor = 'sqlite'
    # SQLite requires LIKE statements to include an ESCAPE clause if the value
    # being escaped has a percent or underscore in it.
    # See http://www.sqlite.org/lang_expr.html for an explanation.
    operators = {
        'exact': '= %s',
        'iexact': "LIKE %s ESCAPE '\\'",
        'contains': "LIKE %s ESCAPE '\\'",
        'icontains': "LIKE %s ESCAPE '\\'",
        'regex': 'REGEXP %s',
        'iregex': "REGEXP '(?i)' || %s",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\'",
        'endswith': "LIKE %s ESCAPE '\\'",
        'istartswith': "LIKE %s ESCAPE '\\'",
        'iendswith': "LIKE %s ESCAPE '\\'",
    }
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
    def _cursor(self):
        # Open the connection on first use and register the Python
        # user-defined SQL functions that DatabaseOperations relies on.
        if self.connection is None:
            settings_dict = self.settings_dict
            if not settings_dict['NAME']:
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured("Please fill out the database NAME in the settings module before using the database.")
            kwargs = {
                'database': settings_dict['NAME'],
                'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
            }
            kwargs.update(settings_dict['OPTIONS'])
            self.connection = Database.connect(**kwargs)
            # Register extract, date_trunc, and regexp functions.
            self.connection.create_function("django_extract", 2, _sqlite_extract)
            self.connection.create_function("django_date_trunc", 2, _sqlite_date_trunc)
            self.connection.create_function("regexp", 2, _sqlite_regexp)
            self.connection.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
            connection_created.send(sender=self.__class__, connection=self)
        return self.connection.cursor(factory=SQLiteCursorWrapper)
    def close(self):
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if self.settings_dict['NAME'] != ":memory:":
            BaseDatabaseWrapper.close(self)
# Matches a bare "%s" placeholder but not an escaped "%%s".
FORMAT_QMARK_REGEX = re.compile(r'(?![^%])%s')
class SQLiteCursorWrapper(Database.Cursor):
    """
    Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
    This fixes it -- but note that if you want to use a literal "%s" in a query,
    you'll need to use "%%s".
    """
    def execute(self, query, params=()):
        query = self.convert_query(query)
        try:
            return Database.Cursor.execute(self, query, params)
        except Database.IntegrityError, e:
            # Re-raise as Django's exception type, preserving the traceback
            # (Python 2 three-argument raise form).
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
    def executemany(self, query, param_list):
        query = self.convert_query(query)
        try:
            return Database.Cursor.executemany(self, query, param_list)
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
    def convert_query(self, query):
        # Rewrite %s -> ? and unescape %% -> % for the qmark paramstyle.
        return FORMAT_QMARK_REGEX.sub('?', query).replace('%%','%')
def _sqlite_extract(lookup_type, dt):
    # User-defined SQL function backing date_extract_sql(); registered on
    # the connection as "django_extract".  Returns None for NULL or
    # unparsable input.
    if dt is None:
        return None
    try:
        dt = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'week_day':
        # Django's week_day is 1 (Sunday) through 7 (Saturday).
        return (dt.isoweekday() % 7) + 1
    else:
        # Other lookup types ('year', 'month', ...) map directly to
        # attributes of the datetime object.
        return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
    # User-defined SQL function backing date_trunc_sql(); registered on
    # the connection as "django_date_trunc".  Truncates a timestamp string
    # to year/month/day precision.
    try:
        dt = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'year':
        return "%i-01-01 00:00:00" % dt.year
    elif lookup_type == 'month':
        return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
    elif lookup_type == 'day':
        return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
    # NOTE(review): any other lookup_type falls through and returns None.
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
    # User-defined SQL function backing date_interval_sql(); applies a
    # timedelta (conn is "+" or "-") to a date/datetime value and formats
    # the result the way SQLite stores it, so it compares correctly.
    try:
        dt = util.typecast_timestamp(dt)
        delta = datetime.timedelta(int(days), int(secs), int(usecs))
        if conn.strip() == '+':
            dt = dt + delta
        else:
            dt = dt - delta
    except (ValueError, TypeError):
        return None
    if isinstance(dt, datetime.datetime):
        rv = dt.strftime("%Y-%m-%d %H:%M:%S")
        if dt.microsecond:
            # Keep six digits of fractional seconds (strftime drops them).
            rv = "%s.%0.6d" % (rv, dt.microsecond)
    else:
        rv = dt.strftime("%Y-%m-%d")
    return rv
def _sqlite_regexp(re_pattern, re_string):
import re
try:
return bool(re.search(re_pattern, re_string))
except:
return False
| apache-2.0 | 3,400,505,049,174,793,000 | 39.686667 | 129 | 0.642143 | false |
sxjscience/tvm | python/tvm/relay/frontend/qnn_torch.py | 2 | 27334 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-outside-toplevel
""" Functions to convert quantized torch models to QNN """
import logging
import numpy as np
import tvm
from tvm import relay
from tvm.relay import expr as _expr
from tvm.relay import op as _op
from tvm.relay.frontend.common import infer_shape
class QNNParam:
    """ Container for the quantization parameters of one packed weight. """

    def __init__(self, weight, bias, scale, zero_point, param_key):
        # Strip the torch packed-params suffix to get the variable prefix.
        prefix = param_key[: -len("._packed_params")]
        self.weight = weight
        self.weight_var = _expr.var(prefix + "_weight", shape=weight.shape)

        if bias is None:
            self.bias_var = None
            self.bias = None
        else:
            self.bias_var = _expr.var(prefix + "_bias", shape=bias.shape)
            self.bias = bias.detach().numpy()

        self.scale = _expr.const(scale)
        self.zero_point = _expr.const(zero_point, dtype="int32")
def _unpack_quant_params(param_name, packed_params, unpack_func):
    """Unpack one torch packed-params blob into a QNNParam.

    *unpack_func* must match the packed format (conv2d_unpack or
    linear_unpack); it yields the quantized weight and optional bias.
    """
    # Torch stores quantized params in a custom packed format,
    # need to unpack and retrieve them as numpy arrays
    qweight, bias = unpack_func(packed_params)
    weight_np = qweight.dequantize().numpy()
    import torch
    if qweight.qscheme() == torch.per_tensor_affine:
        param = QNNParam(
            weight_np, bias, qweight.q_scale(), int(qweight.q_zero_point()), param_name
        )
    else:
        # Per-channel quantization: one scale per output channel.
        scales = qweight.q_per_channel_scales().numpy()
        zero_points = qweight.q_per_channel_zero_points().numpy()
        # This is an assumption posed by QNN
        msg = "The values of zero points should be all zero for per channel"
        assert np.all(zero_points == 0), msg
        param = QNNParam(weight_np, bias, scales, 0, param_name)
    return param
def get_weight_quant_params(script_module):
    """ Retrive and unpack weight parameters from quantized modules """
    # Returns a dict mapping "<module path>._packed_params" -> QNNParam.
    conv_packed_params = []
    linear_packed_params = []
    import torch
    # conv and linear requires different unpacking function
    # extract all conv and linear parameters separately to distinguish them
    for name, m in script_module.named_modules():
        if isinstance(m, torch.jit.RecursiveScriptModule):
            if "Conv" in m.original_name:
                conv_packed_params.append((name, m.state_dict()))
            elif m.original_name == "LinearPackedParams":
                linear_packed_params.append((name, m.state_dict()))
    pairs = [
        (torch.ops.quantized.conv2d_unpack, conv_packed_params),
        (torch.ops.quantized.linear_unpack, linear_packed_params),
    ]
    quant_params = {}
    param_name = "_packed_params"
    for unpack_func, params in pairs:
        for name, state_dict in params:
            # Each packed module's state dict holds exactly one entry:
            # the packed params blob itself.
            assert len(state_dict) == 1
            assert param_name in state_dict
            key = name + "." + param_name
            packed_param = state_dict[param_name]
            quant_params[key] = _unpack_quant_params(key, packed_param, unpack_func)
    return quant_params
def add_quant_params_to_outputs(outputs, packed_param_map, quant_params):
    """
    Add quant params to outputs so that they can be referenced by other
    ops later. Weights are quantized here.
    """
    for node_name, packed_param_name in packed_param_map.items():
        qparam = quant_params[packed_param_name]
        # axis=0: per-output-channel quantization of the weight tensor.
        qweight = relay.qnn.op.quantize(
            qparam.weight_var, qparam.scale, qparam.zero_point, out_dtype="int8", axis=0
        )
        # Tuple layout consumed by the op converters: inputs[1][0..3].
        param_tup = (qweight, qparam.scale, qparam.zero_point, qparam.bias_var)
        outputs[node_name] = param_tup
def _get_quant_param_for_input(input_value):
    """
    We want to know the input scale and zp of this input_value, since
    input quant params are not explicitly passed around in torch (they
    are embeded in a QTensor data structure, not visible statically).
    We know that it is quantized using output scale and zp
    of some previous quantized op. The purpose of this function
    is to find that pair of parameters.
    """
    # Indices for output scale and zp
    # For example, in quantized::conv2d(%input, %1, %2, %3, %4, %5, %6, %7),
    # 6th and 7th arg are output scale and zp respectively.
    output_quant_param_indices = {
        "aten::quantize_per_tensor": (1, 2),
        "quantized::conv2d": (6, 7),
        "quantized::conv2d_relu": (6, 7),
        "quantized::linear": (2, 3),
        "quantized::linear_relu": (2, 3),
        "quantized::add_relu": (2, 3),
        "quantized::add": (2, 3),
        "quantized::mul_relu": (2, 3),
        "quantized::mul": (2, 3),
        "quantized::cat": (2, 3),
        "quantized::mul_scalar": (2, 3),
        "quantized::add_scalar": (2, 3),
    }
    def dfs(current_node):
        # trace back to find the producer of this input value
        current_op = current_node.kind()
        if current_op in output_quant_param_indices:
            indices = output_quant_param_indices[current_op]
            scale = current_node.inputsAt(indices[0])
            zp = current_node.inputsAt(indices[1])
            return scale, zp
        # Trace back eariler nodes, dfs order
        # Assume quantized tensor comes earlier in the args
        # NOTE: this returns after following only the FIRST input — the
        # loop is a deliberate "first input if any" pattern, not a scan.
        for arg in current_node.inputs():
            return dfs(arg.node())
        # shouldn't happen
        assert False, "No producer for %s" % (str(current_node))
    return dfs(input_value.node())
def _get_add_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::add_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
s = input_scale
z = input_zero_point
c = scalar
c_q = round(c / s)
if q_min > z - c_q:
s_prime = (float(q_max) - (z - c_q)) / (float(q_max) - q_min) * s
z_prime = q_min
elif q_max < z - c_q:
s_prime = (float(z - c_q) - q_min) / (float(q_max) - q_min) * s
z_prime = q_max
else:
s_prime = s
z_prime = z - c_q
return s_prime, z_prime
def _get_mul_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::mul_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
self_scale = input_scale
self_zero_point = input_zero_point
other_val = scalar
if other_val > 0.0:
s_prime = other_val * self_scale
z_prime = self_zero_point
elif other_val == 0.0:
s_prime = 1.0
z_prime = 0
else:
s_prime = abs(other_val) * self_scale
z_prime = q_max - (self_zero_point - q_min)
return s_prime, z_prime
def _add_output_quant_params_to_scalar_op(node, graph, input_scale, input_zero_point, scalar):
    """
    The output scale and zp of {add,mul}_scalar op are not explicit in the IR
    They are required for _get_quant_param_for_input above to work correctly
    So calculate these params using the same way torch does, and make new
    constant nodes in the input IR. Also add these params to the inputs of
    scalar op.
    For example,
       %6 : float = prim::Constant[value=3.]()
       %input : QUInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6)
    becomes
       %6 : float = prim::Constant[value=3.]()
       %7 : float = prim::Constant[value=0.015686161816120148]()
       %8 : int = prim::Constant[value=0]()
       %input : UInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6, %7, %8)
    %7 and %8 are newly created output scale and zp constant nodes
    """
    import torch
    operator = node.kind()
    if operator == "quantized::mul_scalar":
        out_scale, out_zero_point = _get_mul_scalar_output_quant_param(
            input_scale, input_zero_point, scalar
        )
    elif operator == "quantized::add_scalar":
        out_scale, out_zero_point = _get_add_scalar_output_quant_param(
            input_scale, input_zero_point, scalar
        )
    else:
        raise NotImplementedError("unsupported scalar op: %s" % operator)
    # create new constant nodes and add them to graph
    out_scale_node = graph.create("prim::Constant")
    out_zero_point_node = graph.create("prim::Constant")
    # Insert before the scalar op so the constants dominate their use.
    out_scale_node.insertBefore(node)
    out_zero_point_node.insertBefore(node)
    # f_/i_ set the float/int "value" attribute on the constant nodes.
    out_scale_node.f_("value", out_scale)
    out_zero_point_node.i_("value", out_zero_point)
    out_scale_node.output().setType(torch._C.FloatType.get())
    out_zero_point_node.output().setType(torch._C.IntType.get())
    node.addInput(out_scale_node.output())
    node.addInput(out_zero_point_node.output())
def add_input_quant_params_to_op_inputs(graph):
    """
    In Torch, input quant params are not explicitly passed around
    Instead, they are stored in QTensor data structure, and retrieved
    at runtime by each quantized ops.
    However, they need to be known statically for QNN translation.
    To workaround and simplify the translation of inputs, we manually add
    input quant params to inputs of Torch quantized operators listed below.
    See _quantized_conv2d() below for example of why this is helpful.
    For example,
      %input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435)
    becomes
      %395 : float = prim::Constant[value=0.036212071776390076]()
      %396 : int = prim::Constant[value=0]()
      %430 : float = prim::Constant[value=0.16080744564533234]()
      %431 : int = prim::Constant[value=42]()
      %input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435,
                                                     %430, %431, %395, %396)
    %434, %435 are output scale and zp of quantized::add op
    %430, %431, %395, %396 are two pairs of input (scale, zp) for two tensors
    added by this function
    """
    # How many quantized tensors each op takes as inputs?
    # A pair of (scale, zp) for each input quantized tensor will be added
    # to the input nodes
    num_quantized_inputs = {
        "quantized::conv2d": 1,
        "quantized::conv2d_relu": 1,
        "quantized::linear": 1,
        "quantized::linear_relu": 1,
        "quantized::add_relu": 2,
        "quantized::add": 2,
        "quantized::mul_relu": 2,
        "quantized::mul": 2,
        "aten::dequantize": 1,
        "aten::mean": 1,
        "aten::upsample_bilinear2d": 1,
        "aten::relu_": 1,
        "aten::relu": 1,
        "quantized::add_scalar": 1,
        "quantized::mul_scalar": 1,
        "quantized::relu6": 1,
    }
    need_input_quant_param = set(num_quantized_inputs.keys())
    need_input_quant_param.add("quantized::cat")
    for node in graph.nodes():
        operator = node.kind()
        if operator not in need_input_quant_param:
            continue
        input_scales = []
        input_zero_points = []
        if operator == "quantized::cat":
            # the number of inputs to concat is not constant
            # so handle it separately
            inputs = node.inputsAt(0).node().inputs()
            for inp in inputs:
                scale, zp = _get_quant_param_for_input(inp)
                input_scales.append(scale)
                input_zero_points.append(zp)
        else:
            for i in range(num_quantized_inputs[operator]):
                scale, zp = _get_quant_param_for_input(node.inputsAt(i))
                input_scales.append(scale)
                input_zero_points.append(zp)
        if operator in ["quantized::add_scalar", "quantized::mul_scalar"]:
            # The scalar operand is the float constant at input index 1.
            scalar = node.inputsAt(1).node().f("value")
            inp_scale = input_scales[0].node().f("value")
            inp_zero_point = input_zero_points[0].node().i("value")
            # see the comments in this function above
            _add_output_quant_params_to_scalar_op(node, graph, inp_scale, inp_zero_point, scalar)
        # Append the (scale, zp) pairs as trailing inputs of the op.
        for scale, zp in zip(input_scales, input_zero_points):
            node.addInput(scale)
            node.addInput(zp)
def add_quant_params(params, quant_params):
    """ Populate the TVM params dict with quantized weights and biases. """
    for param in quant_params.values():
        params[param.weight_var.name_hint] = tvm.nd.array(param.weight)
        if param.bias is not None:
            params[param.bias_var.name_hint] = tvm.nd.array(param.bias)
def apply_with_upcast(data, func):
    """Run *func* on *data* upcast to int32, then cast the result to uint8."""
    widened = _op.cast(data, dtype="int32")
    return _op.cast(func(widened), "uint8")
def quantized_mean(data, input_scale, input_zero_point, func_fp32):
    """Quantized mean via dequantize -> fp32 reduce -> requantize.

    Refer to aten/src/ATen/native/quantized/cpu/qreduction.cpp.
    """
    fp32_input = relay.qnn.op.dequantize(data, input_scale, input_zero_point)
    reduced = func_fp32(fp32_input)
    return relay.qnn.op.quantize(
        reduced, input_scale, input_zero_point, out_dtype="uint8", axis=1
    )
def quantized_upsample(data, input_scale, input_zero_point, func_fp32):
    """Quantized upsampling, piggy-backed onto the fp32 implementation.

    Dequantize, upsample in fp32, then requantize with the same params;
    this matches torch's output exactly.
    """
    fp32_input = relay.qnn.op.dequantize(data, input_scale, input_zero_point)
    upsampled = func_fp32(fp32_input)
    return relay.qnn.op.quantize(
        upsampled, input_scale, input_zero_point, out_dtype="uint8", axis=1
    )
def quantized_relu(data, input_zero_point):
    """ReLU on a quantized tensor: clamp below at the zero point.

    Refer to aten/src/ATen/native/quantized/cpu/qrelu.cpp.
    """
    zero = _op.cast(input_zero_point, dtype="uint8")
    return _op.tensor.maximum(data, zero)
def _quantize_per_tensor():
    """Return a converter for aten::quantize_per_tensor."""

    def _impl(inputs, _):
        scale = _expr.const(inputs[1])
        zero_point = _expr.const(inputs[2])
        return relay.qnn.op.quantize(
            inputs[0], scale, zero_point, out_dtype="uint8", axis=1
        )

    return _impl
def _dequantize():
    """Return a converter for aten::dequantize."""

    def _impl(inputs, _):
        # Input scale/zp are appended by add_input_quant_params_to_op_inputs.
        assert len(inputs) == 3, "Input quant params not found in op inputs"
        return relay.qnn.op.dequantize(
            inputs[0], _expr.const(inputs[1]), _expr.const(inputs[2])
        )

    return _impl
def _get_numpy(relay_const_scalar):
return relay_const_scalar.data.asnumpy()
def _get_scalar(relay_const_scalar):
return np.asscalar(_get_numpy(relay_const_scalar))
def _do_bias_and_requantize(
    output, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
):
    """ Output processing for conv and linear """
    # this is a vector for per channel case
    requant_input_scale = _expr.const(_get_numpy(input_scale) * _get_numpy(weight_scale))
    # Torch does bias add and requanize scale in fp32
    # refer to third_party/fbgemm/include/fbgemm/OutputProcessing-inl.h
    # Instead, we do bias add in int32 and use qnn requantize, which needs
    # integer input.
    # We observed no loss in accuracy in doing this way, and it is better
    # for tvm because bias quantization can be done at compile time
    # Instead, the torch way requires rounding of activation at runtime
    if bias is not None:
        # Bias is quantized with scale = input_scale * weight_scale, zp = 0.
        qbias = relay.qnn.op.quantize(
            bias, requant_input_scale, _expr.const(0, "int32"), out_dtype="int32", axis=0
        )
        requantize_input = _op.nn.bias_add(output, qbias)
    else:
        requantize_input = output
    requantized = relay.qnn.op.requantize(
        requantize_input,
        requant_input_scale,
        relay.const(0, "int32"),
        output_scale,
        output_zero_point,
        out_dtype="int32",
        axis=1,
    )
    # With fused ReLU, clip at the output zero point (the quantized 0.0)
    # instead of 0 so negative activations are removed.
    clip_min = 0
    if with_relu:
        clip_min = _get_scalar(output_zero_point)
    clip = _op.tensor.clip(requantized, clip_min, 255.0)
    return _op.cast(clip, dtype="uint8")
def _quantized_conv2d(with_relu=False):
    """Return a converter for quantized::conv2d (optionally fused with ReLU)."""

    def _impl(inputs, _):
        # refer to src/ATen/native/quantized/cpu/qconv.cpp
        # inputs[0]: input tensor
        # inputs[1]: (weight, scale, zero_point, bias)
        # inputs[2-5]: stride, padding, dilation, groups
        # inputs[6]: output_scale
        # inputs[7]: output_zero_point
        # inputs[8]: input_scale (added manually by frontend)
        # inputs[9]: input_zero_point (added manually by frontend)
        weight = inputs[1][0]
        weight_scale = inputs[1][1]
        weight_zero_point = inputs[1][2]
        output_scale = _expr.const(inputs[6])
        output_zero_point = _expr.const(inputs[7])
        assert len(inputs) == 10, "Input quant params not found in op inputs"
        # These are manually added by add_input_quant_params_to_op_inputs above
        # In torch, they are retrieved from QTensor data structure at runtime
        input_scale = _expr.const(inputs[8])
        input_zero_point = _expr.const(inputs[9])
        # Fix: the original assigned strides/padding/dilation twice (a tuple
        # unpack immediately followed by three identical re-assignments);
        # the redundant copies are removed.
        strides, padding, dilation = inputs[2], inputs[3], inputs[4]
        groups = inputs[5]
        weight_shape = infer_shape(weight)
        kernel_size = (weight_shape[2], weight_shape[3])
        out_channels = weight_shape[0]
        if padding[0] != 0 or padding[1] != 0:
            # Pad with the input zero point (not 0) so the padded region
            # dequantizes to 0.0, matching torch's behaviour.
            pad_val = _get_scalar(input_zero_point)
            inp = _op.nn.pad(
                inputs[0],
                pad_width=((0, 0), (0, 0), (padding[0], padding[0]), (padding[1], padding[1])),
                pad_value=float(pad_val),
            )
        else:
            inp = inputs[0]
        # padding is (0, 0) because we did explicit pad op with
        # pad value being zero point above
        conv_out = relay.qnn.op.conv2d(
            inp,
            weight,
            input_zero_point,
            weight_zero_point,
            input_scale,
            weight_scale,
            kernel_size=kernel_size,
            dilation=dilation,
            strides=strides,
            padding=(0, 0),
            groups=groups,
            channels=out_channels,
        )
        bias_var = inputs[1][3]
        return _do_bias_and_requantize(
            conv_out,
            bias_var,
            input_scale,
            weight_scale,
            output_scale,
            output_zero_point,
            with_relu,
        )

    return _impl
def _linear(with_relu=False):
    """Convert quantized::linear / linear_relu to qnn.dense.

    Follows the same bias/requantize scheme as the quantized conv converter.
    """

    def _impl(inputs, _):
        packed = inputs[1]  # (weight, scale, zero_point, bias)
        weight = packed[0]
        weight_scale = packed[1]
        weight_zero_point = packed[2]
        output_scale = _expr.const(inputs[2])
        output_zero_point = _expr.const(inputs[3])
        assert len(inputs) == 6, "Input quant params not found in op inputs"
        # Manually added by add_input_quant_params_to_op_inputs above
        input_scale = _expr.const(inputs[4])
        input_zero_point = _expr.const(inputs[5])

        dense = relay.qnn.op.dense(
            inputs[0],
            weight,
            input_zero_point,
            weight_zero_point,
            input_scale,
            weight_scale,
            units=infer_shape(weight)[0],
        )

        return _do_bias_and_requantize(
            dense,
            packed[3],
            input_scale,
            weight_scale,
            output_scale,
            output_zero_point,
            with_relu,
        )

    return _impl
def _binop(relay_op, with_relu=False, fp32_piggy_back=False):
    """Build a converter for quantized binary ops (quantized::add / mul).

    relay_op: the op used to combine the operands. On the direct path it is
    called with the full 8-argument qnn signature; on the piggy-back path it
    is called with two fp32 operands.
    NOTE(review): the same callable is used for both paths, so when
    fp32_piggy_back=True the caller must pass a plain fp32 op — confirm at
    the call sites.
    with_relu: clamp the result at the output zero point (fused relu).
    """

    def qnn_impl(
        lhs,
        rhs,
        input_scale_lhs,
        input_zero_point_lhs,
        input_scale_rhs,
        input_zero_point_rhs,
        output_scale,
        output_zero_point,
    ):
        # Direct lowering: the qnn op performs the requantization itself.
        qnn_out = relay_op(
            lhs,
            rhs,
            input_scale_lhs,
            input_zero_point_lhs,
            input_scale_rhs,
            input_zero_point_rhs,
            output_scale,
            output_zero_point,
        )
        if with_relu:
            # In the quantized domain relu clamps at the output zero point.
            clip_min = _get_scalar(output_zero_point)
            return _op.tensor.clip(qnn_out, clip_min, 255)
        return qnn_out

    # refer to aten/src/ATen/native/quantized/cpu/{qadd, qmul}.cpp
    # they piggy backs to fp32 math by dequantize -> fp32 math -> quantize
    def torch_impl(
        lhs,
        rhs,
        input_scale_lhs,
        input_zero_point_lhs,
        input_scale_rhs,
        input_zero_point_rhs,
        output_scale,
        output_zero_point,
    ):
        # If an operand is itself the result of qnn.quantize, reuse its fp32
        # input directly instead of dequantizing it again.
        if isinstance(lhs, _expr.Call) and lhs.op.name == "qnn.quantize":
            lhs = lhs.args[0]
        else:
            lhs = relay.qnn.op.dequantize(lhs, input_scale_lhs, input_zero_point_lhs)

        if isinstance(rhs, _expr.Call) and rhs.op.name == "qnn.quantize":
            rhs = rhs.args[0]
        else:
            rhs = relay.qnn.op.dequantize(rhs, input_scale_rhs, input_zero_point_rhs)

        fp32_out = relay_op(lhs, rhs)

        if with_relu:
            fp32_out = _op.nn.relu(fp32_out)

        return relay.qnn.op.quantize(
            fp32_out, output_scale, output_zero_point, axis=-1, out_dtype="uint8"
        )

    def _impl(inputs, _):
        lhs = inputs[0]
        rhs = inputs[1]
        output_scale = _expr.const(inputs[2])
        output_zero_point = _expr.const(inputs[3])
        assert len(inputs) == 8, "Input quant params not found in op inputs"
        # Manually added by add_input_quant_params_to_op_inputs above
        input_scale_lhs = _expr.const(inputs[4])
        input_zero_point_lhs = _expr.const(inputs[5])
        input_scale_rhs = _expr.const(inputs[6])
        input_zero_point_rhs = _expr.const(inputs[7])

        if fp32_piggy_back:
            logging.info("Piggy backing to FP32 op (PyTorch way)")
            return torch_impl(
                lhs,
                rhs,
                input_scale_lhs,
                input_zero_point_lhs,
                input_scale_rhs,
                input_zero_point_rhs,
                output_scale,
                output_zero_point,
            )

        return qnn_impl(
            lhs,
            rhs,
            input_scale_lhs,
            input_zero_point_lhs,
            input_scale_rhs,
            input_zero_point_rhs,
            output_scale,
            output_zero_point,
        )

    return _impl
def _cat(fp32_piggy_back=False):
    """Convert quantized::cat.

    PyTorch implements quantized concat by dequantizing, concatenating in
    fp32 and requantizing (see aten qconcat.cpp); we can either mimic that
    (fp32_piggy_back=True) or call qnn.concatenate directly.
    """

    def torch_impl(inputs, input_scales, input_zero_points, output_scale, output_zero_point, axis):
        dequantized = [
            relay.qnn.op.dequantize(inp, scale, zp)
            for inp, scale, zp in zip(inputs, input_scales, input_zero_points)
        ]
        concat = _op.tensor.concatenate(dequantized, axis=axis)
        return relay.qnn.op.quantize(
            concat, output_scale, output_zero_point, axis=axis, out_dtype="uint8"
        )

    def _impl(inputs, _):
        axis = inputs[1]
        output_scale = _expr.const(inputs[2])
        output_zero_point = _expr.const(inputs[3])
        # After the 4 fixed entries, inputs carries (scale, zero_point)
        # pairs, one pair per concatenated tensor.
        num_inputs = (len(inputs) - 4) // 2
        input_scales = [_expr.const(inputs[4 + i * 2]) for i in range(num_inputs)]
        input_zero_points = [_expr.const(inputs[4 + i * 2 + 1]) for i in range(num_inputs)]

        concat_fn = torch_impl if fp32_piggy_back else relay.qnn.op.concatenate
        return concat_fn(
            inputs[0], input_scales, input_zero_points, output_scale, output_zero_point, axis
        )

    return _impl
def _add_scalar():
    """Convert quantized::add_scalar (tensor + python scalar)."""
    # this is used for mobilenet v3
    def _impl(inputs, _):
        # refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
        assert len(inputs) == 6, "Input quant params not found in op inputs"
        s = inputs[4]  # input scale
        z = inputs[5]  # input zero point
        c = inputs[1]  # the scalar addend
        c_q = round(c / s)  # the addend expressed in quantized steps
        q_min = 0
        q_max = 255
        # math for calculating output scale and zp are already done
        # during _add_output_quant_params_to_scalar_op above
        out_scale = _expr.const(inputs[2])
        out_zp = _expr.const(inputs[3])

        # If shifting the zero point by c_q would leave the uint8 range, the
        # add cannot be expressed as a pure quant-param change: fall back to
        # dequantize -> fp32 add -> quantize.
        if q_min > z - c_q or q_max < z - c_q:
            dequant = relay.qnn.op.dequantize(inputs[0], _expr.const(s), _expr.const(z))
            dequantized_add = _op.tensor.add(dequant, _expr.const(c_q * s))
            return relay.qnn.op.quantize(
                dequantized_add, out_scale, out_zp, axis=1, out_dtype="uint8"
            )
        # only scale change
        return inputs[0]

    return _impl
def quantize_scalar(data, scale, zero_point):
    """Quantize a Python scalar into the uint8 range [0, 255].

    Used to quantize the constant 6.0 in mobilenet v3 (relu6).
    """
    quantized = round(zero_point + data / scale)
    return min(max(quantized, 0), 255)
def _relu6():
    """Convert quantized relu6: clip between the zero point and quantized 6.0."""
    # refer to src/ATen/native/quantized/cpu/qrelu.cpp
    def _impl(inputs, _):
        assert len(inputs) == 4, "Input quant params not found in op inputs"
        scale = inputs[2]
        zero_point = inputs[3]
        upper = quantize_scalar(6.0, scale, zero_point)
        return _op.tensor.clip(inputs[0], zero_point, upper)

    return _impl
def _mul_scalar():
    """Convert quantized::mul_scalar (tensor * python scalar)."""
    # this is used for mobilenet v3
    def _impl(inputs, _):
        # refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
        # math for calculating output scale and zp are already done
        # during _add_output_quant_params_to_scalar_op above
        assert len(inputs) == 6, "Input quant params not found in op inputs"
        other_val = inputs[1]  # scalar

        if other_val > 0.0:
            # positive multiplier: the quantized values are unchanged,
            # only scale change
            return inputs[0]
        if other_val == 0.0:
            # multiplying by zero produces an all-zero tensor of the same shape
            shape = infer_shape(inputs[0])
            return _op.full(_expr.const(0), shape, dtype="uint8")

        # negative scale case: mirror the quantized values around the range,
        # i.e. q_new = 255 - q_old
        q_min = 0
        q_max = 255
        # NOTE(review): 255 does not fit in int8; this relies on the
        # const/cast wrapping behaviour of the backend — confirm against the
        # upstream qmul lowering.
        bias = _expr.const(q_max + q_min, dtype="int8")
        int8 = bias - _op.cast(inputs[0], "int8")
        return _op.cast(int8, "uint8")

    return _impl
# Mapping from quantized PyTorch op names to the converter closures above.
# Each value is the _impl function produced by the corresponding factory.
convert_map = {
    "aten::quantize_per_tensor": _quantize_per_tensor(),
    "quantized::conv2d_relu": _quantized_conv2d(with_relu=True),
    "aten::dequantize": _dequantize(),
    "quantized::conv2d": _quantized_conv2d(),
    "quantized::add_relu": _binop(relay.qnn.op.add, with_relu=True),
    "quantized::add": _binop(relay.qnn.op.add),
    "quantized::mul_relu": _binop(relay.qnn.op.mul, with_relu=True),
    "quantized::mul": _binop(relay.qnn.op.mul),
    "quantized::linear": _linear(),
    "quantized::linear_relu": _linear(with_relu=True),
    "quantized::cat": _cat(),
    "quantized::add_scalar": _add_scalar(),
    "quantized::mul_scalar": _mul_scalar(),
    "quantized::relu6": _relu6(),
}
| apache-2.0 | -9,203,850,802,481,740,000 | 34.088575 | 99 | 0.601339 | false |
lamby/jenkins.debian.net | bin/reproducible_scheduler.py | 1 | 31885 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright © 2015 Mattia Rizzolo <mattia@mapreri.org>
# Copyright © 2015-2017 Holger Levsen <holger@layer-acht.org>
# Based on reproducible_scheduler.sh © 2014-2015 Holger Levsen <holger@layer-acht.org>
# Licensed under GPL-2
#
# Depends: python3 python3-debian
#
# Schedule packages to be build.
import sys
import lzma
import deb822
import aptsources.sourceslist
import smtplib
from subprocess import call
from apt_pkg import version_compare
from urllib.request import urlopen
from sqlalchemy import sql
from email.mime.text import MIMEText
from reproducible_common import *
from reproducible_html_live_status import generate_schedule
from reproducible_html_packages import gen_packages_html
from reproducible_html_packages import purge_old_pages
"""
How the scheduler chooses which limit to apply, based on the MAXIMA
and LIMIT arrays:
First, the scheduler is only started for an architecture if the number of
currently scheduled packages is lower than MAXIMA*3. Then if the number of
scheduled packages is higher than MAXIMA, only new versions are scheduled...
Then, for each category (totally _untested_ packages, _new_ versions,
_ftbfs_ and _depwait_ packages and _old_ versions) it depends on how many
packages are already scheduled in that category, in a 3 steps process.
Only when scheduling old versions MINIMUM_AGE is respected.
Let's go by an example:
'unstable': {1: (250, 40), 2: (350, 20), '*': 5},
is translated to:
if total < 250:
40
elif total < 350:
20
else:
5
* 1st step, if there are less than 250 packages in the queue, schedule 40
* 2nd step, if there are less than 350 packages in the queue, schedule 20
* 3rd step, schedule 5
So, the 3rd step happens only when there are more than 350 packages queued up.
LIMITS_404 defines how many packages with status 404 are rescheduled at max.
"""
# only old packages older than this will be rescheduled
# (days since last build, per architecture)
MINIMUM_AGE = {'amd64': 30, 'i386': 33, 'arm64': 21, 'armhf': 30}
# maximum queue size, see explanations above
MAXIMA = {'amd64': 750, 'i386': 750, 'arm64': 1000, 'armhf': 750}
# limits, see explanations above
# Shape: LIMITS[category][arch][suite] is either {'*': n} (flat limit) or
# {1: (level1, limit1), 2: (level2, limit2), '*': fallback} (staged limits).
LIMITS = {
    'untested': {
        'amd64': {
            'testing': {'*': 440},
            'unstable': {'*': 440},
            'experimental': {'*': 440},
        },
        'i386': {
            'testing': {'*': 440},
            'unstable': {'*': 440},
            'experimental': {'*': 440},
        },
        'arm64': {
            'testing': {'*': 660},
            'unstable': {'*': 660},
            'experimental': {'*': 440},
        },
        'armhf': {
            'testing': {'*': 440},
            'unstable': {'*': 440},
            'experimental': {'*': 440},
        },
    },
    'new': {
        'amd64': {
            'testing': {1: (100, 250), 2: (200, 200), '*': 100},
            'unstable': {1: (100, 250), 2: (200, 200), '*': 150},
            'experimental': {1: (100, 250), 2: (200, 200), '*': 50},
        },
        'i386': {
            'testing': {1: (100, 250), 2: (200, 200), '*': 100},
            'unstable': {1: (100, 250), 2: (200, 200), '*': 150},
            'experimental': {1: (100, 250), 2: (200, 200), '*': 50},
        },
        'arm64': {
            'testing': {1: (100, 250), 2: (200, 200), '*': 50},
            'unstable': {1: (100, 250), 2: (200, 200), '*': 75},
            'experimental': {1: (100, 200), 2: (200, 200), '*': 25},
        },
        'armhf': {
            'testing': {1: (100, 200), 2: (200, 200), '*': 50},
            'unstable': {1: (100, 200), 2: (200, 200), '*': 75},
            'experimental': {1: (100, 200), 2: (200, 200), '*': 25},
        },
    },
    'ftbfs': {
        'amd64': {
            'testing': {1: (700, 40), 2: (500, 20), '*': 5},
            'unstable': {1: (700, 40), 2: (500, 20), '*': 5},
            'experimental': {1: (700, 40), 2: (500, 20), '*': 2},
        },
        'i386': {
            'testing': {1: (700, 40), 2: (500, 20), '*': 5},
            'unstable': {1: (700, 40), 2: (500, 20), '*': 5},
            'experimental': {1: (700, 40), 2: (500, 20), '*': 2},
        },
        'arm64': {
            'testing': {1: (700, 40), 2: (500, 20), '*': 5},
            'unstable': {1: (700, 40), 2: (500, 20), '*': 5},
            'experimental': {1: (700, 40), 2: (500, 20), '*': 2},
        },
        'armhf': {
            'testing': {1: (575, 20), 2: (450, 10), '*': 5},
            'unstable': {1: (575, 20), 2: (450, 10), '*': 5},
            'experimental': {1: (575, 20), 2: (450, 10), '*': 2},
        }
    },
    'depwait': {
        'amd64': {
            'testing': {1: (700, 400), 2: (500, 200), '*': 50},
            'unstable': {1: (700, 400), 2: (500, 200), '*': 50},
            'experimental': {1: (700, 400), 2: (500, 200), '*': 20},
        },
        'i386': {
            'testing': {1: (700, 400), 2: (500, 200), '*': 50},
            'unstable': {1: (700, 400), 2: (500, 200), '*': 50},
            'experimental': {1: (700, 400), 2: (500, 200), '*': 20},
        },
        'arm64': {
            'testing': {1: (700, 400), 2: (500, 200), '*': 50},
            'unstable': {1: (700, 400), 2: (500, 200), '*': 50},
            'experimental': {1: (700, 400), 2: (500, 200), '*': 20},
        },
        'armhf': {
            'testing': {1: (575, 200), 2: (450, 100), '*': 50},
            'unstable': {1: (575, 200), 2: (450, 100), '*': 50},
            'experimental': {1: (575, 200), 2: (450, 100), '*': 20},
        }
    },
    'old': {
        'amd64': {
            'testing': {1: (300, 800), 2: (500, 500), '*': 0},
            'unstable': {1: (300, 800), 2: (500, 500), '*': 0},
            'experimental': {1: (300, 70), 2: (500, 50), '*': 0},
        },
        'i386': {
            'testing': {1: (300, 800), 2: (500, 500), '*': 0},
            'unstable': {1: (300, 800), 2: (500, 500), '*': 0},
            'experimental': {1: (300, 70), 2: (500, 50), '*': 0},
        },
        'arm64': {
            'testing': {1: (300, 800), 2: (500, 500), '*': 0},
            'unstable': {1: (300, 800), 2: (500, 500), '*': 0},
            'experimental': {1: (300, 70), 2: (500, 50), '*': 0},
        },
        'armhf': {
            'testing': {1: (300, 800), 2: (500, 500), '*': 0},
            'unstable': {1: (300, 800), 2: (500, 500), '*': 0},
            'experimental': {1: (300, 70), 2: (500, 50), '*': 0},
        }
    }
}
# maximum amount of packages with status 404 which will be rescheduled
LIMIT_404 = 255
class Limit:
    """Look up scheduling thresholds from the LIMITS table.

    An instance is bound to one architecture and one queue category;
    callers must assign `self.suite` before using the get_* methods.
    """

    def __init__(self, arch, queue):
        self.arch = arch    # architecture, e.g. 'amd64'
        self.queue = queue  # LIMITS category, e.g. 'untested', 'new', 'old'

    def get_level(self, stage):
        """Return the queue-size threshold (the first tuple member) for
        the given stage.

        Only meaningful for staged entries of the form (level, limit);
        NOTE(review): calling this with stage '*' would index into an int
        and raise an unhandled TypeError — callers only pass 1 or 2.
        """
        try:
            return int(LIMITS[self.queue][self.arch][self.suite][stage][0])
        except KeyError:
            log.error('No limit defined for the %s queue on %s/%s stage %s. '
                      'Returning 1', self.queue, self.suite, self.arch, stage)
            return 1
        except IndexError:
            log.critical('The limit is not in the format "(level, limit)". '
                         'I can\'t guess what you want, giving up')
            sys.exit(1)

    def get_limit(self, stage):
        """Return the number of packages to schedule for the given stage.

        Handles both staged tuples (taking the second member) and the
        plain-int default ('*') entry.
        """
        try:
            limit = LIMITS[self.queue][self.arch][self.suite][stage]
            limit = limit[1]
        except KeyError:
            log.error('No limit defined for the %s queue on %s/%s stage %s. '
                      'Returning 1', self.queue, self.suite, self.arch, stage)
            return 1
        except IndexError:
            log.critical('The limit is not in the format "(level, limit)". '
                         'I can\'t guess what you want, giving up')
            sys.exit(1)
        except TypeError:
            # this is the case of the default target: the '*' entry is a
            # plain int, so limit[1] raised TypeError and `limit` already
            # holds the value we want
            if isinstance(limit, int):
                pass
            else:
                raise
        return int(limit)

    def get_staged_limit(self, current_total):
        """Pick the stage limit appropriate for the current queue size."""
        if current_total <= self.get_level(1):
            return self.get_limit(1)
        elif current_total <= self.get_level(2):
            return self.get_limit(2)
        else:
            return self.get_limit('*')
def update_sources(suite):
    """Download the Sources index for `suite` and refresh the db for all archs."""
    mirror = 'http://ftp.de.debian.org/debian'
    remotefile = '{}/dists/{}/main/source/Sources.xz'.format(mirror, suite)
    log.info('Downloading sources file for %s: %s', suite, remotefile)
    raw = urlopen(remotefile).read()
    sources = lzma.decompress(raw).decode('utf8')
    log.debug('\tdownloaded')
    for arch in ARCHS:
        log.info('Updating sources db for %s/%s...', suite, arch)
        update_sources_db(suite, arch, sources)
        log.info('DB update done for %s/%s done at %s.', suite, arch, datetime.now())
def update_sources_db(suite, arch, sources):
    """Synchronise the sources table for suite/arch with a Sources index.

    Inserts newly appeared packages, records version bumps, removes
    packages gone from the archive (cascading over results, schedule and
    notes), then sanity-checks the resulting row count and exits on
    mismatch. Finally regenerates HTML pages for the new packages.
    """
    # extract relevant info (package name and version) from the sources file
    new_pkgs = set()
    newest_version = {}
    for src in deb822.Sources.iter_paragraphs(sources.split('\n')):
        pkg = (src['Package'], src['Version'], suite, arch)

        # only keep the most recent version of a src for each package/suite/arch
        # NOTE(review): this keeps the entry seen last in the index —
        # assumes newer versions are listed later; confirm.
        key = src['Package'] + suite + arch
        if key in newest_version:
            oldversion = newest_version[key]
            oldpackage = (src['Package'], oldversion, suite, arch)
            new_pkgs.remove(oldpackage)

        newest_version[key] = src['Version']
        new_pkgs.add(pkg)

    # get the current packages in the database
    query = "SELECT name, version, suite, architecture FROM sources " + \
            "WHERE suite='{}' AND architecture='{}'".format(suite, arch)
    cur_pkgs = set([(p.name, p.version, p.suite, p.architecture) for p in query_db(query)])

    pkgs_to_add = []
    updated_pkgs = []
    different_pkgs = [x for x in new_pkgs if x not in cur_pkgs]
    log.debug('Packages different in the archive and in the db: %s',
              different_pkgs)
    for pkg in different_pkgs:
        # pkg: (name, version, suite, arch)
        query = "SELECT id, version, notify_maintainer FROM sources " + \
                "WHERE name='{}' AND suite='{}' AND architecture='{}'"
        query = query.format(pkg[0], pkg[2], pkg[3])
        try:
            result = query_db(query)[0]
        except IndexError:  # new package
            pkgs_to_add.append({
                'name': pkg[0],
                'version': pkg[1],
                'suite': pkg[2],
                'architecture': pkg[3],
            })
            continue
        pkg_id = result[0]
        old_version = result[1]
        notify_maint = int(result[2])
        # only treat it as an update when the archive version is newer
        if version_compare(pkg[1], old_version) > 0:
            log.debug('New version: ' + str(pkg) + ' (we had ' +
                      old_version + ')')
            updated_pkgs.append({
                'update_id': pkg_id,
                'name': pkg[0],
                'version': pkg[1],
                'suite': pkg[2],
                'architecture': pkg[3],
                'notify_maintainer': notify_maint,
            })

    # Now actually update the database:
    sources_table = db_table('sources')

    # updated packages
    log.info('Pushing ' + str(len(updated_pkgs)) +
             ' updated packages to the database...')
    if updated_pkgs:
        transaction = conn_db.begin()
        update_query = sources_table.update().\
            where(sources_table.c.id == sql.bindparam('update_id'))
        conn_db.execute(update_query, updated_pkgs)
        transaction.commit()

    # new packages
    if pkgs_to_add:
        log.info('Now inserting %i new sources in the database: %s',
                 len(pkgs_to_add), pkgs_to_add)
        transaction = conn_db.begin()
        conn_db.execute(sources_table.insert(), pkgs_to_add)
        transaction.commit()

    # RM'ed packages: present in the db but gone from the archive
    cur_pkgs_name = [x[0] for x in cur_pkgs]
    new_pkgs_name = [x[0] for x in new_pkgs]
    rmed_pkgs = [x for x in cur_pkgs_name if x not in new_pkgs_name]
    log.info('Now deleting %i removed packages: %s', len(rmed_pkgs),
             rmed_pkgs)
    rmed_pkgs_id = []
    pkgs_to_rm = []
    query = "SELECT id FROM sources WHERE name='{}' AND suite='{}' " + \
            "AND architecture='{}'"
    for pkg in rmed_pkgs:
        result = query_db(query.format(pkg, suite, arch))
        rmed_pkgs_id.append({'deleteid': result[0][0]})
        pkgs_to_rm.append({'name': pkg, 'suite': suite, 'architecture': arch})
    log.debug('removed packages ID: %s',
              [str(x['deleteid']) for x in rmed_pkgs_id])
    log.debug('removed packages: %s', pkgs_to_rm)

    if rmed_pkgs_id:
        # delete all traces of the removed packages in one transaction and
        # archive their names in removed_packages
        transaction = conn_db.begin()
        results_table = db_table('results')
        schedule_table = db_table('schedule')
        notes_table = db_table('notes')
        removed_packages_table = db_table('removed_packages')
        delete_results_query = results_table.delete().\
            where(results_table.c.package_id == sql.bindparam('deleteid'))
        delete_schedule_query = schedule_table.delete().\
            where(schedule_table.c.package_id == sql.bindparam('deleteid'))
        delete_notes_query = notes_table.delete().\
            where(notes_table.c.package_id == sql.bindparam('deleteid'))
        delete_sources_query = sources_table.delete().\
            where(sources_table.c.id == sql.bindparam('deleteid'))
        conn_db.execute(delete_results_query, rmed_pkgs_id)
        conn_db.execute(delete_schedule_query, rmed_pkgs_id)
        conn_db.execute(delete_notes_query, rmed_pkgs_id)
        conn_db.execute(delete_sources_query, rmed_pkgs_id)
        conn_db.execute(removed_packages_table.insert(), pkgs_to_rm)
        transaction.commit()

    # finally check whether the db has the correct number of packages
    query = "SELECT count(*) FROM sources WHERE suite='{}' " + \
            "AND architecture='{}'"
    pkgs_end = query_db(query.format(suite, arch))
    count_new_pkgs = len(set([x[0] for x in new_pkgs]))
    if int(pkgs_end[0][0]) != count_new_pkgs:
        print_critical_message('AH! The number of source in the Sources file' +
                               ' is different than the one in the DB!')
        log.critical('source in the debian archive for the %s suite: %s',
                     suite, str(count_new_pkgs))
        log.critical('source in the reproducible db for the %s suite: %s',
                     suite, str(pkgs_end[0][0]))
        sys.exit(1)
    if pkgs_to_add:
        log.info('Building pages for the new packages')
        gen_packages_html([Package(x['name']) for x in pkgs_to_add], no_clean=True)
def print_schedule_result(suite, arch, criteria, packages):
    '''
    `packages` is the usual list-of-tuples returned by SQL queries,
    where the first item is the id and the second one the package name
    '''
    log.info('Criteria: %s' % criteria)
    log.info('Suite/Arch: %s/%s' % (suite, arch))
    log.info('Amount: %s' % len(packages))
    log.info('Packages: ' + ' '.join(entry[1] for entry in packages))
def queue_packages(all_pkgs, packages, date):
    """Merge not-yet-queued entries of `packages` into `all_pkgs`.

    Each new package id is mapped to `date` (formatted); already present
    ids are skipped. Returns the updated mapping.
    """
    stamp = date.strftime('%Y-%m-%d %H:%M')
    fresh = [entry for entry in packages if entry[0] not in all_pkgs]
    if fresh:
        log.info('The following ' + str(len(fresh)) + ' source packages have ' +
                 'been queued up for scheduling at ' + stamp + ': ' +
                 ' '.join([str(entry[1]) for entry in fresh]))
    all_pkgs.update({entry[0]: stamp for entry in fresh})
    return all_pkgs
def schedule_packages(packages):
    """Insert the {package_id: date} mapping into the schedule table."""
    rows = [{'package_id': pkg_id, 'date_scheduled': when}
            for pkg_id, when in packages.items()]
    log.debug('IDs about to be scheduled: %s', packages.keys())
    if rows:
        conn_db.execute(db_table('schedule').insert(), rows)
def add_up_numbers(packages, arch):
    """Return a '+'-joined per-suite count summary, or '0' when empty.

    `arch` is unused but kept for interface compatibility with callers.
    Fix: the original compared against the literal '0+0+0', which only
    collapses the all-empty case when there are exactly three suites;
    this version works for any number of suites.
    """
    counts = [len(packages[x]) for x in SUITES]
    if not any(counts):
        return '0'
    return '+'.join(str(c) for c in counts)
def query_untested_packages(suite, arch, limit):
    """Return up to `limit` randomly chosen packages never tested before."""
    criteria = 'not tested before, randomly sorted'
    query = """SELECT DISTINCT *
               FROM (
                   SELECT sources.id, sources.name FROM sources
                   WHERE sources.suite='{suite}' AND sources.architecture='{arch}'
                   AND sources.id NOT IN
                       (SELECT schedule.package_id FROM schedule)
                   AND sources.id NOT IN
                       (SELECT results.package_id FROM results)
                   ORDER BY random()
               ) AS tmp
               LIMIT {limit}""".format(suite=suite, arch=arch, limit=limit)
    rows = query_db(query)
    print_schedule_result(suite, arch, criteria, rows)
    return rows
def query_new_versions(suite, arch, limit):
    """Return (id, name) pairs of packages whose archive version is strictly
    newer than the last tested one, oldest build first."""
    criteria = 'tested before, new version available, sorted by last build date'
    query = """SELECT s.id, s.name, s.version, r.version, max(r.build_date) max_date
               FROM sources AS s JOIN results AS r ON s.id = r.package_id
               WHERE s.suite='{suite}' AND s.architecture='{arch}'
               AND s.version != r.version
               AND r.status != 'blacklisted'
               AND s.id IN (SELECT package_id FROM results)
               AND s.id NOT IN (SELECT schedule.package_id FROM schedule)
               GROUP BY s.id, s.name, s.version, r.version
               ORDER BY max_date
               LIMIT {limit}""".format(suite=suite, arch=arch, limit=limit)
    pkgs = query_db(query)
    # the next line avoids constant rescheduling of packages:
    # packages in our repository != sid or testing,
    # so they will always be selected by the query above
    # so we only accept them if their version is greater than the already tested one
    packages = [(x[0], x[1]) for x in pkgs if version_compare(x[2], x[3]) > 0]
    print_schedule_result(suite, arch, criteria, packages)
    return packages
def query_old_ftbfs_versions(suite, arch, limit):
    """Return FTBFS packages with no bug filed, last built over 3 days ago."""
    criteria = ('status ftbfs, no bug filed, tested at least 3 days ago, '
                'no new version available, sorted by last build date')
    cutoff = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d %H:%M')
    query = """SELECT s.id, s.name, max(r.build_date) max_date
               FROM sources AS s JOIN results AS r ON s.id = r.package_id
               JOIN notes AS n ON n.package_id=s.id
               WHERE s.suite='{suite}' AND s.architecture='{arch}'
               AND r.status='FTBFS'
               AND ( n.bugs = '[]' OR n.bugs IS NULL )
               AND r.build_date < '{date}'
               AND s.id NOT IN (SELECT schedule.package_id FROM schedule)
               GROUP BY s.id, s.name
               ORDER BY max_date
               LIMIT {limit}""".format(suite=suite, arch=arch, limit=limit,
                                       date=cutoff)
    rows = query_db(query)
    print_schedule_result(suite, arch, criteria, rows)
    return rows
def query_old_depwait_versions(suite, arch, limit):
    """Return depwait packages last built more than 2 days ago."""
    criteria = ('status depwait, no bug filed, tested at least 2 days ago, '
                'no new version available, sorted by last build date')
    cutoff = (datetime.now() - timedelta(days=2)).strftime('%Y-%m-%d %H:%M')
    query = """SELECT s.id, s.name, max(r.build_date) max_date
               FROM sources AS s JOIN results AS r ON s.id = r.package_id
               WHERE s.suite='{suite}' AND s.architecture='{arch}'
               AND r.status='depwait'
               AND r.build_date < '{date}'
               AND s.id NOT IN (SELECT schedule.package_id FROM schedule)
               GROUP BY s.id, s.name
               ORDER BY max_date
               LIMIT {limit}""".format(suite=suite, arch=arch, limit=limit,
                                       date=cutoff)
    rows = query_db(query)
    print_schedule_result(suite, arch, criteria, rows)
    return rows
def query_old_versions(suite, arch, limit):
    """Return packages untouched for at least MINIMUM_AGE[arch] days."""
    criteria = """tested at least {minimum_age} days ago, no new version available,
sorted by last build date""".format(minimum_age=MINIMUM_AGE[arch])
    cutoff = (datetime.now() - timedelta(days=MINIMUM_AGE[arch]))\
        .strftime('%Y-%m-%d %H:%M')
    query = """SELECT s.id, s.name, max(r.build_date) max_date
               FROM sources AS s JOIN results AS r ON s.id = r.package_id
               WHERE s.suite='{suite}' AND s.architecture='{arch}'
               AND r.status != 'blacklisted'
               AND r.build_date < '{date}'
               AND s.id NOT IN (SELECT schedule.package_id FROM schedule)
               GROUP BY s.id, s.name
               ORDER BY max_date
               LIMIT {limit}""".format(suite=suite, arch=arch,
                                       date=cutoff, limit=limit)
    rows = query_db(query)
    print_schedule_result(suite, arch, criteria, rows)
    return rows
def query_404_versions(suite, arch, limit):
    """Return packages whose last build failed with status 404 over a day ago."""
    criteria = """tested at least a day ago, status 404,
sorted by last build date"""
    cutoff = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d %H:%M')
    query = """SELECT s.id, s.name, max(r.build_date) max_date
               FROM sources AS s JOIN results AS r ON s.id = r.package_id
               WHERE s.suite='{suite}' AND s.architecture='{arch}'
               AND r.status = '404'
               AND r.build_date < '{date}'
               AND s.id NOT IN (SELECT schedule.package_id FROM schedule)
               GROUP BY s.id, s.name
               ORDER BY max_date
               LIMIT {limit}""".format(suite=suite, arch=arch, limit=limit,
                                       date=cutoff)
    rows = query_db(query)
    print_schedule_result(suite, arch, criteria, rows)
    return rows
def schedule_untested_packages(arch, total):
    """Pick never-tested packages for each suite on `arch`.

    Returns (per-suite dict of packages, summary message fragment).
    Fix: the 'Received ...' log line was missing a space before
    'to schedule.'.
    """
    packages = {}
    limit = Limit(arch, 'untested')
    for suite in SUITES:
        limit.suite = suite
        many_untested = limit.get_limit('*')
        log.info('Requesting %s untested packages in %s/%s...',
                 many_untested, suite, arch)
        packages[suite] = query_untested_packages(suite, arch, many_untested)
        log.info('Received %s untested packages in %s/%s to schedule.',
                 len(packages[suite]), suite, arch)
        log.info('--------------------------------------------------------------')
    msg = add_up_numbers(packages, arch)
    if msg != '0':
        msg += ' new packages'
    else:
        msg = ''
    return packages, msg
def schedule_new_versions(arch, total):
    """Pick packages with a new archive version for each suite on `arch`.

    Returns (per-suite dict of packages, summary message fragment).
    """
    selected = {}
    limit = Limit(arch, 'new')
    for suite in SUITES:
        limit.suite = suite
        quota = limit.get_staged_limit(total)
        log.info('Requesting %s new versions in %s/%s...',
                 quota, suite, arch)
        selected[suite] = query_new_versions(suite, arch, quota)
        log.info('Received %s new packages in %s/%s to schedule.'
                 % (len(selected[suite]), suite, arch))
        log.info('--------------------------------------------------------------')
    summary = add_up_numbers(selected, arch)
    msg = summary + ' new versions' if summary != '0' else ''
    return selected, msg
def schedule_old_ftbfs_versions(arch, total):
    """Pick old FTBFS packages (no bug filed) for each suite on `arch`.

    Returns (per-suite dict of packages, summary message fragment).
    """
    selected = {}
    limit = Limit(arch, 'ftbfs')
    for suite in SUITES:
        limit.suite = suite
        quota = limit.get_staged_limit(total)
        log.info('Requesting %s old ftbfs packages in %s/%s...', quota,
                 suite, arch)
        selected[suite] = query_old_ftbfs_versions(suite, arch, quota)
        log.info('Received %s old ftbfs packages in %s/%s to schedule.'
                 % (len(selected[suite]), suite, arch))
        log.info('--------------------------------------------------------------')
    summary = add_up_numbers(selected, arch)
    msg = summary + ' ftbfs without bugs filed' if summary != '0' else ''
    return selected, msg
def schedule_old_depwait_versions(arch, total):
    """Pick old depwait packages for each suite on `arch`.

    Returns (per-suite dict of packages, summary message fragment).
    """
    selected = {}
    limit = Limit(arch, 'depwait')
    for suite in SUITES:
        limit.suite = suite
        quota = limit.get_staged_limit(total)
        log.info('Requesting %s old depwait packages in %s/%s...', quota,
                 suite, arch)
        selected[suite] = query_old_depwait_versions(suite, arch, quota)
        log.info('Received %s old depwait packages in %s/%s to schedule.'
                 % (len(selected[suite]), suite, arch))
        log.info('--------------------------------------------------------------')
    summary = add_up_numbers(selected, arch)
    msg = summary + ' in depwait state' if summary != '0' else ''
    return selected, msg
def schedule_old_versions(arch, total):
    """Pick long-untested packages for each suite on `arch`.

    Returns (per-suite dict of packages, summary message fragment).
    """
    selected = {}
    limit = Limit(arch, 'old')
    for suite in SUITES:
        limit.suite = suite
        quota = limit.get_staged_limit(total)
        log.info('Requesting %s old packages in %s/%s...', quota,
                 suite, arch)
        selected[suite] = query_old_versions(suite, arch, quota)
        log.info('Received %s old packages in %s/%s to schedule.'
                 % (len(selected[suite]), suite, arch))
        log.info('--------------------------------------------------------------')
    summary = add_up_numbers(selected, arch)
    msg = summary + ' known versions' if summary != '0' else ''
    return selected, msg
def schedule_404_versions(arch, total):
    """Pick packages whose last build hit a 404, for each suite on `arch`.

    Uses the flat LIMIT_404 cap instead of the staged Limit machinery.
    Returns (per-suite dict of packages, summary message fragment).
    """
    selected = {}
    for suite in SUITES:
        log.info('Requesting 404 packages in %s/%s...',
                 suite, arch)
        selected[suite] = query_404_versions(suite, arch, LIMIT_404)
        log.info('Received %s 404 packages in %s/%s to schedule.'
                 % (len(selected[suite]), suite, arch))
        log.info('--------------------------------------------------------------')
    summary = add_up_numbers(selected, arch)
    msg = summary + ' with status \'404\'' if summary != '0' else ''
    return selected, msg
def scheduler(arch):
    """Schedule packages for one architecture and return a summary line.

    Queries how many packages are already queued, then selects candidates
    in staged priority order (untested, new, old ftbfs, old depwait, 404,
    old) and inserts them into the schedule with staggered timestamps.
    Returns a human-readable per-arch summary, or '' if nothing was
    scheduled in any suite.
    """
    query = "SELECT count(*) " + \
            "FROM schedule AS p JOIN sources AS s ON p.package_id=s.id " + \
            "WHERE s.architecture='{arch}'"
    total = int(query_db(query.format(arch=arch))[0][0])
    log.info('==============================================================')
    log.info('Currently scheduled packages in all suites on ' + arch + ': ' + str(total))
    if total > MAXIMA[arch]:
        # Queue is already above the per-arch cap: only pick up genuinely
        # new versions, everything else gets an empty placeholder.
        log.info(str(total) + ' packages already scheduled' +
                 ', only scheduling new versions.')
        empty_pkgs = {}
        for suite in SUITES:
            empty_pkgs[suite] = []
        # NOTE(review): untested/old_ftbfs/old_depwait/old/four04 all alias
        # the SAME empty_pkgs dict; harmless as long as nothing mutates the
        # per-suite lists afterwards — confirm.
        untested, msg_untested = empty_pkgs, ''
        new, msg_new = schedule_new_versions(arch, total)
        old_ftbfs, msg_old_ftbfs = empty_pkgs, ''
        old_depwait, msg_old_depwait = empty_pkgs, ''
        old, msg_old = empty_pkgs, ''
        four04, msg_404 = empty_pkgs, ''
    else:
        # Below the cap: fill the queue category by category, feeding each
        # step the running total so staged limits shrink as we go.
        # NOTE(review): len(untested) etc. count SUITES (dict keys), not
        # packages — presumably the intent was the number of selected
        # packages; confirm against Limit.get_staged_limit.
        log.info(str(total) + ' packages already scheduled' +
                 ', scheduling some more...')
        untested, msg_untested = schedule_untested_packages(arch, total)
        new, msg_new = schedule_new_versions(arch, total+len(untested))
        old_ftbfs, msg_old_ftbfs = schedule_old_ftbfs_versions(arch, total+len(untested)+len(new))
        old_depwait, msg_old_depwait = schedule_old_depwait_versions(arch, total+len(untested)+len(new)+len(old_ftbfs))
        four04, msg_404 = schedule_404_versions(arch, total+len(untested)+len(new)+len(old_ftbfs)+len(old_depwait))
        old, msg_old = schedule_old_versions(arch, total+len(untested)+len(new)+len(old_ftbfs)+len(old_depwait)+len(four04))
    now_queued_here = {}
    # make sure to schedule packages in unstable first
    # (but keep the view ordering everywhere else)
    priotized_suite_order = ['unstable']
    for suite in SUITES:
        if suite not in priotized_suite_order:
            priotized_suite_order.append(suite)
    for suite in priotized_suite_order:
        query = "SELECT count(*) " \
                "FROM schedule AS p JOIN sources AS s ON p.package_id=s.id " \
                "WHERE s.suite='{suite}' AND s.architecture='{arch}'"
        query = query.format(suite=suite, arch=arch)
        # Queue size after this run = already queued + what we add now.
        now_queued_here[suite] = int(query_db(query)[0][0]) + \
            len(untested[suite]+new[suite]+old[suite])
        # schedule packages differently in the queue...
        # Negative offsets push a category ahead of "now", positive ones
        # push it behind, so categories drain in the intended order.
        to_be_scheduled = queue_packages({}, untested[suite], datetime.now()+timedelta(minutes=-720))
        assert(isinstance(to_be_scheduled, dict))
        to_be_scheduled = queue_packages(to_be_scheduled, new[suite], datetime.now()+timedelta(minutes=-1440))
        to_be_scheduled = queue_packages(to_be_scheduled, old_ftbfs[suite], datetime.now()+timedelta(minutes=360))
        to_be_scheduled = queue_packages(to_be_scheduled, old_depwait[suite], datetime.now()+timedelta(minutes=-360))
        to_be_scheduled = queue_packages(to_be_scheduled, old[suite], datetime.now()+timedelta(minutes=720))
        to_be_scheduled = queue_packages(to_be_scheduled, four04[suite], datetime.now())
        schedule_packages(to_be_scheduled)
    # update the scheduled page
    generate_schedule(arch)  # from reproducible_html_indexes
    # build the message text for this arch
    message = ' - ' + arch + ': '
    if msg_untested:
        message += msg_untested + ', '
    if msg_new:
        message += msg_new + ', '
    if msg_404:
        message += msg_404 + ', '
    if msg_old_ftbfs:
        message += msg_old_ftbfs + ', '
    if msg_old_depwait:
        message += msg_old_depwait + ', '
    if msg_old:
        message += msg_old + ', '
    total = [now_queued_here[x] for x in SUITES]
    message += 'for ' + str(sum(total))
    message += ' or ' + '+'.join([str(now_queued_here[x]) for x in SUITES])
    message += ' in total.'
    log.info('Scheduling for architecture ' + arch + ' done.')
    log.info('--------------------------------------------------------------')
    # only notify irc if there were packages scheduled in any suite
    # NOTE(review): four04 is missing from this check, so a run that only
    # scheduled 404 packages returns '' — confirm whether intentional.
    for x in SUITES:
        if len(untested[x])+len(new[x])+len(old[x])+len(old_ftbfs[x])+len(old_depwait[x]) > 0:
            return message
    return ''
if __name__ == '__main__':
    # Entry point: refresh the sources tables, run the per-arch scheduler,
    # and mail a summary of what was scheduled (if anything).
    log.info('Updating sources tables for all suites.')
    for suite in SUITES:
        update_sources(suite)
        log.info('Sources for suite %s done at %s.', suite, datetime.now())
    purge_old_pages()
    query = "SELECT count(*) " + \
            "FROM schedule AS p JOIN sources AS s ON s.id=p.package_id " + \
            "WHERE s.architecture='{}'"
    message = ''
    for arch in ARCHS:
        log.info('Scheduling for %s...', arch)
        overall = int(query_db(query.format(arch))[0][0])
        # Skip an architecture whose queue is already far above its cap.
        if overall > (MAXIMA[arch]*3):
            log.info('%s packages already scheduled for %s, nothing to do.', overall, arch)
            continue
        log.info('%s packages already scheduled for %s, probably scheduling some '
                 'more...', overall, arch)
        # scheduler() returns '' when nothing was scheduled for this arch.
        message += scheduler(arch) + '\n'
        log.info('Arch %s scheduled at %s.', arch, datetime.now())
    if message != '':
        # build the kgb message text
        message = 'Scheduled in ' + '+'.join(SUITES) + ':\n' + message
        log.info(message)
        # irc_msg(message, channel='debian-reproducible-changes')
        # send mail instead of notifying via irc, less intrusive
        msg = MIMEText(message)
        mail_from = 'jenkins@jenkins.debian.net'
        mail_to = 'qa-jenkins-scm@lists.alioth.debian.org'
        msg['From'] = mail_from
        msg['To'] = mail_to
        msg['Subject'] = 'packages scheduled for reproducible Debian'
        s = smtplib.SMTP('localhost')
        s.sendmail(mail_from, [mail_to], msg.as_string())
        s.quit()
| gpl-2.0 | 7,887,690,692,806,665,000 | 40.785059 | 124 | 0.545072 | false |
sdague/home-assistant | homeassistant/components/lifx_cloud/scene.py | 15 | 2756 | """Support for LIFX Cloud scenes."""
import asyncio
import logging
from typing import Any
import aiohttp
from aiohttp.hdrs import AUTHORIZATION
import async_timeout
import voluptuous as vol
from homeassistant.components.scene import Scene
from homeassistant.const import (
CONF_PLATFORM,
CONF_TIMEOUT,
CONF_TOKEN,
HTTP_OK,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
PLATFORM_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "lifx_cloud",
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the scenes stored in the LIFX Cloud."""
    timeout = config.get(CONF_TIMEOUT)
    headers = {AUTHORIZATION: f"Bearer {config.get(CONF_TOKEN)}"}
    url = "https://api.lifx.com/v1/scenes"

    try:
        session = async_get_clientsession(hass)
        with async_timeout.timeout(timeout):
            response = await session.get(url, headers=headers)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        _LOGGER.exception("Error on %s", url)
        return False

    if response.status == HTTP_OK:
        # Each entry in the JSON payload describes one cloud scene.
        scenes = await response.json()
        async_add_entities(
            [LifxCloudScene(hass, headers, timeout, scene) for scene in scenes]
        )
        return True

    if response.status == HTTP_UNAUTHORIZED:
        _LOGGER.error("Unauthorized (bad token?) on %s", url)
        return False

    _LOGGER.error("HTTP error %d on %s", response.status, url)
    return False
class LifxCloudScene(Scene):
    """A scene stored in the LIFX Cloud, activated via the HTTP API."""

    def __init__(self, hass, headers, timeout, scene_data):
        """Keep the request parameters and the scene's name and uuid."""
        self.hass = hass
        # The LIFX API addresses scenes by uuid; the name is for display.
        self._uuid = scene_data["uuid"]
        self._name = scene_data["name"]
        self._headers = headers
        self._timeout = timeout

    @property
    def name(self):
        """Return the name of the scene."""
        return self._name

    async def async_activate(self, **kwargs: Any) -> None:
        """Ask the LIFX Cloud to activate this scene."""
        url = f"https://api.lifx.com/v1/scenes/scene_id:{self._uuid}/activate"
        session = async_get_clientsession(self.hass)
        try:
            with async_timeout.timeout(self._timeout):
                await session.put(url, headers=self._headers)
        except (asyncio.TimeoutError, aiohttp.ClientError):
            # Best-effort: log and carry on, matching the setup path.
            _LOGGER.exception("Error on %s", url)
| apache-2.0 | -475,669,396,795,656,100 | 28.634409 | 86 | 0.653483 | false |
tiexinliu/odoo_addons | smile_model_methods/wizard/wizard_ir_model_method.py | 5 | 1527 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api, fields, models
class WizardIrModelMethods(models.TransientModel):
    """Transient wizard that (re)generates ir.model.methods entries
    for a user-selected set of models."""

    _name = 'wizard.ir.model.methods'
    _description = 'Wizard Model Method'
    # This wizard has no meaningful display name for its records.
    _rec_name = ''

    # Models whose method lists should be (re)built.
    # NOTE(review): the relation table name 'ir_model_methotds_rel'
    # contains a typo, but it is the persisted DB table name and must
    # not be changed without a migration.
    models_id = fields.Many2many('ir.model', 'ir_model_methotds_rel', 'wizard_model_id', 'model_id', string="Model list")
    # Whether already-existing method lines should be updated as well.
    to_update = fields.Boolean("Update lines ?")

    @api.multi
    def button_call(self):
        """Refresh the method list for the selected models.

        Delegates to ir.model.methods.update_list (defined elsewhere),
        passing the to_update flag through the context.
        """
        self.ensure_one()
        self.env['ir.model.methods'].with_context(to_update=self.to_update).update_list(self.models_id.mapped('model'))
| agpl-3.0 | -1,046,811,137,113,192,200 | 41.416667 | 121 | 0.626064 | false |
alfredodeza/boto | boto/sqs/regioninfo.py | 167 | 1524 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
class SQSRegionInfo(RegionInfo):
    """RegionInfo for SQS that defaults the connection class to SQSConnection.

    :param connection: An optional connection object for this region.
    :param name: The region name (e.g. 'us-east-1').
    :param endpoint: The service endpoint hostname for this region.
    :param connection_cls: Connection class to use for this region;
        defaults to SQSConnection when not supplied.
    """

    def __init__(self, connection=None, name=None, endpoint=None,
                 connection_cls=None):
        # Imported lazily to avoid a circular import between this module
        # and boto.sqs.connection.
        from boto.sqs.connection import SQSConnection
        # Honor an explicitly supplied connection_cls; previously the
        # parameter was accepted but silently ignored in favor of
        # SQSConnection. Behavior is unchanged for the default (None).
        super(SQSRegionInfo, self).__init__(connection, name, endpoint,
                                            connection_cls or SQSConnection)
| mit | 2,435,055,160,840,179,000 | 45.181818 | 74 | 0.739501 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.