repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
endlessm/chromium-browser | third_party/chromite/api/field_handler.py | 1 | 11972 | # -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Field handler classes.
The field handlers are meant to parse information from or do some other generic
action for a specific field type for the build_api script.
"""
from __future__ import print_function
import contextlib
import functools
import os
import shutil
import sys
from google.protobuf import message as protobuf_message
from chromite.api.controller import controller_util
from chromite.api.gen.chromiumos import common_pb2
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
class Error(Exception):
  """Base exception for all errors raised by this module."""
class InvalidResultPathError(Error):
  """Raised when a result path is missing or otherwise invalid."""
class ChrootHandler(object):
  """Translate a Chroot message to chroot enter arguments and env."""

  def __init__(self, clear_field):
    # Whether handle() should strip the chroot field out of the message
    # once the chroot has been extracted.
    self.clear_field = clear_field

  def handle(self, message):
    """Extract and parse the message's Chroot field, if present.

    Returns:
      chroot_lib.Chroot|None: The parsed chroot, or None when the message
        contains no Chroot field.
    """
    # Search for the field by type rather than by name so this handler is
    # not tied to any particular naming convention.
    for descriptor in message.DESCRIPTOR.fields:
      candidate = getattr(message, descriptor.name)
      if not isinstance(candidate, common_pb2.Chroot):
        continue

      if self.clear_field:
        message.ClearField(descriptor.name)
      return self.parse_chroot(candidate)

    return None

  def parse_chroot(self, chroot_message):
    """Parse a Chroot message instance."""
    return controller_util.ParseChroot(chroot_message)
def handle_chroot(message, clear_field=True):
  """Find and parse the chroot field, returning the Chroot instance.

  Args:
    message (Message): The message to search for a Chroot field.
    clear_field (bool): Whether the field should be cleared after parsing.

  Returns:
    chroot_lib.Chroot
  """
  handler = ChrootHandler(clear_field)
  parsed = handler.handle(message)
  if not parsed:
    # No chroot field found; parse an empty message so callers always get
    # a usable default chroot.
    logging.warning('No chroot message found, falling back to defaults.')
    parsed = handler.parse_chroot(common_pb2.Chroot())

  return parsed
def handle_goma(message, chroot_path):
  """Find and parse the GomaConfig field, returning the Goma instance."""
  # Locate the GomaConfig field by type rather than by name.
  candidates = (
      getattr(message, descriptor.name)
      for descriptor in message.DESCRIPTOR.fields
  )
  for candidate in candidates:
    if isinstance(candidate, common_pb2.GomaConfig):
      return controller_util.ParseGomaConfig(candidate, chroot_path)

  return None
class PathHandler(object):
  """Handles copying a file or directory into or out of the chroot."""

  # Aliases for the Path message's location enum values.
  INSIDE = common_pb2.Path.INSIDE
  OUTSIDE = common_pb2.Path.OUTSIDE

  def __init__(self, field, destination, delete, prefix=None, reset=True):
    """Path handler initialization.

    Args:
      field (common_pb2.Path): The Path message.
      destination (str): The destination base path.
      delete (bool): Whether the copied file(s) should be deleted on cleanup.
      prefix (str|None): A path prefix to remove from the destination path
        when moving files inside the chroot, or to add to the source paths when
        moving files out of the chroot.
      reset (bool): Whether to reset the state on cleanup.
    """
    assert isinstance(field, common_pb2.Path)
    assert field.path
    assert field.location

    self.field = field
    self.destination = destination
    self.prefix = prefix or ''
    self.delete = delete
    # Populated lazily by transfer() when |delete| is set.
    self.tempdir = None
    self.reset = reset

    # For resetting the state: snapshot the message so cleanup() can
    # restore it after transfer() has mutated path/location in place.
    self._transferred = False
    self._original_message = common_pb2.Path()
    self._original_message.CopyFrom(self.field)

  def transfer(self, direction):
    """Copy the file or directory to its destination.

    Mutates the Path message in place to record the new path and location.

    Args:
      direction (int): The direction files are being copied (into or out of
        the chroot). Specifying the direction allows avoiding performing
        unnecessary copies.
    """
    if self._transferred:
      # Transfers are one-shot; repeated calls are no-ops.
      return

    assert direction in [self.INSIDE, self.OUTSIDE]

    if self.field.location == direction:
      # Already in the correct location, nothing to do.
      return

    # Create a tempdir for the copied file if we're cleaning it up afterwards.
    if self.delete:
      self.tempdir = osutils.TempDir(base_dir=self.destination)
      destination = self.tempdir.tempdir
    else:
      destination = self.destination

    source = self.field.path
    if direction == self.OUTSIDE and self.prefix:
      # When we're extracting files, we need /tmp/result to be
      # /path/to/chroot/tmp/result.
      source = os.path.join(self.prefix, source.lstrip(os.sep))

    if os.path.isfile(source):
      # File - use the old file name, just copy it into the destination.
      dest_path = os.path.join(destination, os.path.basename(source))
      copy_fn = shutil.copy
    else:
      # Directory - just copy everything into the new location.
      dest_path = destination
      copy_fn = functools.partial(osutils.CopyDirContents, allow_nonempty=True)

    logging.debug('Copying %s to %s', source, dest_path)
    copy_fn(source, dest_path)

    # Clean up the destination path for returning, if applicable.
    return_path = dest_path
    if direction == self.INSIDE and return_path.startswith(self.prefix):
      # Strip the chroot prefix so the recorded path is valid from inside
      # the chroot.
      return_path = return_path[len(self.prefix):]

    self.field.path = return_path
    self.field.location = direction
    self._transferred = True

  def cleanup(self):
    """Remove any temp files and optionally restore the original message."""
    if self.tempdir:
      self.tempdir.Cleanup()
      self.tempdir = None

    if self.reset:
      # Undo the in-place mutation performed by transfer().
      self.field.CopyFrom(self._original_message)
class SyncedDirHandler(object):
  """Handler for syncing directories across the chroot boundary."""

  def __init__(self, field, destination, prefix):
    self.field = field
    self.prefix = prefix

    # Both endpoints need a trailing separator so the copy helpers treat
    # them as directories.
    self.source = self._slashed(self.field.dir)
    self.destination = self._slashed(destination)

    # Snapshot the message so sync_out() can restore it afterwards.
    self._original_message = common_pb2.SyncedDir()
    self._original_message.CopyFrom(self.field)

  @staticmethod
  def _slashed(path):
    """Return |path| with a guaranteed trailing path separator."""
    return path if path.endswith(os.sep) else path + os.sep

  def _sync(self, src, dest):
    logging.info('Syncing %s to %s', src, dest)
    # TODO: This would probably be more efficient with rsync.
    osutils.EmptyDir(dest)
    osutils.CopyDirContents(src, dest)

  def sync_in(self):
    """Sync files from the source directory to the destination directory."""
    self._sync(self.source, self.destination)
    self.field.dir = '/%s' % os.path.relpath(self.destination, self.prefix)

  def sync_out(self):
    """Sync files from the destination directory to the source directory."""
    self._sync(self.destination, self.source)
    self.field.CopyFrom(self._original_message)
@contextlib.contextmanager
def copy_paths_in(message, destination, delete=True, prefix=None):
  """Context manager function to transfer and cleanup all Path messages.

  Args:
    message (Message): A message whose Path messages should be transferred.
    destination (str): The base destination path.
    delete (bool): Whether the file(s) should be deleted.
    prefix (str|None): A prefix path to remove from the final destination path
      in the Path message (i.e. remove the chroot path).

  Yields:
    list[PathHandler]: The path handlers.
  """
  assert destination

  path_handlers = _extract_handlers(message, destination, prefix,
                                    delete=delete, reset=True)

  # Copy everything in before yielding to the caller.
  for path_handler in path_handlers:
    path_handler.transfer(PathHandler.INSIDE)

  try:
    yield path_handlers
  finally:
    # Always clean up, even if the endpoint raised.
    for path_handler in path_handlers:
      path_handler.cleanup()
@contextlib.contextmanager
def sync_dirs(message, destination, prefix):
  """Context manager function to handle SyncedDir messages.

  The sync semantics are effectively:
    rsync -r --del source/ destination/
    * The endpoint runs. *
    rsync -r --del destination/ source/

  Args:
    message (Message): A message whose SyncedPath messages should be synced.
    destination (str): The destination path.
    prefix (str): A prefix path to remove from the final destination path
      in the Path message (i.e. remove the chroot path).

  Yields:
    list[SyncedDirHandler]: The handlers.
  """
  assert destination

  dir_handlers = _extract_handlers(message, destination, prefix=prefix,
                                   delete=False, reset=True,
                                   message_type=common_pb2.SyncedDir)

  # Push the contents in before the endpoint runs.
  for dir_handler in dir_handlers:
    dir_handler.sync_in()

  try:
    yield dir_handlers
  finally:
    # Always sync results back out, even if the endpoint raised.
    for dir_handler in dir_handlers:
      dir_handler.sync_out()
def extract_results(request_message, response_message, chroot):
  """Transfer all response Path messages to the request's ResultPath.

  Args:
    request_message (Message): The request message containing a ResultPath
      message.
    response_message (Message): The response message whose Path message(s)
      are to be transferred.
    chroot (chroot_lib.Chroot): The chroot the files are being copied out of.
  """
  # Find the ResultPath field, if any, by type.
  result_path_message = next(
      (getattr(request_message, descriptor.name)
       for descriptor in request_message.DESCRIPTOR.fields
       if isinstance(getattr(request_message, descriptor.name),
                     common_pb2.ResultPath)),
      None)
  if result_path_message is None:
    # No ResultPath to handle.
    return

  destination = result_path_message.path.path
  for handler in _extract_handlers(response_message, destination, chroot.path,
                                   delete=False, reset=False):
    handler.transfer(PathHandler.OUTSIDE)
    handler.cleanup()
def _extract_handlers(message, destination, prefix, delete=False, reset=False,
                      field_name=None, message_type=None):
  """Recursive helper for handle_paths to extract Path messages.

  Walks every field of |message| (including repeated fields) and builds a
  handler for each instance of |message_type| found.

  Args:
    message: The message (or field value) currently being inspected.
    destination (str): The base destination path passed to each handler.
    prefix (str|None): Path prefix passed through to the handlers.
    delete (bool): Whether PathHandlers should delete copies on cleanup.
    reset (bool): Whether handlers should restore message state on cleanup.
    field_name (str|None): Dotted field path, used only for log messages.
    message_type: The message class to build handlers for; defaults to
      common_pb2.Path, may also be common_pb2.SyncedDir.

  Returns:
    list: The PathHandler or SyncedDirHandler instances.
  """
  message_type = message_type or common_pb2.Path
  is_path_target = message_type is common_pb2.Path
  is_synced_target = message_type is common_pb2.SyncedDir

  is_message = isinstance(message, protobuf_message.Message)
  is_result_path = isinstance(message, common_pb2.ResultPath)
  if not is_message or is_result_path:
    # Base case: Nothing to handle.
    # There's nothing we can do with scalar values.
    # Skip ResultPath instances to avoid unnecessary file copying.
    return []
  elif is_path_target and isinstance(message, common_pb2.Path):
    # Base case: Create handler for this message.
    if not message.path or not message.location:
      # Both path and location are required to do a transfer.
      logging.debug('Skipping %s; incomplete.', field_name or 'message')
      return []

    handler = PathHandler(message, destination, delete=delete, prefix=prefix,
                          reset=reset)
    return [handler]
  elif is_synced_target and isinstance(message, common_pb2.SyncedDir):
    # Base case: Create a sync handler for this message.
    if not message.dir:
      logging.debug('Skipping %s; no directory given.', field_name or 'message')
      return []

    handler = SyncedDirHandler(message, destination, prefix)
    return [handler]

  # Iterate through each field and recurse.
  handlers = []
  for descriptor in message.DESCRIPTOR.fields:
    field = getattr(message, descriptor.name)
    # Build the dotted field path for log messages.
    if field_name:
      new_field_name = '%s.%s' % (field_name, descriptor.name)
    else:
      new_field_name = descriptor.name

    if isinstance(field, protobuf_message.Message):
      # Recurse for nested Paths.
      handlers.extend(
          _extract_handlers(field, destination, prefix, delete, reset,
                            field_name=new_field_name,
                            message_type=message_type))
    else:
      # If it's iterable it may be a repeated field, try each element.
      try:
        iterator = iter(field)
      except TypeError:
        # Definitely not a repeated field, just move on.
        continue

      for element in iterator:
        handlers.extend(
            _extract_handlers(element, destination, prefix, delete, reset,
                              field_name=new_field_name,
                              message_type=message_type))

  return handlers
| bsd-3-clause |
cloudera/hue | desktop/core/ext-py/SQLAlchemy-1.3.17/test/orm/test_update_delete.py | 2 | 36647 | from sqlalchemy import Boolean
from sqlalchemy import case
from sqlalchemy import column
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy.orm import backref
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import synonym
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class UpdateDeleteTest(fixtures.MappedTest):
    """Tests for bulk Query.update() / Query.delete() against a mapped User.

    NOTE: single-column query results are compared via list(zip([...])),
    which wraps each expected value in a 1-tuple to match row tuples.
    """

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # "age_int" is mapped to the "age" attribute in setup_mappers, so
        # several tests exercise updates against a renamed column.
        Table(
            "users",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(32)),
            Column("age_int", Integer),
        )
        Table(
            "addresses",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("user_id", ForeignKey("users.id")),
        )

    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass

        class Address(cls.Comparable):
            pass

    @classmethod
    def insert_data(cls, connection):
        users = cls.tables.users

        connection.execute(
            users.insert(),
            [
                dict(id=1, name="john", age_int=25),
                dict(id=2, name="jack", age_int=47),
                dict(id=3, name="jill", age_int=29),
                dict(id=4, name="jane", age_int=37),
            ],
        )

    @classmethod
    def setup_mappers(cls):
        User = cls.classes.User
        users = cls.tables.users

        Address = cls.classes.Address
        addresses = cls.tables.addresses

        mapper(
            User,
            users,
            properties={
                "age": users.c.age_int,
                "addresses": relationship(Address),
            },
        )
        mapper(Address, addresses)

    def test_illegal_eval(self):
        # An unknown synchronize_session strategy should raise up front.
        User = self.classes.User
        s = Session()
        assert_raises_message(
            exc.ArgumentError,
            "Valid strategies for session synchronization "
            "are 'evaluate', 'fetch', False",
            s.query(User).update,
            {},
            synchronize_session="fake",
        )

    def test_illegal_operations(self):
        # Bulk update/delete is incompatible with these query modifiers.
        User = self.classes.User
        Address = self.classes.Address

        s = Session()

        for q, mname in (
            (s.query(User).limit(2), r"limit\(\)"),
            (s.query(User).offset(2), r"offset\(\)"),
            (s.query(User).limit(2).offset(2), r"limit\(\)"),
            (s.query(User).order_by(User.id), r"order_by\(\)"),
            (s.query(User).group_by(User.id), r"group_by\(\)"),
            (s.query(User).distinct(), r"distinct\(\)"),
            (
                s.query(User).join(User.addresses),
                r"join\(\), outerjoin\(\), select_from\(\), or from_self\(\)",
            ),
            (
                s.query(User).outerjoin(User.addresses),
                r"join\(\), outerjoin\(\), select_from\(\), or from_self\(\)",
            ),
            (
                s.query(User).select_from(Address),
                r"join\(\), outerjoin\(\), select_from\(\), or from_self\(\)",
            ),
            (
                s.query(User).from_self(),
                r"join\(\), outerjoin\(\), select_from\(\), or from_self\(\)",
            ),
        ):
            assert_raises_message(
                exc.InvalidRequestError,
                r"Can't call Query.update\(\) or Query.delete\(\) when "
                "%s has been called" % mname,
                q.update,
                {"name": "ed"},
            )
            assert_raises_message(
                exc.InvalidRequestError,
                r"Can't call Query.update\(\) or Query.delete\(\) when "
                "%s has been called" % mname,
                q.delete,
            )

    def test_evaluate_clauseelement(self):
        # Keys implementing __clause_element__ are resolved to attributes.
        User = self.classes.User

        class Thing(object):
            def __clause_element__(self):
                return User.name.__clause_element__()

        s = Session()
        jill = s.query(User).get(3)
        s.query(User).update(
            {Thing(): "moonbeam"}, synchronize_session="evaluate"
        )
        eq_(jill.name, "moonbeam")

    def test_evaluate_invalid(self):
        # A __clause_element__ returning a non-expression is rejected.
        User = self.classes.User

        class Thing(object):
            def __clause_element__(self):
                return 5

        s = Session()

        assert_raises_message(
            exc.InvalidRequestError,
            "Invalid expression type: 5",
            s.query(User).update,
            {Thing(): "moonbeam"},
            synchronize_session="evaluate",
        )

    def test_evaluate_unmapped_col(self):
        # An unmapped column() updates the DB but can't be evaluated in
        # memory; the in-session object only refreshes after expiration.
        User = self.classes.User
        s = Session()
        jill = s.query(User).get(3)
        s.query(User).update(
            {column("name"): "moonbeam"}, synchronize_session="evaluate"
        )
        eq_(jill.name, "jill")
        s.expire(jill)
        eq_(jill.name, "moonbeam")

    def test_evaluate_synonym_string(self):
        class Foo(object):
            pass

        mapper(Foo, self.tables.users, properties={"uname": synonym("name")})

        s = Session()
        jill = s.query(Foo).get(3)
        s.query(Foo).update(
            {"uname": "moonbeam"}, synchronize_session="evaluate"
        )
        eq_(jill.uname, "moonbeam")

    def test_evaluate_synonym_attr(self):
        class Foo(object):
            pass

        mapper(Foo, self.tables.users, properties={"uname": synonym("name")})

        s = Session()
        jill = s.query(Foo).get(3)
        s.query(Foo).update(
            {Foo.uname: "moonbeam"}, synchronize_session="evaluate"
        )
        eq_(jill.uname, "moonbeam")

    def test_evaluate_double_synonym_attr(self):
        # A synonym of a synonym still resolves to the underlying column.
        class Foo(object):
            pass

        mapper(
            Foo,
            self.tables.users,
            properties={"uname": synonym("name"), "ufoo": synonym("uname")},
        )

        s = Session()
        jill = s.query(Foo).get(3)
        s.query(Foo).update(
            {Foo.ufoo: "moonbeam"}, synchronize_session="evaluate"
        )
        eq_(jill.ufoo, "moonbeam")

    def test_delete(self):
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).filter(
            or_(User.name == "john", User.name == "jill")
        ).delete()

        assert john not in sess and jill not in sess

        eq_(sess.query(User).order_by(User.id).all(), [jack, jane])

    def test_delete_against_metadata(self):
        # delete() against a Table (not a mapped class) also works.
        User = self.classes.User
        users = self.tables.users

        sess = Session()
        sess.query(users).delete(synchronize_session=False)
        eq_(sess.query(User).count(), 0)

    def test_delete_with_bindparams(self):
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).filter(text("name = :name")).params(
            name="john"
        ).delete("fetch")
        assert john not in sess

        eq_(sess.query(User).order_by(User.id).all(), [jack, jill, jane])

    def test_delete_rollback(self):
        # rollback() restores objects that a bulk delete expunged.
        User = self.classes.User

        sess = Session()
        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).filter(
            or_(User.name == "john", User.name == "jill")
        ).delete(synchronize_session="evaluate")
        assert john not in sess and jill not in sess
        sess.rollback()
        assert john in sess and jill in sess

    def test_delete_rollback_with_fetch(self):
        User = self.classes.User

        sess = Session()
        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).filter(
            or_(User.name == "john", User.name == "jill")
        ).delete(synchronize_session="fetch")
        assert john not in sess and jill not in sess
        sess.rollback()
        assert john in sess and jill in sess

    def test_delete_without_session_sync(self):
        # With synchronize_session=False the session still holds the
        # (now stale) deleted objects.
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).filter(
            or_(User.name == "john", User.name == "jill")
        ).delete(synchronize_session=False)

        assert john in sess and jill in sess

        eq_(sess.query(User).order_by(User.id).all(), [jack, jane])

    def test_delete_with_fetch_strategy(self):
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).filter(
            or_(User.name == "john", User.name == "jill")
        ).delete(synchronize_session="fetch")

        assert john not in sess and jill not in sess

        eq_(sess.query(User).order_by(User.id).all(), [jack, jane])

    @testing.requires.update_where_target_in_subquery
    def test_delete_invalid_evaluation(self):
        # A subquery criterion cannot be evaluated in Python ("evaluate"
        # raises); the "fetch" strategy handles it.
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()

        assert_raises(
            exc.InvalidRequestError,
            sess.query(User)
            .filter(User.name == select([func.max(User.name)]))
            .delete,
            synchronize_session="evaluate",
        )

        sess.query(User).filter(
            User.name == select([func.max(User.name)])
        ).delete(synchronize_session="fetch")

        assert john not in sess

        eq_(sess.query(User).order_by(User.id).all(), [jack, jill, jane])

    def test_update(self):
        User, users = self.classes.User, self.tables.users

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).filter(User.age > 29).update(
            {"age": User.age - 10}, synchronize_session="evaluate"
        )

        eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([25, 37, 29, 27])),
        )

        sess.query(User).filter(User.age > 29).update(
            {User.age: User.age - 10}, synchronize_session="evaluate"
        )
        eq_([john.age, jack.age, jill.age, jane.age], [25, 27, 29, 27])
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([25, 27, 29, 27])),
        )

        sess.query(User).filter(User.age > 27).update(
            {users.c.age_int: User.age - 10}, synchronize_session="evaluate"
        )
        eq_([john.age, jack.age, jill.age, jane.age], [25, 27, 19, 27])
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([25, 27, 19, 27])),
        )

        sess.query(User).filter(User.age == 25).update(
            {User.age: User.age - 10}, synchronize_session="fetch"
        )
        eq_([john.age, jack.age, jill.age, jane.age], [15, 27, 19, 27])
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([15, 27, 19, 27])),
        )

    def test_update_against_table_col(self):
        # Update keyed on the Table column rather than the mapped attr.
        User, users = self.classes.User, self.tables.users

        sess = Session()
        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        eq_([john.age, jack.age, jill.age, jane.age], [25, 47, 29, 37])
        sess.query(User).filter(User.age > 27).update(
            {users.c.age_int: User.age - 10}, synchronize_session="evaluate"
        )
        eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 19, 27])

    def test_update_against_metadata(self):
        User, users = self.classes.User, self.tables.users

        sess = Session()

        sess.query(users).update(
            {users.c.age_int: 29}, synchronize_session=False
        )
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([29, 29, 29, 29])),
        )

    def test_update_with_bindparams(self):
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()

        sess.query(User).filter(text("age_int > :x")).params(x=29).update(
            {"age": User.age - 10}, synchronize_session="fetch"
        )

        eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([25, 37, 29, 27])),
        )

    def test_update_without_load(self):
        # Bulk update works without loading the target objects first.
        User = self.classes.User

        sess = Session()

        sess.query(User).filter(User.id == 3).update(
            {"age": 44}, synchronize_session="fetch"
        )
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([25, 47, 44, 37])),
        )

    def test_update_changes_resets_dirty(self):
        User = self.classes.User

        sess = Session(autoflush=False)

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()

        john.age = 50
        jack.age = 37

        # autoflush is false.  therefore our '50' and '37' are getting
        # blown away by this operation.

        sess.query(User).filter(User.age > 29).update(
            {"age": User.age - 10}, synchronize_session="evaluate"
        )

        for x in (john, jack, jill, jane):
            assert not sess.is_modified(x)

        eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])

        john.age = 25
        assert john in sess.dirty
        assert jack in sess.dirty
        assert jill not in sess.dirty
        assert not sess.is_modified(john)
        assert not sess.is_modified(jack)

    def test_update_changes_with_autoflush(self):
        # With autoflush on, the pending 50/37 are flushed before the bulk
        # update runs, so they participate in the criteria/arithmetic.
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()

        john.age = 50
        jack.age = 37

        sess.query(User).filter(User.age > 29).update(
            {"age": User.age - 10}, synchronize_session="evaluate"
        )

        for x in (john, jack, jill, jane):
            assert not sess.is_modified(x)

        eq_([john.age, jack.age, jill.age, jane.age], [40, 27, 29, 27])

        john.age = 25
        assert john in sess.dirty
        assert jack not in sess.dirty
        assert jill not in sess.dirty
        assert sess.is_modified(john)
        assert not sess.is_modified(jack)

    def test_update_with_expire_strategy(self):
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).filter(User.age > 29).update(
            {"age": User.age - 10}, synchronize_session="fetch"
        )

        eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([25, 37, 29, 27])),
        )

    @testing.fails_if(lambda: not testing.db.dialect.supports_sane_rowcount)
    def test_update_returns_rowcount(self):
        User = self.classes.User

        sess = Session()

        rowcount = (
            sess.query(User)
            .filter(User.age > 29)
            .update({"age": User.age + 0})
        )
        eq_(rowcount, 2)

        rowcount = (
            sess.query(User)
            .filter(User.age > 29)
            .update({"age": User.age - 10})
        )
        eq_(rowcount, 2)

    @testing.fails_if(lambda: not testing.db.dialect.supports_sane_rowcount)
    def test_delete_returns_rowcount(self):
        User = self.classes.User

        sess = Session()

        rowcount = (
            sess.query(User)
            .filter(User.age > 26)
            .delete(synchronize_session=False)
        )
        eq_(rowcount, 3)

    def test_update_all(self):
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).update({"age": 42}, synchronize_session="evaluate")

        eq_([john.age, jack.age, jill.age, jane.age], [42, 42, 42, 42])
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([42, 42, 42, 42])),
        )

    def test_delete_all(self):
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).delete(synchronize_session="evaluate")

        assert not (
            john in sess or jack in sess or jill in sess or jane in sess
        )
        eq_(sess.query(User).count(), 0)

    def test_autoflush_before_evaluate_update(self):
        # The pending rename must be flushed so the criteria matches.
        User = self.classes.User

        sess = Session()
        john = sess.query(User).filter_by(name="john").one()
        john.name = "j2"

        sess.query(User).filter_by(name="j2").update(
            {"age": 42}, synchronize_session="evaluate"
        )
        eq_(john.age, 42)

    def test_autoflush_before_fetch_update(self):
        User = self.classes.User

        sess = Session()
        john = sess.query(User).filter_by(name="john").one()
        john.name = "j2"

        sess.query(User).filter_by(name="j2").update(
            {"age": 42}, synchronize_session="fetch"
        )
        eq_(john.age, 42)

    def test_autoflush_before_evaluate_delete(self):
        User = self.classes.User

        sess = Session()
        john = sess.query(User).filter_by(name="john").one()
        john.name = "j2"

        sess.query(User).filter_by(name="j2").delete(
            synchronize_session="evaluate"
        )
        assert john not in sess

    def test_autoflush_before_fetch_delete(self):
        User = self.classes.User

        sess = Session()
        john = sess.query(User).filter_by(name="john").one()
        john.name = "j2"

        sess.query(User).filter_by(name="j2").delete(
            synchronize_session="fetch"
        )
        assert john not in sess

    def test_evaluate_before_update(self):
        User = self.classes.User

        sess = Session()
        john = sess.query(User).filter_by(name="john").one()
        sess.expire(john, ["age"])

        # eval must be before the update.  otherwise
        # we eval john, age has been expired and doesn't
        # match the new value coming in
        sess.query(User).filter_by(name="john").filter_by(age=25).update(
            {"name": "j2", "age": 40}, synchronize_session="evaluate"
        )
        eq_(john.name, "j2")
        eq_(john.age, 40)

    def test_fetch_before_update(self):
        User = self.classes.User

        sess = Session()
        john = sess.query(User).filter_by(name="john").one()
        sess.expire(john, ["age"])

        sess.query(User).filter_by(name="john").filter_by(age=25).update(
            {"name": "j2", "age": 40}, synchronize_session="fetch"
        )
        eq_(john.name, "j2")
        eq_(john.age, 40)

    def test_evaluate_before_delete(self):
        User = self.classes.User

        sess = Session()
        john = sess.query(User).filter_by(name="john").one()
        sess.expire(john, ["age"])

        sess.query(User).filter_by(name="john").filter_by(age=25).delete(
            synchronize_session="evaluate"
        )
        assert john not in sess

    def test_fetch_before_delete(self):
        User = self.classes.User

        sess = Session()
        john = sess.query(User).filter_by(name="john").one()
        sess.expire(john, ["age"])

        sess.query(User).filter_by(name="john").filter_by(age=25).delete(
            synchronize_session="fetch"
        )
        assert john not in sess

    def test_update_unordered_dict(self):
        User = self.classes.User
        session = Session()

        # Do an update using unordered dict and check that the parameters used
        # are ordered in table order
        q = session.query(User)
        with mock.patch.object(q, "_execute_crud") as exec_:
            q.filter(User.id == 15).update({"name": "foob", "id": 123})
            # Confirm that parameters are a dict instead of tuple or list
            params_type = type(exec_.mock_calls[0][1][0].parameters)
            is_(params_type, dict)

    def test_update_preserve_parameter_order(self):
        User = self.classes.User
        session = Session()

        # Do update using a tuple and check that order is preserved
        q = session.query(User)
        with mock.patch.object(q, "_execute_crud") as exec_:
            q.filter(User.id == 15).update(
                (("id", 123), ("name", "foob")),
                update_args={"preserve_parameter_order": True},
            )
            cols = [
                c.key for c in exec_.mock_calls[0][1][0]._parameter_ordering
            ]
            eq_(["id", "name"], cols)

        # Now invert the order and use a list instead, and check that order is
        # also preserved
        q = session.query(User)
        with mock.patch.object(q, "_execute_crud") as exec_:
            q.filter(User.id == 15).update(
                [("name", "foob"), ("id", 123)],
                update_args={"preserve_parameter_order": True},
            )
            cols = [
                c.key for c in exec_.mock_calls[0][1][0]._parameter_ordering
            ]
            eq_(["name", "id"], cols)
class UpdateDeleteIgnoresLoadersTest(fixtures.MappedTest):
    """Bulk update/delete against mappings that configure relationship
    loader strategies (joined eager load, explicit joinedload option)."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "users",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(32)),
            Column("age", Integer),
        )
        Table(
            "documents",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_id", None, ForeignKey("users.id")),
            Column("title", String(32)),
        )

    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass

        class Document(cls.Comparable):
            pass

    @classmethod
    def insert_data(cls, connection):
        users = cls.tables.users

        connection.execute(
            users.insert(),
            [
                dict(id=1, name="john", age=25),
                dict(id=2, name="jack", age=47),
                dict(id=3, name="jill", age=29),
                dict(id=4, name="jane", age=37),
            ],
        )

        documents = cls.tables.documents

        connection.execute(
            documents.insert(),
            [
                dict(id=1, user_id=1, title="foo"),
                dict(id=2, user_id=1, title="bar"),
                dict(id=3, user_id=2, title="baz"),
            ],
        )

    @classmethod
    def setup_mappers(cls):
        documents, Document, User, users = (
            cls.tables.documents,
            cls.classes.Document,
            cls.classes.User,
            cls.tables.users,
        )

        mapper(User, users)
        # Document.user is joined-eager-loaded; its backref is lazy.
        mapper(
            Document,
            documents,
            properties={
                "user": relationship(
                    User,
                    lazy="joined",
                    backref=backref("documents", lazy="select"),
                )
            },
        )

    def test_update_with_eager_relationships(self):
        Document = self.classes.Document

        sess = Session()

        foo, bar, baz = sess.query(Document).order_by(Document.id).all()
        sess.query(Document).filter(Document.user_id == 1).update(
            {"title": Document.title + Document.title},
            synchronize_session="fetch",
        )

        eq_([foo.title, bar.title, baz.title], ["foofoo", "barbar", "baz"])
        eq_(
            sess.query(Document.title).order_by(Document.id).all(),
            list(zip(["foofoo", "barbar", "baz"])),
        )

    def test_update_with_explicit_joinedload(self):
        # joinedload() options on the query must not break the UPDATE.
        User = self.classes.User

        sess = Session()

        john, jack, jill, jane = sess.query(User).order_by(User.id).all()
        sess.query(User).options(joinedload(User.documents)).filter(
            User.age > 29
        ).update({"age": User.age - 10}, synchronize_session="fetch")

        eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27])
        eq_(
            sess.query(User.age).order_by(User.id).all(),
            list(zip([25, 37, 29, 27])),
        )

    def test_delete_with_eager_relationships(self):
        Document = self.classes.Document

        sess = Session()

        sess.query(Document).filter(Document.user_id == 1).delete(
            synchronize_session=False
        )

        eq_(sess.query(Document.title).all(), list(zip(["baz"])))
class UpdateDeleteFromTest(fixtures.MappedTest):
    """Bulk UPDATE/DELETE whose criteria reference other selectables,
    i.e. the UPDATE..FROM / DELETE..FROM family of statements."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        """Create the users/documents tables used by every test here."""
        Table(
            "users",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("samename", String(10)),
        )
        Table(
            "documents",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("user_id", None, ForeignKey("users.id")),
            Column("title", String(32)),
            Column("flag", Boolean),
            Column("samename", String(10)),
        )
    @classmethod
    def setup_classes(cls):
        """Plain comparable classes to be mapped onto the tables."""
        class User(cls.Comparable):
            pass
        class Document(cls.Comparable):
            pass
    @classmethod
    def insert_data(cls, connection):
        """Seed four users; users 1-3 own two documents each, user 4 none."""
        users = cls.tables.users
        connection.execute(
            users.insert(), [dict(id=1), dict(id=2), dict(id=3), dict(id=4)]
        )
        documents = cls.tables.documents
        connection.execute(
            documents.insert(),
            [
                dict(id=1, user_id=1, title="foo"),
                dict(id=2, user_id=1, title="bar"),
                dict(id=3, user_id=2, title="baz"),
                dict(id=4, user_id=2, title="hoho"),
                dict(id=5, user_id=3, title="lala"),
                dict(id=6, user_id=3, title="bleh"),
            ],
        )
    @classmethod
    def setup_mappers(cls):
        """Map User and Document with a bidirectional user/documents link."""
        documents, Document, User, users = (
            cls.tables.documents,
            cls.classes.Document,
            cls.classes.User,
            cls.tables.users,
        )
        mapper(User, users)
        mapper(
            Document,
            documents,
            properties={"user": relationship(User, backref="documents")},
        )
    @testing.requires.update_from
    def test_update_from_joined_subq_test(self):
        """UPDATE with criteria against a grouped subquery (UPDATE..FROM)."""
        Document = self.classes.Document
        s = Session()
        subq = (
            s.query(func.max(Document.title).label("title"))
            .group_by(Document.user_id)
            .subquery()
        )
        s.query(Document).filter(Document.title == subq.c.title).update(
            {"flag": True}, synchronize_session=False
        )
        # Rows whose title is the per-user maximum get flagged.
        eq_(
            set(s.query(Document.id, Document.flag)),
            set(
                [
                    (1, True),
                    (2, None),
                    (3, None),
                    (4, True),
                    (5, True),
                    (6, None),
                ]
            ),
        )
    @testing.requires.delete_from
    def test_delete_from_joined_subq_test(self):
        """DELETE with criteria against a grouped subquery (DELETE..FROM)."""
        Document = self.classes.Document
        s = Session()
        subq = (
            s.query(func.max(Document.title).label("title"))
            .group_by(Document.user_id)
            .subquery()
        )
        s.query(Document).filter(Document.title == subq.c.title).delete(
            synchronize_session=False
        )
        eq_(
            set(s.query(Document.id, Document.flag)),
            set([(2, None), (3, None), (6, None)]),
        )
    def test_no_eval_against_multi_table_criteria(self):
        """The 'evaluate' strategy cannot handle multi-table criteria and
        must raise rather than guess."""
        User = self.classes.User
        Document = self.classes.Document
        s = Session()
        q = s.query(User).filter(User.id == Document.user_id)
        assert_raises_message(
            exc.InvalidRequestError,
            "Could not evaluate current criteria in Python.",
            q.update,
            {"name": "ed"},
        )
    @testing.requires.update_where_target_in_subquery
    def test_update_using_in(self):
        """UPDATE with an IN(subquery) criterion against the target table."""
        Document = self.classes.Document
        s = Session()
        subq = (
            s.query(func.max(Document.title).label("title"))
            .group_by(Document.user_id)
            .subquery()
        )
        s.query(Document).filter(Document.title.in_(subq)).update(
            {"flag": True}, synchronize_session=False
        )
        eq_(
            set(s.query(Document.id, Document.flag)),
            set(
                [
                    (1, True),
                    (2, None),
                    (3, None),
                    (4, True),
                    (5, True),
                    (6, None),
                ]
            ),
        )
    @testing.requires.update_where_target_in_subquery
    @testing.requires.standalone_binds
    def test_update_using_case(self):
        """UPDATE setting a column from a CASE containing an IN(subquery)."""
        Document = self.classes.Document
        s = Session()
        subq = (
            s.query(func.max(Document.title).label("title"))
            .group_by(Document.user_id)
            .subquery()
        )
        # this would work with Firebird if you do literal_column('1')
        # instead
        case_stmt = case([(Document.title.in_(subq), True)], else_=False)
        s.query(Document).update(
            {"flag": case_stmt}, synchronize_session=False
        )
        eq_(
            set(s.query(Document.id, Document.flag)),
            set(
                [
                    (1, True),
                    (2, False),
                    (3, False),
                    (4, True),
                    (5, True),
                    (6, False),
                ]
            ),
        )
    @testing.only_on("mysql", "Multi table update")
    def test_update_from_multitable_same_names(self):
        """MySQL multi-table UPDATE disambiguates identically named columns
        present on both tables ("samename")."""
        Document = self.classes.Document
        User = self.classes.User
        s = Session()
        s.query(Document).filter(User.id == Document.user_id).filter(
            User.id == 2
        ).update(
            {Document.samename: "d_samename", User.samename: "u_samename"},
            synchronize_session=False,
        )
        eq_(
            s.query(User.id, Document.samename, User.samename)
            .filter(User.id == Document.user_id)
            .order_by(User.id)
            .all(),
            [
                (1, None, None),
                (1, None, None),
                (2, "d_samename", "u_samename"),
                (2, "d_samename", "u_samename"),
                (3, None, None),
                (3, None, None),
            ],
        )
class ExpressionUpdateTest(fixtures.MappedTest):
    """Bulk UPDATE keyed by mapped attributes whose names differ from the
    underlying column names ("cnt" mapped to column "counter")."""
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "data",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("counter", Integer, nullable=False, default=0),
        )
    @classmethod
    def setup_classes(cls):
        class Data(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        # Attribute name "cnt" deliberately differs from column "counter".
        data = cls.tables.data
        mapper(cls.classes.Data, data, properties={"cnt": data.c.counter})
    @testing.provide_metadata
    def test_update_attr_names(self):
        """Attribute-named keys resolve to the correct column in UPDATE."""
        Data = self.classes.Data
        d1 = Data()
        sess = Session()
        sess.add(d1)
        sess.commit()
        eq_(d1.cnt, 0)
        sess.query(Data).update({Data.cnt: Data.cnt + 1})
        sess.flush()
        eq_(d1.cnt, 1)
        # Positional synchronize_session argument ("fetch") also works.
        sess.query(Data).update({Data.cnt: Data.cnt + 1}, "fetch")
        sess.flush()
        eq_(d1.cnt, 2)
        sess.close()
    def test_update_args(self):
        """update_args (e.g. mysql_limit) pass through to the UPDATE
        statement's dialect kwargs."""
        Data = self.classes.Data
        session = testing.mock.Mock(wraps=Session())
        update_args = {"mysql_limit": 1}
        q = session.query(Data)
        with testing.mock.patch.object(q, "_execute_crud") as exec_:
            q.update({Data.cnt: Data.cnt + 1}, update_args=update_args)
        eq_(exec_.call_count, 1)
        args, kwargs = exec_.mock_calls[0][1:3]
        eq_(len(args), 2)
        update_stmt = args[0]
        eq_(update_stmt.dialect_kwargs, update_args)
class InheritTest(fixtures.DeclarativeMappedTest):
    """Bulk UPDATE/DELETE against joined-table inheritance hierarchies."""
    run_inserts = "each"
    run_deletes = "each"
    __backend__ = True
    @classmethod
    def setup_classes(cls):
        """Declare a Person base with Engineer/Manager joined subclasses."""
        Base = cls.DeclarativeBasic
        class Person(Base):
            __tablename__ = "person"
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )
            type = Column(String(50))
            name = Column(String(50))
        class Engineer(Person):
            __tablename__ = "engineer"
            id = Column(Integer, ForeignKey("person.id"), primary_key=True)
            engineer_name = Column(String(50))
        class Manager(Person):
            __tablename__ = "manager"
            id = Column(Integer, ForeignKey("person.id"), primary_key=True)
            manager_name = Column(String(50))
    @classmethod
    def insert_data(cls, connection):
        """Seed two engineers, one manager and one plain person."""
        Engineer, Person, Manager = (
            cls.classes.Engineer,
            cls.classes.Person,
            cls.classes.Manager,
        )
        s = Session(connection)
        s.add_all(
            [
                Engineer(name="e1", engineer_name="e1"),
                Manager(name="m1", manager_name="m1"),
                Engineer(name="e2", engineer_name="e2"),
                Person(name="p1"),
            ]
        )
        s.commit()
    def test_illegal_metadata(self):
        """A join of two tables is rejected as a bulk-update target."""
        person = self.classes.Person.__table__
        engineer = self.classes.Engineer.__table__
        sess = Session()
        assert_raises_message(
            exc.InvalidRequestError,
            "This operation requires only one Table or entity be "
            "specified as the target.",
            sess.query(person.join(engineer)).update,
            {},
        )
    def test_update_subtable_only(self):
        """UPDATE limited to columns of the subclass table."""
        Engineer = self.classes.Engineer
        s = Session(testing.db)
        s.query(Engineer).update({"engineer_name": "e5"})
        eq_(s.query(Engineer.engineer_name).all(), [("e5",), ("e5",)])
    @testing.requires.update_from
    def test_update_from(self):
        """UPDATE of subclass rows with criteria against the base table."""
        Engineer = self.classes.Engineer
        Person = self.classes.Person
        s = Session(testing.db)
        s.query(Engineer).filter(Engineer.id == Person.id).filter(
            Person.name == "e2"
        ).update({"engineer_name": "e5"})
        eq_(
            set(s.query(Person.name, Engineer.engineer_name)),
            set([("e1", "e1"), ("e2", "e5")]),
        )
    @testing.requires.delete_from
    def test_delete_from(self):
        """DELETE of subclass rows with criteria against the base table."""
        Engineer = self.classes.Engineer
        Person = self.classes.Person
        s = Session(testing.db)
        s.query(Engineer).filter(Engineer.id == Person.id).filter(
            Person.name == "e2"
        ).delete()
        eq_(
            set(s.query(Person.name, Engineer.engineer_name)),
            set([("e1", "e1")]),
        )
    @testing.only_on("mysql", "Multi table update")
    def test_update_from_multitable(self):
        """MySQL multi-table UPDATE touching base and subclass tables."""
        Engineer = self.classes.Engineer
        Person = self.classes.Person
        s = Session(testing.db)
        s.query(Engineer).filter(Engineer.id == Person.id).filter(
            Person.name == "e2"
        ).update({Person.name: "e22", Engineer.engineer_name: "e55"})
        eq_(
            set(s.query(Person.name, Engineer.engineer_name)),
            set([("e1", "e1"), ("e22", "e55")]),
        )
| apache-2.0 |
panxia6679/ursula | library/modprobe.py | 36 | 3566 | #!/usr/bin/python
#coding: utf-8 -*-
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: modprobe
short_description: Add or remove kernel modules
requirements: []
version_added: 1.4
description:
- Add or remove kernel modules.
options:
name:
required: true
description:
- Name of kernel module to manage.
state:
required: false
default: "present"
choices: [ present, absent, probe ]
description:
- Whether the module should be present or absent.
params:
required: false
default: ""
version_added: "1.6"
description:
- Modules parameters.
'''
EXAMPLES = '''
# Add the 802.1q module
- modprobe: name=8021q state=present
# Add the dummy module
- modprobe: name=dummy state=present params="numdummies=2"
'''
def main():
    """Ansible entry point: ensure the requested kernel module state.

    Loads (``modprobe``), probes, or unloads (``rmmod``) the module named by
    the ``name`` option, honoring check mode.  Exits via the AnsibleModule
    JSON helpers; never returns normally.
    """
    module = AnsibleModule(
        argument_spec={
            'name': {'required': True},
            'state': {'default': 'present', 'choices': ['present', 'absent', 'probe']},
            'params': {'default': ''},
        },
        supports_check_mode=True,
    )
    args = {
        'changed': False,
        'failed': False,
        'name': module.params['name'],
        'state': module.params['state'],
        'params': module.params['params'],
    }
    # modprobe expects each "key=value" parameter as its own argv element, so
    # the space-separated `params` string must be split.  This also avoids
    # passing a bogus empty-string argument when no parameters were given.
    extra_params = args['params'].split()
    # Check if module is present by scanning /proc/modules.  Kernel module
    # names use underscores even when users type dashes.
    present = False
    try:
        with open('/proc/modules') as modules:
            module_name = args['name'].replace('-', '_') + ' '
            for line in modules:
                if line.startswith(module_name):
                    present = True
                    break
    except IOError as e:  # "as" form is valid on both Python 2.6+ and 3
        module.fail_json(msg=str(e), **args)
    # Check only; don't modify
    if module.check_mode:
        if args['state'] == 'present' and not present:
            changed = True
        elif args['state'] == 'absent' and present:
            changed = True
        else:
            changed = False
        module.exit_json(changed=changed)
    # Add/remove module as needed
    if args['state'] == 'present':
        if not present:
            rc, _, err = module.run_command(['modprobe', args['name']] + extra_params)
            if rc != 0:
                module.fail_json(msg=err, **args)
            args['changed'] = True
    elif args['state'] == 'probe':
        if not present:
            rc, _, err = module.run_command(['modprobe', args['name']] + extra_params)
            if rc == 0:
                args['changed'] = True
            elif rc != 0 and 'No such device' not in err:
                # "No such device" merely means the hardware is absent; any
                # other modprobe failure is a real error.
                module.fail_json(msg=err, **args)
    elif args['state'] == 'absent':
        if present:
            rc, _, err = module.run_command(['rmmod', args['name']])
            if rc != 0:
                module.fail_json(msg=err, **args)
            args['changed'] = True
    module.exit_json(**args)
# import module snippets
from ansible.module_utils.basic import *

# Guard the invocation so the file can be imported (e.g. by tests or doc
# tooling) without immediately executing the module logic.
if __name__ == '__main__':
    main()
| mit |
xpansa/pmis | stock_analytic_account/model/stock_picking.py | 2 | 1611 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# Jordi Ballester Alomar <jordi.ballester@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class stock_picking(osv.osv):
    """Extend stock.picking with read-only related fields surfacing the
    analytic accounts (and their managers) referenced by the picking's
    stock moves."""
    _inherit = "stock.picking"
    _columns = {
        # Analytic accounts collected from the picking's move lines.
        'analytic_account_ids': fields.related(
            'move_lines', 'analytic_account_id', type='many2many',
            relation='account.analytic.account', string='Analytic Account',
            readonly=True
        ),
        # Responsible users of those analytic accounts (labelled "Project
        # Manager" in the UI).
        'analytic_account_user_ids': fields.related(
            'move_lines',
            'analytic_account_user_id',
            type='many2many',
            relation='res.users',
            string='Project Manager',
            readonly=True
        ),
    }
| agpl-3.0 |
pombreda/labyrinth | src/PeriodicSaveThread.py | 5 | 1160 | # PeriodicSaveThread.py
# This file is part of Labyrinth
#
# Copyright (C) 2008 - Labyrinth-Dev-Team
#
# Labyrinth is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Labyrinth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labyrinth; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import threading
import time
class PeriodicSaveThread(threading.Thread):
    """Background thread that autosaves the map every 60 seconds.

    Callers stop it by assigning ``thread.cancel = True`` (the historical
    interface, preserved here as a property).  Cancellation is backed by a
    threading.Event so it takes effect immediately instead of only after the
    current 60-second sleep expires.
    """

    def __init__(self, main_area):
        threading.Thread.__init__(self)
        self.main_area = main_area
        # Event-backed cancellation flag; exposed via the `cancel` property.
        self._cancel_event = threading.Event()

    @property
    def cancel(self):
        """Whether cancellation has been requested (bool, as before)."""
        return self._cancel_event.is_set()

    @cancel.setter
    def cancel(self, value):
        # Preserve the original attribute-assignment interface.
        if value:
            self._cancel_event.set()
        else:
            self._cancel_event.clear()

    def run(self):
        # wait(60) returns False on timeout (keep saving) and True as soon
        # as cancellation is requested, so shutdown is prompt rather than
        # delayed by up to a full minute.
        while not self._cancel_event.wait(60):
            self.main_area.save_thyself()
| gpl-2.0 |
caktus/rapidsms-appointments | appointments/models.py | 1 | 4145 | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.timezone import now
except ImportError: # Django < 1.4
now = datetime.datetime.now
class Timeline(models.Model):
    """A series of milestones which users can subscribe to for milestone reminders."""
    name = models.CharField(max_length=255)
    slug = models.CharField(max_length=255, help_text=_('The keyword(s) to match '
        'in messages from the user. Specify multiple keywords by separating them '
        'with vertical bars. e.g., "birth|bith|bilth"'))

    def __unicode__(self):
        return self.name

    @property
    def keywords(self):
        """Return the normalized (stripped, lowercased) keywords from ``slug``."""
        # A list comprehension always yields a list; the previous map() call
        # returned a one-shot iterator on Python 3 that could only be
        # consumed once and supported neither len() nor indexing.
        return [keyword.strip().lower() for keyword in self.slug.split('|')]
class TimelineSubscription(models.Model):
    """Subscribing a user (messaging connection) to a timeline of reminders."""
    timeline = models.ForeignKey(Timeline, related_name='subscribers')
    connection = models.ForeignKey('rapidsms.Connection', related_name='timelines')
    # Free-form identifier the user supplied when joining (name/phrase/digits).
    pin = models.CharField(max_length=160, help_text=_('Name, phrase, or digits used when joining the timeline.'))
    start = models.DateTimeField(_('start date'), default=now)
    # end is nullable; presumably None marks a still-active subscription --
    # confirm against the subscription-handling code.
    end = models.DateTimeField(_('end date'), default=None, null=True)
    def __unicode__(self):
        return '%s - %s' % (self.connection, self.timeline)
class Milestone(models.Model):
    """A point on the timeline that needs an appointment."""
    name = models.CharField(max_length=255)
    timeline = models.ForeignKey(Timeline, related_name='milestones')
    # Offset from the subscription start; the unit (presumably days) is not
    # visible here -- confirm against the scheduling code.
    offset = models.IntegerField()
    def __unicode__(self):
        return self.name
class Appointment(models.Model):
    """Instance of a subscribed user hitting a milestone."""
    # Lifecycle states for the appointment.
    STATUS_DEFAULT = 1
    STATUS_SAW = 2
    STATUS_MISSED = 3
    STATUS_CHOICES = [
        (STATUS_DEFAULT, _('Not Yet Occurred')),
        (STATUS_SAW, _('Saw')),
        (STATUS_MISSED, _('Missed')),
    ]
    milestone = models.ForeignKey(Milestone, related_name='appointments')
    subscription = models.ForeignKey(TimelineSubscription, related_name='appointments')
    date = models.DateField(_('appointment date'))
    # Timestamp of user confirmation; None until confirmed (see
    # Notification.confirm, which writes this field).
    confirmed = models.DateTimeField(blank=True, null=True, default=None)
    # Self-reference; presumably links to the replacement appointment when
    # this one is rescheduled -- confirm against the rescheduling handler.
    reschedule = models.ForeignKey('self', blank=True, null=True, related_name='appointments')
    status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_DEFAULT)
    notes = models.CharField(max_length=160, blank=True, default='')
    def __unicode__(self):
        return 'Appointment for %s on %s' % (self.subscription.connection, self.date.isoformat())
    class Meta:
        ordering = ['-date']
        permissions = (
            ('view_appointment', 'Can View Appointments'),
        )
class Notification(models.Model):
    """Record of user notification for an appointment."""
    # Delivery/confirmation states for the notification.
    STATUS_SENT = 1
    STATUS_CONFIRMED = 2
    STATUS_MANUAL = 3
    STATUS_ERROR = 4
    STATUS_CHOICES = (
        (STATUS_SENT, _('Sent')),
        (STATUS_CONFIRMED, _('Confirmed')),
        (STATUS_MANUAL, _('Manually Confirmed')),
        (STATUS_ERROR, _('Error')),
    )
    appointment = models.ForeignKey(Appointment, related_name='notifications')
    status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_SENT)
    sent = models.DateTimeField(blank=True, null=True, default=now)
    confirmed = models.DateTimeField(blank=True, null=True, default=None)
    message = models.CharField(max_length=160)
    def __unicode__(self):
        return 'Notification for %s on %s' %\
            (self.appointment.subscription.connection, self.sent.isoformat())
    def confirm(self, manual=False):
        """Mark this notification and its appointment as confirmed.

        Updates both the in-memory instances and the database rows.  The DB
        writes go through queryset ``update()`` rather than ``save()`` --
        presumably to avoid save signals and full-row writes; confirm before
        relying on signals firing here.
        """
        confirmed = now()
        status = Notification.STATUS_MANUAL if manual else Notification.STATUS_CONFIRMED
        self.confirmed = confirmed
        self.status = status
        Notification.objects.filter(pk=self.pk).update(confirmed=confirmed, status=status)
        self.appointment.confirmed = confirmed
        Appointment.objects.filter(pk=self.appointment_id).update(confirmed=confirmed)
| bsd-3-clause |
gojira/tensorflow | tensorflow/python/keras/applications/vgg16_test.py | 13 | 1842 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VGG16 application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.platform import test
class VGG16Test(test.TestCase):
  """Smoke tests for `keras.applications.VGG16` construction options."""

  def test_with_top(self):
    # With the classifier head the model ends in 1000-way logits.
    full_model = keras.applications.VGG16(weights=None)
    self.assertEqual(full_model.output_shape, (None, 1000))

  def test_no_top(self):
    # Without the head the output is the final convolutional feature map.
    headless = keras.applications.VGG16(weights=None, include_top=False)
    self.assertEqual(headless.output_shape, (None, None, None, 512))

  def test_with_pooling(self):
    # Global average pooling collapses the spatial dims to a 512 vector.
    pooled = keras.applications.VGG16(
        weights=None, include_top=False, pooling='avg')
    self.assertEqual(pooled.output_shape, (None, 512))

  def test_weight_loading(self):
    # Unknown weight identifiers are rejected.
    with self.assertRaises(ValueError):
      keras.applications.VGG16(weights='unknown', include_top=False)
    # imagenet weights require the canonical 1000-class head.
    with self.assertRaises(ValueError):
      keras.applications.VGG16(weights='imagenet', classes=2000)


if __name__ == '__main__':
  test.main()
| apache-2.0 |
thanhacun/odoo | addons/event/wizard/__init__.py | 435 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_confirm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wrouesnel/ansible | lib/ansible/modules/net_tools/lldp.py | 47 | 2503 | #!/usr/bin/python -tt
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lldp
requirements: [ lldpctl ]
version_added: 1.6
short_description: get details reported by lldp
description:
- Reads data out of lldpctl
options: {}
author: "Andy Hill (@andyhky)"
notes:
- Requires lldpd running and lldp enabled on switches
'''
EXAMPLES = '''
# Retrieve switch/port information
- name: Gather information from lldp
lldp:
- name: Print each switch/port
debug:
msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
with_items: "{{ lldp.keys() }}"
# TASK: [Print each switch/port] ***********************************************************
# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
'''
from ansible.module_utils.basic import AnsibleModule
def gather_lldp(module):
    """Run ``lldpctl -f keyvalue`` and parse its output into a nested dict.

    Each ``a.b.c=value`` line becomes ``{'a': {'b': {'c': 'value'}}}``;
    lines not starting with ``lldp`` are continuations of the previous
    value and are appended with a newline.  Returns None when lldpctl
    produced no output (e.g. lldpd is not running).
    """
    cmd = ['lldpctl', '-f', 'keyvalue']
    rc, output, err = module.run_command(cmd)
    if not output:
        return None
    output_dict = {}
    current_dict = output_dict
    final = None
    path_components = []
    # splitlines() -- unlike split("\n") -- yields no bogus empty entry for
    # the trailing newline, which previously appended a stray "\n" to the
    # last multi-line value.
    for entry in output.splitlines():
        if entry.startswith('lldp'):
            path, value = entry.strip().split("=", 1)
            path = path.split(".")
            path_components, final = path[:-1], path[-1]
        elif final is None:
            # Continuation line seen before any key: nothing to attach it
            # to, so skip it instead of raising NameError.
            continue
        else:
            # Continuation of a multi-line value; current_dict still points
            # at the innermost dict from the previous iteration.
            value = current_dict[final] + '\n' + entry
        current_dict = output_dict
        for path_component in path_components:
            current_dict[path_component] = current_dict.get(path_component, {})
            current_dict = current_dict[path_component]
        current_dict[final] = value
    return output_dict
def main():
    """Entry point: expose lldpctl data as the ``lldp`` Ansible fact."""
    module = AnsibleModule({})
    parsed = gather_lldp(module)
    try:
        # parsed is None when lldpctl produced no output; subscripting it
        # raises TypeError, which we translate into a friendly failure.
        module.exit_json(ansible_facts={'lldp': parsed['lldp']})
    except TypeError:
        module.fail_json(msg="lldpctl command failed. is lldpd running?")
| gpl-3.0 |
nuagenetworks/vspk-python | vspk/v5_0/nusubnettemplate.py | 1 | 21014 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUAddressRangesFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUQOSsFetcher
from .fetchers import NUSubnetsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUSubnetTemplate(NURESTObject):
    """ Represents a SubnetTemplate in the VSD

        Notes:
            As domain and zone objects, subnet objects are created in VSD as derived by templates. This object describes the subnet template.
    """
    __rest_name__ = "subnettemplate"
    __resource_name__ = "subnettemplates"
    ## Constants
    # Auto-generated enumerations of the values the VSD API accepts for the
    # corresponding attributes (useGlobalMAC, multicast, encryption,
    # entityScope, DPI, IPType).
    CONST_USE_GLOBAL_MAC_DISABLED = "DISABLED"
    CONST_USE_GLOBAL_MAC_ENABLED = "ENABLED"
    CONST_MULTICAST_DISABLED = "DISABLED"
    CONST_ENCRYPTION_ENABLED = "ENABLED"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_ENCRYPTION_DISABLED = "DISABLED"
    CONST_USE_GLOBAL_MAC_ENTERPRISE_DEFAULT = "ENTERPRISE_DEFAULT"
    CONST_DPI_ENABLED = "ENABLED"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_DPI_INHERITED = "INHERITED"
    CONST_IP_TYPE_IPV4 = "IPV4"
    CONST_MULTICAST_ENABLED = "ENABLED"
    CONST_MULTICAST_INHERITED = "INHERITED"
    CONST_DPI_DISABLED = "DISABLED"
    CONST_ENCRYPTION_INHERITED = "INHERITED"
    CONST_IP_TYPE_DUALSTACK = "DUALSTACK"
    def __init__(self, **kwargs):
        """ Initializes a SubnetTemplate instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> subnettemplate = NUSubnetTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'SubnetTemplate')
                >>> subnettemplate = NUSubnetTemplate(data=my_dict)
        """
        super(NUSubnetTemplate, self).__init__()
        # Read/Write Attributes
        self._dpi = None
        self._ip_type = None
        self._ipv6_address = None
        self._ipv6_gateway = None
        self._name = None
        self._last_updated_by = None
        self._gateway = None
        self._address = None
        self._description = None
        self._netmask = None
        self._encryption = None
        self._entity_scope = None
        self._split_subnet = None
        self._proxy_arp = None
        self._use_global_mac = None
        self._associated_multicast_channel_map_id = None
        self._multicast = None
        self._external_id = None
        self._dynamic_ipv6_address = None
        # Register each attribute's REST mapping (remote name, type,
        # requiredness, uniqueness and allowed values) with the bambou base.
        self.expose_attribute(local_name="dpi", remote_name="DPI", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
        self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'DUALSTACK', u'IPV4'])
        self.expose_attribute(local_name="ipv6_address", remote_name="IPv6Address", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="ipv6_gateway", remote_name="IPv6Gateway", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway", remote_name="gateway", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="netmask", remote_name="netmask", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="encryption", remote_name="encryption", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="split_subnet", remote_name="splitSubnet", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="proxy_arp", remote_name="proxyARP", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="use_global_mac", remote_name="useGlobalMAC", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'ENTERPRISE_DEFAULT'])
        self.expose_attribute(local_name="associated_multicast_channel_map_id", remote_name="associatedMulticastChannelMapID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="multicast", remote_name="multicast", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="dynamic_ipv6_address", remote_name="dynamicIpv6Address", attribute_type=bool, is_required=False, is_unique=False)
        # Fetchers for the child object collections reachable from this
        # template via the REST API.
        self.address_ranges = NUAddressRangesFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.qoss = NUQOSsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.subnets = NUSubnetsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self._compute_args(**kwargs)
    # Properties (auto-generated accessor pairs over the _-prefixed slots)
    @property
    def dpi(self):
        """Get the deep packet inspection setting (`DPI` in the VSD API):
        DISABLED, ENABLED or INHERITED."""
        return self._dpi
    @dpi.setter
    def dpi(self, value):
        """Set the deep packet inspection setting (`DPI` in the VSD API)."""
        self._dpi = value
    @property
    def ip_type(self):
        """Get the IP addressing type (`IPType` in the VSD API): IPV4 or
        DUALSTACK."""
        return self._ip_type
    @ip_type.setter
    def ip_type(self, value):
        """Set the IP addressing type (`IPType` in the VSD API)."""
        self._ip_type = value
    @property
    def ipv6_address(self):
        """Get the IPv6 range of the subnet (`IPv6Address` in the VSD API).
        Optional for zones; VSD auto-assigns subnet addresses from this
        range when none is given explicitly."""
        return self._ipv6_address
    @ipv6_address.setter
    def ipv6_address(self, value):
        """Set the IPv6 range of the subnet (`IPv6Address` in the VSD API)."""
        self._ipv6_address = value
    @property
    def ipv6_gateway(self):
        """Get the IPv6 gateway address of this subnet (`IPv6Gateway` in the
        VSD API)."""
        return self._ipv6_gateway
    @ipv6_gateway.setter
    def ipv6_gateway(self, value):
        """Set the IPv6 gateway address (`IPv6Gateway` in the VSD API)."""
        self._ipv6_gateway = value
@property
def name(self):
""" Get name value.
Notes:
Name of the current entity(Zone or zone template or subnet etc..) Valid characters are alphabets, numbers, space and hyphen( - ).
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the current entity(Zone or zone template or subnet etc..) Valid characters are alphabets, numbers, space and hyphen( - ).
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def gateway(self):
""" Get gateway value.
Notes:
The IP address of the gateway of this subnet
"""
return self._gateway
@gateway.setter
def gateway(self, value):
""" Set gateway value.
Notes:
The IP address of the gateway of this subnet
"""
self._gateway = value
@property
def address(self):
""" Get address value.
Notes:
IP address of the subnet defined. In case of zone, this is an optional field for and allows users to allocate an IP address range to a zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
"""
return self._address
@address.setter
def address(self, value):
""" Set address value.
Notes:
IP address of the subnet defined. In case of zone, this is an optional field for and allows users to allocate an IP address range to a zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
"""
self._address = value
    @property
    def description(self):
        """ Get description value.

            Notes:
                A description field provided by the user that identifies the subnet.

        """
        return self._description

    @description.setter
    def description(self, value):
        """ Set description value.

            Notes:
                A description field provided by the user that identifies the subnet.

        """
        self._description = value
    @property
    def netmask(self):
        """ Get netmask value.

            Notes:
                Netmask of the subnet defined.

        """
        return self._netmask

    @netmask.setter
    def netmask(self, value):
        """ Set netmask value.

            Notes:
                Netmask of the subnet defined.

        """
        self._netmask = value
    @property
    def encryption(self):
        """ Get encryption value.

            Notes:
                Determines whether or not IPSEC is enabled.
                Possible values are INHERITED, ENABLED, DISABLED.

        """
        return self._encryption

    @encryption.setter
    def encryption(self, value):
        """ Set encryption value.

            Notes:
                Determines whether or not IPSEC is enabled.
                Possible values are INHERITED, ENABLED, DISABLED.

        """
        self._encryption = value
    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level.

                This attribute is named `entityScope` in VSD API.

        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level.

                This attribute is named `entityScope` in VSD API.

        """
        self._entity_scope = value
    @property
    def split_subnet(self):
        """ Get split_subnet value.

            Notes:
                Block subnet routes.

                This attribute is named `splitSubnet` in VSD API.

        """
        return self._split_subnet

    @split_subnet.setter
    def split_subnet(self, value):
        """ Set split_subnet value.

            Notes:
                Block subnet routes.

                This attribute is named `splitSubnet` in VSD API.

        """
        self._split_subnet = value
    @property
    def proxy_arp(self):
        """ Get proxy_arp value.

            Notes:
                When set, VRS will act as ARP Proxy.

                This attribute is named `proxyARP` in VSD API.

        """
        return self._proxy_arp

    @proxy_arp.setter
    def proxy_arp(self, value):
        """ Set proxy_arp value.

            Notes:
                When set, VRS will act as ARP Proxy.

                This attribute is named `proxyARP` in VSD API.

        """
        self._proxy_arp = value
    @property
    def use_global_mac(self):
        """ Get use_global_mac value.

            Notes:
                If this flag is enabled, the system-configured globalMACAddress
                will be used as the gateway MAC address.

                This attribute is named `useGlobalMAC` in VSD API.

        """
        return self._use_global_mac

    @use_global_mac.setter
    def use_global_mac(self, value):
        """ Set use_global_mac value.

            Notes:
                If this flag is enabled, the system-configured globalMACAddress
                will be used as the gateway MAC address.

                This attribute is named `useGlobalMAC` in VSD API.

        """
        self._use_global_mac = value
    @property
    def associated_multicast_channel_map_id(self):
        """ Get associated_multicast_channel_map_id value.

            Notes:
                The ID of the Multi Cast Channel Map this Subnet/Subnet Template
                is associated with. This has to be set when enableMultiCast is
                set to ENABLED.

                This attribute is named `associatedMulticastChannelMapID` in VSD API.

        """
        return self._associated_multicast_channel_map_id

    @associated_multicast_channel_map_id.setter
    def associated_multicast_channel_map_id(self, value):
        """ Set associated_multicast_channel_map_id value.

            Notes:
                The ID of the Multi Cast Channel Map this Subnet/Subnet Template
                is associated with. This has to be set when enableMultiCast is
                set to ENABLED.

                This attribute is named `associatedMulticastChannelMapID` in VSD API.

        """
        self._associated_multicast_channel_map_id = value
    @property
    def multicast(self):
        """ Get multicast value.

            Notes:
                Indicates multicast policy on Subnet/Subnet Template.

        """
        return self._multicast

    @multicast.setter
    def multicast(self, value):
        """ Set multicast value.

            Notes:
                Indicates multicast policy on Subnet/Subnet Template.

        """
        self._multicast = value
    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems.

                This attribute is named `externalID` in VSD API.

        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems.

                This attribute is named `externalID` in VSD API.

        """
        self._external_id = value
    @property
    def dynamic_ipv6_address(self):
        """ Get dynamic_ipv6_address value.

            Notes:
                Turn on or off dynamic allocation of IPV6 address.

                This attribute is named `dynamicIpv6Address` in VSD API.

        """
        return self._dynamic_ipv6_address

    @dynamic_ipv6_address.setter
    def dynamic_ipv6_address(self, value):
        """ Set dynamic_ipv6_address value.

            Notes:
                Turn on or off dynamic allocation of IPV6 address.

                This attribute is named `dynamicIpv6Address` in VSD API.

        """
        self._dynamic_ipv6_address = value
## Custom methods
    def is_template(self):
        """ Verify that the object is a template.

            This class models the *template* flavor of the entity, so this
            always returns True (compare is_from_template below).

            Returns:
                (bool): True if the object is a template
        """
        return True
    def is_from_template(self):
        """ Verify if the object has been instantiated from a template.

            Note:
                The object has to be fetched. Otherwise, it does not
                have information from its parent.

            Returns:
                (bool): True if the object was instantiated from a template;
                always False here, because this object *is* the template.
        """
        return False
| bsd-3-clause |
shaftoe/home-assistant | homeassistant/components/light/sensehat.py | 15 | 3087 | """
Support for Sense Hat LEDs.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.sensehat/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, ATTR_RGB_COLOR, SUPPORT_RGB_COLOR,
Light, PLATFORM_SCHEMA)
from homeassistant.const import CONF_NAME
# PyPI package Home Assistant installs for this platform.
REQUIREMENTS = ['sense-hat==2.2.0']

_LOGGER = logging.getLogger(__name__)

# This light supports dimming and RGB color, nothing else.
SUPPORT_SENSEHAT = (SUPPORT_BRIGHTNESS | SUPPORT_RGB_COLOR)

DEFAULT_NAME = 'sensehat'

# Only configurable option is the entity's display name.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Sense Hat Light platform.

    Creates a single SenseHatLight entity backed by the local Sense HAT
    LED matrix.
    """
    # Imported here (not at module level) so Home Assistant can import this
    # platform module even when the sense-hat package is not installed yet.
    from sense_hat import SenseHat
    sensehat = SenseHat()
    name = config.get(CONF_NAME)
    add_devices([SenseHatLight(sensehat, name)])
class SenseHatLight(Light):
    """Representation of a Sense Hat Light (the 8x8 RGB LED matrix)."""

    def __init__(self, sensehat, name):
        """Initialize a Sense Hat Light.

        Starts logically off, with full brightness and white color.
        """
        self._sensehat = sensehat          # sense_hat.SenseHat driver instance
        self._name = name
        self._is_on = False                # assumed off until first turn_on()
        self._brightness = 255             # 0-255, HA convention
        self._rgb_color = [255, 255, 255]  # [r, g, b], each 0-255

    @property
    def name(self):
        """Return the display name of this light."""
        return self._name

    @property
    def brightness(self):
        """Read back the brightness of the light."""
        return self._brightness

    @property
    def rgb_color(self):
        """Read back the color of the light.

        Returns [r, g, b] list with values in range of 0-255.
        """
        return self._rgb_color

    @property
    def supported_features(self):
        """Flag supported features (brightness + RGB color)."""
        return SUPPORT_SENSEHAT

    @property
    def is_on(self):
        """Return true if light is on."""
        return self._is_on

    @property
    def should_poll(self):
        """Return if we should poll this device."""
        # State is tracked locally; HA never needs to poll the hardware.
        return False

    @property
    def assumed_state(self) -> bool:
        """Return True if unable to access real state of the entity."""
        # The LED matrix cannot be read back, so the state is assumed.
        return True

    def turn_on(self, **kwargs):
        """Instruct the light to turn on and set correct brightness & color."""
        if ATTR_BRIGHTNESS in kwargs:
            self._brightness = kwargs[ATTR_BRIGHTNESS]
        # Scale factor applied to each RGB channel (true division -> float).
        percent_bright = (self._brightness / 255)
        if ATTR_RGB_COLOR in kwargs:
            self._rgb_color = kwargs[ATTR_RGB_COLOR]
        # clear(r, g, b) fills the whole LED matrix with a single color.
        self._sensehat.clear(int(self._rgb_color[0] * percent_bright),
                             int(self._rgb_color[1] * percent_bright),
                             int(self._rgb_color[2] * percent_bright))
        self._is_on = True
        self.schedule_update_ha_state()

    def turn_off(self, **kwargs):
        """Instruct the light to turn off."""
        self._sensehat.clear()  # no arguments = all LEDs off
        self._is_on = False
        self.schedule_update_ha_state()
| apache-2.0 |
ddurdle/KODI-OneDrive | resources/lib/crashreport.py | 3 | 4141 | '''
Copyright (C) 2014 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import re
import urllib, urllib2
import cookielib
import random
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
#
#
#
class crashreport:
    # Opt-in crash/error reporter for the Kodi add-on. Reads configuration
    # from the add-on settings and, with explicit user consent per report,
    # submits error details to a Google Forms endpoint.
    # NOTE: Python 2 only (urllib2/cookielib, old-style except syntax).
    def __init__(self, addon):
        # addon: xbmcaddon.Addon used for settings and localized strings.
        self.addon = addon
        self.pluginName = self.addon.getAddonInfo('name') + ' - ' + self.addon.getAddonInfo('id')
        self.pluginVersion = self.addon.getAddonInfo('version')
        self.xbmcVersion = self.addon.getSetting('crashreport_version') + ' - ' + self.addon.getSetting('crashreport_os')
        # Per-install pseudonymous identifier; regenerated when missing/invalid.
        try:
            self.identifier = (int)(self.addon.getSetting('crashreport_ident'))
            if self.identifier < 1:
                self.identifier = random.randint(1, 10000)
        except:
            self.identifier = random.randint(1, 10000)
        try:
            self.email = self.addon.getSetting('crashreport_email')
            if self.addon.getSetting('crashreport_enable') == 'true':
                self.enabled = True
                # NOTE(review): self.cookiejar only exists when enabled is
                # True; sendError relies on that (it checks enabled first).
                self.cookiejar = cookielib.CookieJar()
            else:
                self.enabled = False
        except:
            self.enabled = False
        try:
            if self.addon.getSetting('crashreport_debug') == 'true':
                self.debug = True
            else:
                self.debug = False
        except:
            self.debug = False
    def sendError(self, error, details, isDebug=False):
        # Submit one error report after asking the user for confirmation.
        # error/details: arbitrary values, stringified before upload.
        # isDebug: route the report through the debug toggle instead of the
        # normal opt-in toggle.
        if ((self.enabled == True and not isDebug) or (self.debug and isDebug)):
            diaglog = xbmcgui.Dialog()
            # Localized yes/no prompt asking permission to send the report.
            result = diaglog.yesno(self.addon.getLocalizedString(30000), self.addon.getLocalizedString(30070), self.addon.getLocalizedString(30071),self.addon.getLocalizedString(30072))
            if result == True:
                self.identifier = self.identifier + 1
                opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookiejar))
                url = 'https://docs.google.com/forms/d/1cjcbODdHz-min7XSvyN5Jr94QBp1hxW8NeKdst3Uss8/formResponse'
                request = urllib2.Request(url)
                self.cookiejar.add_cookie_header(request)
                # Field names below are the Google Forms entry IDs.
                data = {}
                data['entry.1628723602'] = str(self.identifier)
                data['entry.2060634433'] = str(self.pluginName)
                data['entry.2106106416'] = str(self.pluginVersion)
                data['entry.1751515217'] = str(self.email)
                data['entry.519109288'] = str(self.xbmcVersion)
                data['entry.478215099'] = str(details)
                data['entry.782599917'] = str(error)
                url_values = urllib.urlencode(data)
                # POST the form; failures are logged and silently dropped.
                try:
                    # response = opener.open(request,'entry.977603264='+str(self.identifier)+'&entry.243770882='+str(self.pluginName)+'&entry.878700058='+str(self.pluginVersion)+'&entry.1258581285='+str(self.email)+'&entry.1260404759='+str(self.xbmcVersion)+'&entry.1753855090='+str(details)+'&entry.671238889='+str(error))
                    response = opener.open(request,url_values)
                except urllib2.URLError, e:
                    xbmc.log(self.addon.getAddonInfo('name') + ': ' + str(e), xbmc.LOGERROR)
                    return
                response.close()
                # Confirm to the user and persist the incremented identifier.
                xbmcgui.Dialog().ok(self.addon.getLocalizedString(30000), self.addon.getLocalizedString(30073), str(self.identifier))
                self.addon.setSetting('crashreport_ident', (str)(self.identifier))
| gpl-2.0 |
mihow/headers-to-image | app.py | 1 | 11813 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import StringIO
import datetime as dt
import random
import logging
import md5
import boto3
from flask import Flask, request, send_file, redirect, url_for, jsonify, render_template_string
from flask.json import JSONEncoder
from raven.contrib.flask import Sentry
from PIL import Image, ImageDraw
from nocache import nocache
from dotenv import load_dotenv, find_dotenv
# Logging configuration
logging.basicConfig(format='%(levelname)s:%(message)s')
log = logging.getLogger('footer')
log.setLevel(logging.INFO)

# Load environment vars from the '.env' file (no-op if it doesn't exist)
load_dotenv(find_dotenv())

# Initialize Flask app
app = Flask(__name__)
# Alias for running directly with mod_wsgi (expects 'application')
application = app

# Configure remote error reporting via Sentry (DSN comes from the env)
sentry = Sentry(app,
                logging=True,
                level=logging.INFO,
                dsn=os.environ.get('SENTRY_DSN'))

# Initialize Amazon Web Services client (SES, for outbound test emails)
ses = boto3.client('ses')

# Static app variables
SOURCE_CITY = "New York"
def cache_buster():
    """Return a random integer in [0, 999999999999] used to defeat caches."""
    return random.randint(0, 999999999999)
def mask_sensitive_data(data):
    """Recursively replace values whose keys look secret with '*******'.

    A key is considered secret when its upper-cased form contains any of
    KEY, PASS, _ID or -ID. Mappings are mutated in place (and nested
    mappings are recursed into); the (possibly mutated) input is returned.
    Non-mapping inputs are returned unchanged.
    """
    secret_markers = ('KEY', 'PASS', '_ID', '-ID')
    if hasattr(data, 'items'):
        for key, value in data.items():
            if hasattr(value, 'items'):
                mask_sensitive_data(value)
            elif any(marker in key.upper() for marker in secret_markers):
                data[key] = "*******"
    return data
def request_data():
    """Collect headers, query args and WSGI environ for the current request.

    Secret-looking values are masked before the dict is returned.
    """
    data = {
        'headers': dict(request.headers),      # HTTP headers
        'query_args': dict(request.args),      # GET & POST vars
        'environ': dict(request.environ),      # WSGI environment
        'sensitive_test': {'TEST_API_KEY': 1234567878999,
                           'TEST_PASSWORD': 'happybirthday'},
    }
    return mask_sensitive_data(data)
def summarize(data):
    """Extract a small set of interesting request fields from *data*.

    Scans one level of nesting: for nested mappings the *inner* keys are
    matched against the whitelist; for flat values the top-level key is
    matched. Matching is case-insensitive. A TIMESTAMP entry (current
    local datetime) is always included.

    Returns a new dict; *data* is not modified.
    """
    keys_of_interest = [
        "USER-AGENT",
        "HOST",
        "HTTP_REFERER",
        "REFERER",
        "REMOTE_ADDR",
        "X-FORWARDED-FOR",
        "HTTP_X_FORWARDED_FOR",
        "REQUEST_URI",
    ]
    summary = {}
    summary['TIMESTAMP'] = dt.datetime.now()
    if hasattr(data, 'items'):
        for k, v in data.items():
            if hasattr(v, 'items'):
                for kk in v:
                    if kk.upper() in keys_of_interest:
                        summary[kk] = v[kk]
            elif k.upper() in keys_of_interest:
                summary[k] = data[k]
    return summary
    # Bug fix: the original had two unreachable statements after this
    # return (a mask_sensitive_data() call and a second return); removed.
def data_to_str(data):
    """Render a (possibly nested) dict as indented plain text via Jinja2.

    Returns a UTF-8 encoded byte string suitable for drawing onto an image.
    """
    tmpl = """
    {% for k,v in data.items() %}{% if v.items %}{{ k }}:
        {% for kk, vv in v.items() %}
        {{ kk }}: {{ vv }}{% endfor %} {% else %}
    {{ k }}: {{ v }} {% endif %}
    {% endfor %}
    """
    return render_template_string(tmpl, data=data).encode('utf8')
def create_image(txt, width=1024, height=2048):
    """Render *txt* as black text on a white RGBA canvas of the given size."""
    canvas = Image.new("RGBA", (width, height), (255, 255, 255))
    ImageDraw.Draw(canvas).text((10, 0), txt, (0, 0, 0))
    return canvas
@nocache
def serve_image(pil_img):
    """Encode *pil_img* as a quality-90 JPEG and return a Flask file response."""
    img_io = StringIO.StringIO()
    pil_img.save(img_io, 'JPEG', quality=90)
    img_io.seek(0)
    response = send_file(img_io, mimetype='image/jpeg')
    # See https://emailexpert.org/gmail-tracking-changes-the-fix-what-you-need-to-know/
    if 'GoogleImageProxy' in request.headers.get('User-Agent', ''):
        # This works - we get a request, but we serve a broken image
        # Can we log the unique request data, and then serve a redirect with a custom
        # image?
        # response.content_length = 0
        # NOTE(review): intentionally a no-op placeholder for Gmail-proxy
        # handling; currently the proxy gets the same image as everyone else.
        pass
    return response
@app.route('/request_data.jpg')
@nocache
def as_image():
    """Serve the full request data rendered into a JPEG image."""
    return serve_image(create_image(data_to_str(request_data())))
@app.route('/request_data.html')
def as_html():
    """Serve the full (masked) request data as a nested HTML list."""
    data = request_data()
    tmpl = """
    <!doctype html>
    <html><body style="font-family: monospace;">
    <ul>
    {% for k,v in data.items() %}
    {% if v.items %}
    <li>{{ k }}:
        <ul>{% for kk, vv in v.items() %}
        <li><b>{{ kk }}:</b> {{ vv }}</li>
        {% endfor %}
        </ul>
    {% else %}
    <li>{{ k }}: {{ v }}</li>
    {% endif %}
    {% endfor %}
    </ul>
    </body></html>
    """
    return render_template_string(tmpl, data=data)
class CustomJSONEncoder(JSONEncoder):
    """Permissive JSON encoder: stringify anything Flask can't serialize."""
    def default(self, o):
        return str(o)

# Install the permissive encoder so jsonify() can handle arbitrary objects
# (e.g. datetimes and geoip2 response objects in the routes below).
app.json_encoder = CustomJSONEncoder
@app.route('/request_data.json')
def as_json():
    """Serve the full (masked) request data as a JSON document."""
    return jsonify(request_data()), 200
@app.route('/')
def index():
    """Redirect the bare root URL to the interactive /embed page."""
    return redirect(url_for('embed'))
@app.route('/summary.jpg')
@nocache
def summary_image():
    """Serve a condensed view of the request data as a JPEG image."""
    summary_txt = data_to_str(summarize(request_data()))
    return serve_image(create_image(summary_txt, height=600))
@app.route('/summary.json')
def summary():
    """Serve a condensed view of the request data as JSON."""
    return jsonify(summarize(request_data()))
@app.route('/location.jpg')
@app.route('/location-<int:request_id>.jpg')
@nocache
def location_image(request_id=None):
    """Serve a small JPEG banner: origin city, client city, language, UA.

    The optional *request_id* path segment exists only so callers can
    generate unique URLs that defeat intermediary caches.
    """
    geo = get_location(request_data())
    city_name = geo.city.name if geo else "Unknown"
    txt = "{} => {} \r\n{} \r\n{}".format(
        SOURCE_CITY, city_name, get_client_language(),
        request.headers.get('User-Agent'))
    log.info('Serving location image: {}'.format(txt))
    return serve_image(create_image(txt, width=600, height=64))
@app.route('/location.json')
def location():
    """Serve the GeoIP lookup result for the client as JSON."""
    return jsonify(get_location(request_data()))
@app.route('/embed')
def embed():
    """Landing page: copy-pasteable tracking image, test-email form, links."""
    tmpl = """
    <!doctype html>
    <html><body style="
        font-family:monospace;
        font-size: medium;
        width: 40em;
        border: 1px solid blue;
        padding: 2%;
        margin: 4% auto;">
    <h2>Select the image below and paste in your email body:</h2>
    <p>&nbsp;</p>
    <div style="text-align:center; background: #eee; width: 80%; margin: 0 auto;">
    <p>[-- text before image --]<br><br></p>
    <p>
    <img src="{{ url_for('location_image', _external=True, request_id=buster1) }}"
         title="Request data as image"
         alt="This should be an image with HTTP headers, etc">
    </p>
    <p><br>[-- text after image --]</p>
    </div>
    <p>&nbsp;</p>
    <h2>Send a test email:</h2>
    <form action="/email" method="POST">
    <p>
    <input
        name="email"
        type="text"
        placeholder="youremail@example.org"
        style="width:95%" />
    </p>
    <p>
    <input type="submit" value="Send!">
    </p>
    <p>&nbsp;</p>
    <h2>Or here is html for the image tag you can use:</h2>
    <p>
    <input
        type="text"
        value='<img src="{{ url_for('location_image', request_id=buster2, _external=True) }}">'
        style="width:95%" />
    </p>
    <h2>Links</h2>
    <ul>
        <li><a href="{{ url_for('as_html') }}">
            Show request data as HTML</a></li>
        <li><a href="{{ url_for('as_json') }}">
            Show request data as JSON</a></li>
        <li><a href="{{ url_for('location') }}">
            Show locaton data as JSON</a></li>
    </ul>
    </body></html>
    """
    # Two independent cache-busting ids: one for the displayed image, one
    # for the copyable <img> snippet.
    return render_template_string(tmpl,
        buster1=cache_buster(), buster2=cache_buster())
def get_client_language():
    """Return the current request's Accept-Language preferences (werkzeug list)."""
    lang = request.accept_languages
    # @TODO convert lang code to something more human friendly
    return lang
def get_client_ip(request_dict):
    """Return the client's externally-routable IP address, or None.

    Checks the usual proxy/forwarding environ keys in priority order and
    skips any value that falls in a private, loopback, link-local or
    otherwise non-routable range. In FLASK_DEBUG mode a fixed public IP
    is returned so GeoIP lookups work locally.

    request_dict: dict with an 'environ' mapping (as built by request_data).
    """
    if os.environ.get('FLASK_DEBUG'):
        return '73.67.227.118'
    potential_ip_keys = [
        'HTTP_X_FORWARDED_FOR',
        'X_FORWARDED_FOR',
        'HTTP_CLIENT_IP',
        'HTTP_X_REAL_IP',
        'HTTP_X_FORWARDED',
        'HTTP_X_CLUSTER_CLIENT_IP',
        'HTTP_FORWARDED_FOR',
        'HTTP_FORWARDED',
        'HTTP_VIA',
        'REMOTE_ADDR',
    ]
    ignore_ip_prefixes = [
        '0.',  # externally non-routable
        '10.',  # class A private block
        '169.254.',  # link-local block
        '172.16.', '172.17.', '172.18.', '172.19.',
        '172.20.', '172.21.', '172.22.', '172.23.',
        '172.24.', '172.25.', '172.26.', '172.27.',
        '172.28.', '172.29.', '172.30.', '172.31.',  # class B private blocks
        '192.0.2.',  # reserved for documentation and example code
        '192.168.',  # class C private block
        '255.255.255.',  # IPv4 broadcast address
        '2001:db8:',  # reserved for documentation and example code
        'fc00:',  # IPv6 private block
        'fe80:',  # link-local unicast
        'ff00:',  # IPv6 multicast
        '127.',  # IPv4 loopback device
        '::1',  # IPv6 loopback device
    ]
    for key in potential_ip_keys:
        match = request_dict['environ'].get(key)
        if not match:
            continue
        # Bug fix: the original 'continue' only skipped the inner prefix
        # loop and then unconditionally returned, so the ignore list never
        # took effect. Skip non-routable candidates and keep looking.
        if any(match.startswith(prefix) for prefix in ignore_ip_prefixes):
            continue
        return match
    return None
def get_location(request):
    """Look up the client's IP in the bundled GeoLite2 city database.

    request: the request_data() dict (shadows the flask 'request' import
    inside this function — kept for interface compatibility).

    Returns the geoip2 city response object, or None when the address is
    not in the database (private/loopback addresses, unknown IPs).
    """
    import geoip2.database
    from geoip2.errors import AddressNotFoundError
    # @TODO make more robust method of finding user's real IP
    # https://github.com/mihow/django-ipware
    ip_address = get_client_ip(request)
    dbpath = os.path.join(os.path.dirname(__file__), 'GeoLite2-City.mmdb')
    lookup = geoip2.database.Reader(dbpath)
    try:
        return lookup.city(ip_address)
    except AddressNotFoundError:
        return None
    finally:
        # Bug fix: the reader was previously closed only on the success
        # path, leaking the mmdb file handle for every unknown address.
        lookup.close()
def ga_image_url(request_id, user_id, debug=True):
    """Build a Google Analytics Measurement Protocol pixel URL.

    Records an email/open event at virtual page /email/<request_id>.
    With debug=True the GA validation endpoint (/debug/collect) is used,
    which echoes hit diagnostics instead of recording the hit.
    """
    if debug:
        endpoint = 'https://www.google-analytics.com/debug/collect?v=1'
    else:
        endpoint = 'https://www.google-analytics.com/collect?v=1'
    params = ('&tid={ga_id}'
              '&uid={user_id}'
              '&cid={request_id}'
              '&t=event&ec=email&ea=open'
              '&dp=/email/{request_id}').format(
                  ga_id=os.environ.get('GOOGLE_ANALYTICS_ID'),
                  request_id=request_id,
                  user_id=user_id)
    return endpoint + params
@app.route('/email', methods=['POST'])
def send_email():
    """Send a test email (via AWS SES) embedding the tracking images.

    The HTML body contains two pixels: this app's location image and a
    Google Analytics Measurement Protocol hit, both keyed by a random
    per-send request id.
    """
    email = request.form['email']
    body_tmpl = """
    Hello!  Your image is below: <br><br>
    {% autoescape false %}
    <img src="{{ footer_img_url }}"
         alt="Image with your location, etc should be here."><br><br>
    {% endautoescape %}
    -Footer <br><br>
    {% autoescape false %}
    <img src="{{ google_image_url }}">
    {% endautoescape %}
    """
    request_id = cache_buster()
    # Pseudonymous per-address id for GA (md5 of the email address).
    user_id = md5.md5(email).hexdigest()
    debug=os.environ.get('FLASK_DEBUG', False)
    body = render_template_string(
        body_tmpl,
        footer_img_url=url_for('location_image',
                               request_id=request_id,
                               _external=True),
        google_image_url=ga_image_url(
            request_id,
            user_id,
            debug=debug))
    body = body.encode('utf8')
    result = ses.send_email(
        Source='footer@bunsen.town',
        Destination={
            'ToAddresses': [
                email,
            ],
        },
        Message={
            'Subject': {
                'Data': 'Footer project test #{}'.format(cache_buster()),
            },
            'Body': {
                #'Text': {
                #    'Data': 'string',
                #    'Charset': 'string'
                #},
                'Html': {
                    'Data': body,
                }
            }
        },
    )
    # SES returns HTTP 2xx on success; anything else is reported as failure.
    status_code = result.get('ResponseMetadata').get('HTTPStatusCode')
    if str(status_code).startswith('2'):
        return "Success!"
    else:
        return "Something went wrong :("
| gpl-3.0 |
MPC-Berkeley/barc | Dator/data_api/migrations/0015_auto_20151029_2013.py | 3 | 1797 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: introduces the Experiment model and
    # links the existing Blob/Event/Setting/Signal models to it through
    # nullable foreign keys (nullable so existing rows remain valid).

    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
        ('data_api', '0014_auto_20150901_1640'),
    ]

    operations = [
        migrations.CreateModel(
            name='Experiment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('uuid', models.CharField(max_length=128, db_index=True)),
                ('started_at', models.DateTimeField(null=True)),
                ('ended_at', models.DateTimeField(null=True)),
                ('name', models.CharField(max_length=128, db_index=True)),
                ('group', models.ManyToManyField(to='auth.Group')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='blob',
            name='experiment',
            field=models.ForeignKey(to='data_api.Experiment', null=True),
        ),
        migrations.AddField(
            model_name='event',
            name='experiment',
            field=models.ForeignKey(to='data_api.Experiment', null=True),
        ),
        migrations.AddField(
            model_name='setting',
            name='experiment',
            field=models.ForeignKey(to='data_api.Experiment', null=True),
        ),
        migrations.AddField(
            model_name='signal',
            name='experiment',
            field=models.ForeignKey(to='data_api.Experiment', null=True),
        ),
    ]
| mit |
openstack/dragonflow | dragonflow/tests/unit/test_os_ken_base_app.py | 1 | 3398 | # Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from dragonflow import conf as cfg
from dragonflow.switch.drivers.ovs import os_ken_base_app
from dragonflow.tests import base as tests_base
class TestOsKenDFAdapter(tests_base.BaseTestCase):
    """
    This unit test has to verify that all events are called correctly, both
    via the notify* functions, as well as the events called from os_ken.
    Having os_ken call these events will be done in the functional tests.
    """

    def setUp(self):
        super(TestOsKenDFAdapter, self).setUp()
        cfg.CONF.set_override(
            'datapath_layout_path',
            'etc/dragonflow_datapath_layout.yaml',
            group='df',
        )
        # Adapter under test, with all collaborators mocked out.
        self.os_ken_df_adapter = os_ken_base_app.OsKenDFAdapter(
            switch_backend=mock.Mock(),
            nb_api=mock.Mock(),
            db_change_callback=mock.Mock())
        # spec restricts the mock to the handler surface the adapter calls.
        self.mock_app = mock.Mock(spec=[
            'router_updated',
            'router_deleted',
            'add_security_group_rule',
            'remove_security_group_rule',
            'switch_features_handler',
            'port_desc_stats_reply_handler',
            'packet_in_handler'
        ])
        # Replace the dispatcher's loader so only our mock app is registered.
        def dispatcher_load(*args, **kwargs):
            self.os_ken_df_adapter.dispatcher.apps = {'mock': self.mock_app}
        self.os_ken_df_adapter.dispatcher.load = dispatcher_load
        self.os_ken_df_adapter.load()

    def test_switch_features_handler(self):
        # The event must be forwarded verbatim to every registered app.
        self.mock_app.reset_mock()
        ev = mock.Mock()
        ev.msg = mock.Mock()
        ev.msg.datapath = mock.Mock()
        ev.msg.datapath.ofproto = mock.Mock()
        ev.msg.datapath.ofproto.OFP_VERSION = 0x04  # OpenFlow 1.3
        self.os_ken_df_adapter.switch_features_handler(ev)
        self.mock_app.assert_has_calls([mock.call.switch_features_handler(ev)])

    def test_port_desc_stats_reply_handler(self):
        self.mock_app.reset_mock()
        ev = mock.Mock()
        self.os_ken_df_adapter.port_desc_stats_reply_handler(ev)
        self.mock_app.assert_has_calls([
            mock.call.port_desc_stats_reply_handler(ev)])

    def test_packet_in_handler(self):
        # packet-in events are routed by table id to the registered handler.
        self.mock_app.reset_mock()
        ev = mock.Mock()
        ev.msg.table_id = 10
        self.mock_app.packet_in_handler.__name__ = 'mock'
        self.os_ken_df_adapter.register_table_handler(
            10, self.mock_app.packet_in_handler)
        self.os_ken_df_adapter.OF_packet_in_handler(ev)
        self.mock_app.assert_has_calls([mock.call.packet_in_handler(ev)])

    def test_register_twice(self):
        # Registering a second handler for the same table must be rejected.
        self.os_ken_df_adapter.register_table_handler(0, 0)
        with testtools.ExpectedException(RuntimeError):
            self.os_ken_df_adapter.register_table_handler(0, 0)
| apache-2.0 |
codefollower/Cassandra-Research | pylib/cqlshlib/test/test_cql_parsing.py | 4 | 35160 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from unittest import TestCase
from operator import itemgetter
from ..cql3handling import CqlRuleSet
class TestCqlParsing(TestCase):
def test_parse_string_literals(self):
for n in ["'eggs'", "'Sausage 1'", "'spam\nspam\n\tsausage'", "''"]:
self.assertSequenceEqual(tokens_with_types(CqlRuleSet.lex(n)),
[(n, 'quotedStringLiteral')])
self.assertSequenceEqual(tokens_with_types(CqlRuleSet.lex("'eggs'")),
[("'eggs'", 'quotedStringLiteral')])
def test_parse_numbers(self):
for n in ['6', '398', '18018']:
self.assertSequenceEqual(tokens_with_types(CqlRuleSet.lex(n)),
[(n, 'wholenumber')])
def test_parse_uuid(self):
uuids = ['4feeae80-e9cc-11e4-b571-0800200c9a66',
'7142303f-828f-4806-be9e-7a973da0c3f9',
'dff8d435-9ca0-487c-b5d0-b0fe5c5768a8']
for u in uuids:
self.assertSequenceEqual(tokens_with_types(CqlRuleSet.lex(u)),
[(u, 'uuid')])
def test_comments_in_string_literals(self):
comment_strings = ["'sausage -- comment'",
"'eggs and spam // comment string'",
"'spam eggs sausage and spam /* still in string'"]
for s in comment_strings:
self.assertSequenceEqual(tokens_with_types(CqlRuleSet.lex(s)),
[(s, 'quotedStringLiteral')])
def test_colons_in_string_literals(self):
comment_strings = ["'Movie Title: The Movie'",
"':a:b:c:'",
"'(>>=) :: (Monad m) => m a -> (a -> m b) -> m b'"]
for s in comment_strings:
self.assertSequenceEqual(tokens_with_types(CqlRuleSet.lex(s)),
[(s, 'quotedStringLiteral')])
def test_partial_parsing(self):
[parsed] = CqlRuleSet.cql_parse('INSERT INTO ks.test')
self.assertSequenceEqual(parsed.matched, [])
self.assertSequenceEqual(tokens_with_types(parsed.remainder),
[('INSERT', 'identifier'),
('INTO', 'identifier'),
('ks', 'identifier'),
('.', 'op'),
('test', 'identifier')])
def test_parse_select(self):
parsed = parse_cqlsh_statements('SELECT FROM ks.tab;')
self.assertSequenceEqual(tokens_with_types(parsed),
[('SELECT', 'identifier'),
('FROM', 'identifier'),
('ks', 'identifier'),
('.', 'op'),
('tab', 'identifier'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements('SELECT FROM "MyTable";')
self.assertSequenceEqual(tokens_with_types(parsed),
[('SELECT', 'identifier'),
('FROM', 'identifier'),
('"MyTable"', 'quotedName'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
'SELECT FROM tab WHERE foo = 3;')
self.assertSequenceEqual(tokens_with_types(parsed),
[('SELECT', 'identifier'),
('FROM', 'identifier'),
('tab', 'identifier'),
('WHERE', 'identifier'),
('foo', 'identifier'),
('=', 'op'),
('3', 'wholenumber'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
'SELECT FROM tab ORDER BY event_id DESC LIMIT 1000')
self.assertSequenceEqual(tokens_with_types(parsed),
[('SELECT', 'identifier'),
('FROM', 'identifier'),
('tab', 'identifier'),
('ORDER', 'identifier'),
('BY', 'identifier'),
('event_id', 'identifier'),
('DESC', 'identifier'),
('LIMIT', 'identifier'),
('1000', 'wholenumber')])
parsed = parse_cqlsh_statements(
'SELECT FROM tab WHERE clustering_column > 200 '
'AND clustering_column < 400 ALLOW FILTERING')
self.assertSequenceEqual(tokens_with_types(parsed),
[('SELECT', 'identifier'),
('FROM', 'identifier'),
('tab', 'identifier'),
('WHERE', 'identifier'),
('clustering_column', 'identifier'),
('>', 'cmp'),
('200', 'wholenumber'),
('AND', 'identifier'),
('clustering_column', 'identifier'),
('<', 'cmp'),
('400', 'wholenumber'),
# 'allow' and 'filtering' are not keywords
('ALLOW', 'identifier'),
('FILTERING', 'identifier')])
def test_parse_insert(self):
parsed = parse_cqlsh_statements('INSERT INTO mytable (x) VALUES (2);')
self.assertSequenceEqual(tokens_with_types(parsed),
[('INSERT', 'identifier'),
('INTO', 'identifier'),
('mytable', 'identifier'),
('(', 'op'),
('x', 'identifier'),
(')', 'op'),
('VALUES', 'identifier'),
('(', 'op'),
('2', 'wholenumber'),
(')', 'op'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"INSERT INTO mytable (x, y) VALUES (2, 'eggs');")
self.assertSequenceEqual(tokens_with_types(parsed),
[('INSERT', 'identifier'),
('INTO', 'identifier'),
('mytable', 'identifier'),
('(', 'op'),
('x', 'identifier'),
(',', 'op'),
('y', 'identifier'),
(')', 'op'),
('VALUES', 'identifier'),
('(', 'op'),
('2', 'wholenumber'),
(',', 'op'),
("'eggs'", 'quotedStringLiteral'),
(')', 'op'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"INSERT INTO mytable (x, y) VALUES (2, 'eggs');")
self.assertSequenceEqual(tokens_with_types(parsed),
[('INSERT', 'identifier'),
('INTO', 'identifier'),
('mytable', 'identifier'),
('(', 'op'),
('x', 'identifier'),
(',', 'op'),
('y', 'identifier'),
(')', 'op'),
('VALUES', 'identifier'),
('(', 'op'),
('2', 'wholenumber'),
(',', 'op'),
("'eggs'", 'quotedStringLiteral'),
(')', 'op'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"INSERT INTO mytable (ids) VALUES "
"(7ee251da-af52-49a4-97f4-3f07e406c7a7) "
"USING TTL 86400;")
self.assertSequenceEqual(tokens_with_types(parsed),
[('INSERT', 'identifier'),
('INTO', 'identifier'),
('mytable', 'identifier'),
('(', 'op'),
('ids', 'identifier'),
(')', 'op'),
('VALUES', 'identifier'),
('(', 'op'),
('7ee251da-af52-49a4-97f4-3f07e406c7a7', 'uuid'),
(')', 'op'),
('USING', 'identifier'),
('TTL', 'identifier'),
('86400', 'wholenumber'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"INSERT INTO test_table (username) VALUES ('Albert') "
"USING TIMESTAMP 1240003134 AND TTL 600;")
self.assertSequenceEqual(tokens_with_types(parsed),
[('INSERT', 'identifier'),
('INTO', 'identifier'),
('test_table', 'identifier'),
('(', 'op'),
('username', 'identifier'),
(')', 'op'),
('VALUES', 'identifier'),
('(', 'op'),
("'Albert'", 'quotedStringLiteral'),
(')', 'op'),
('USING', 'identifier'),
('TIMESTAMP', 'identifier'),
('1240003134', 'wholenumber'),
('AND', 'identifier'),
('TTL', 'identifier'),
('600', 'wholenumber'),
(';', 'endtoken')])
def test_parse_update(self):
parsed = parse_cqlsh_statements(
"UPDATE tab SET x = 15 WHERE y = 'eggs';")
self.assertSequenceEqual(tokens_with_types(parsed),
[('UPDATE', 'identifier'),
('tab', 'identifier'),
('SET', 'identifier'),
('x', 'identifier'),
('=', 'op'),
('15', 'wholenumber'),
('WHERE', 'identifier'),
('y', 'identifier'),
('=', 'op'),
("'eggs'", 'quotedStringLiteral'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"UPDATE tab USING TTL 432000 SET x = 15 WHERE y = 'eggs';")
self.assertSequenceEqual(tokens_with_types(parsed),
[('UPDATE', 'identifier'),
('tab', 'identifier'),
('USING', 'identifier'),
('TTL', 'identifier'),
('432000', 'wholenumber'),
('SET', 'identifier'),
('x', 'identifier'),
('=', 'op'),
('15', 'wholenumber'),
('WHERE', 'identifier'),
('y', 'identifier'),
('=', 'op'),
("'eggs'", 'quotedStringLiteral'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"UPDATE tab SET x = 15, y = 'sausage' "
"WHERE y = 'eggs';")
self.assertSequenceEqual(tokens_with_types(parsed),
[('UPDATE', 'identifier'),
('tab', 'identifier'),
('SET', 'identifier'),
('x', 'identifier'),
('=', 'op'),
('15', 'wholenumber'),
(',', 'op'),
('y', 'identifier'),
('=', 'op'),
("'sausage'", 'quotedStringLiteral'),
('WHERE', 'identifier'),
('y', 'identifier'),
('=', 'op'),
("'eggs'", 'quotedStringLiteral'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"UPDATE tab SET x = 15 "
"WHERE y IN ('eggs', 'sausage', 'spam');")
self.assertSequenceEqual(tokens_with_types(parsed),
[('UPDATE', 'identifier'),
('tab', 'identifier'),
('SET', 'identifier'),
('x', 'identifier'),
('=', 'op'),
('15', 'wholenumber'),
('WHERE', 'identifier'),
('y', 'identifier'),
('IN', 'identifier'),
('(', 'op'),
("'eggs'", 'quotedStringLiteral'),
(',', 'op'),
("'sausage'", 'quotedStringLiteral'),
(',', 'op'),
("'spam'", 'quotedStringLiteral'),
(')', 'op'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"UPDATE tab SET x = 15 "
"WHERE y = 'spam' if z = 'sausage';")
self.assertSequenceEqual(tokens_with_types(parsed),
[('UPDATE', 'identifier'),
('tab', 'identifier'),
('SET', 'identifier'),
('x', 'identifier'),
('=', 'op'),
('15', 'wholenumber'),
('WHERE', 'identifier'),
('y', 'identifier'),
('=', 'op'),
("'spam'", 'quotedStringLiteral'),
('if', 'identifier'),
('z', 'identifier'),
('=', 'op'),
("'sausage'", 'quotedStringLiteral'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"UPDATE tab SET x = 15 WHERE y = 'spam' "
"if z = 'sausage' AND w = 'spam';")
self.assertSequenceEqual(tokens_with_types(parsed),
[('UPDATE', 'identifier'),
('tab', 'identifier'),
('SET', 'identifier'),
('x', 'identifier'),
('=', 'op'),
('15', 'wholenumber'),
('WHERE', 'identifier'),
('y', 'identifier'),
('=', 'op'),
("'spam'", 'quotedStringLiteral'),
('if', 'identifier'),
('z', 'identifier'),
('=', 'op'),
("'sausage'", 'quotedStringLiteral'),
('AND', 'identifier'),
('w', 'identifier'),
('=', 'op'),
("'spam'", 'quotedStringLiteral'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"UPDATE tab SET x = 15 WHERE y = 'spam' IF EXISTS")
self.assertSequenceEqual(tokens_with_types(parsed),
[('UPDATE', 'identifier'),
('tab', 'identifier'),
('SET', 'identifier'),
('x', 'identifier'),
('=', 'op'),
('15', 'wholenumber'),
('WHERE', 'identifier'),
('y', 'identifier'),
('=', 'op'),
("'spam'", 'quotedStringLiteral'),
('IF', 'identifier'),
('EXISTS', 'identifier')])
def test_parse_delete(self):
parsed = parse_cqlsh_statements(
"DELETE FROM songs WHERE songid = 444;")
self.assertSequenceEqual(tokens_with_types(parsed),
[('DELETE', 'identifier'),
('FROM', 'identifier'),
('songs', 'identifier'),
('WHERE', 'identifier'),
('songid', 'identifier'),
('=', 'op'),
('444', 'wholenumber'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"DELETE FROM songs WHERE name IN "
"('Yellow Submarine', 'Eleanor Rigby');")
self.assertSequenceEqual(tokens_with_types(parsed),
[('DELETE', 'identifier'),
('FROM', 'identifier'),
('songs', 'identifier'),
('WHERE', 'identifier'),
('name', 'identifier'),
('IN', 'identifier'),
('(', 'op'),
("'Yellow Submarine'", 'quotedStringLiteral'),
(',', 'op'),
("'Eleanor Rigby'", 'quotedStringLiteral'),
(')', 'op'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"DELETE task_map ['2014-12-25'] from tasks where user_id = 'Santa';")
self.assertSequenceEqual(tokens_with_types(parsed),
[('DELETE', 'identifier'),
('task_map', 'identifier'),
('[', 'brackets'),
("'2014-12-25'", 'quotedStringLiteral'),
(']', 'brackets'),
('from', 'identifier'),
('tasks', 'identifier'),
('where', 'identifier'),
('user_id', 'identifier'),
('=', 'op'),
("'Santa'", 'quotedStringLiteral'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"DELETE my_list[0] from lists where user_id = 'Jim';")
self.assertSequenceEqual(tokens_with_types(parsed),
[('DELETE', 'identifier'),
('my_list', 'identifier'),
('[', 'brackets'),
('0', 'wholenumber'),
(']', 'brackets'),
('from', 'identifier'),
('lists', 'identifier'),
('where', 'identifier'),
('user_id', 'identifier'),
('=', 'op'),
("'Jim'", 'quotedStringLiteral'),
(';', 'endtoken')])
    def test_parse_batch(self):
        """Placeholder: BATCH statement lexing is not yet covered."""
        pass
def test_parse_create_keyspace(self):
parsed = parse_cqlsh_statements(
"CREATE KEYSPACE ks WITH REPLICATION = "
"{'class': 'SimpleStrategy', 'replication_factor': 1};")
self.assertSequenceEqual(tokens_with_types(parsed),
[('CREATE', 'identifier'),
('KEYSPACE', 'identifier'),
('ks', 'identifier'),
('WITH', 'identifier'),
('REPLICATION', 'identifier'),
('=', 'op'),
('{', 'brackets'),
("'class'", 'quotedStringLiteral'),
(':', 'colon'),
("'SimpleStrategy'", 'quotedStringLiteral'),
(',', 'op'),
("'replication_factor'", 'quotedStringLiteral'),
(':', 'colon'),
('1', 'wholenumber'),
('}', 'brackets'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
'CREATE KEYSPACE "Cql_test_KS" WITH REPLICATION = '
"{'class': 'NetworkTopologyStrategy', 'dc1' : 3, 'dc2': 2};")
self.assertSequenceEqual(tokens_with_types(parsed),
[('CREATE', 'identifier'),
('KEYSPACE', 'identifier'),
('"Cql_test_KS"', 'quotedName'),
('WITH', 'identifier'),
('REPLICATION', 'identifier'),
('=', 'op'),
('{', 'brackets'),
("'class'", 'quotedStringLiteral'),
(':', 'colon'),
("'NetworkTopologyStrategy'",
'quotedStringLiteral'),
(',', 'op'),
("'dc1'", 'quotedStringLiteral'),
(':', 'colon'),
('3', 'wholenumber'),
(',', 'op'),
("'dc2'", 'quotedStringLiteral'),
(':', 'colon'),
('2', 'wholenumber'),
('}', 'brackets'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"CREATE KEYSPACE ks WITH REPLICATION = "
"{'class': 'NetworkTopologyStrategy', 'dc1': 3} AND "
"DURABLE_WRITES = false;")
self.assertSequenceEqual(tokens_with_types(parsed),
[('CREATE', 'identifier'),
('KEYSPACE', 'identifier'),
('ks', 'identifier'),
('WITH', 'identifier'),
('REPLICATION', 'identifier'),
('=', 'op'),
('{', 'brackets'),
("'class'", 'quotedStringLiteral'),
(':', 'colon'),
("'NetworkTopologyStrategy'",
'quotedStringLiteral'),
(',', 'op'),
("'dc1'", 'quotedStringLiteral'),
(':', 'colon'),
('3', 'wholenumber'),
('}', 'brackets'),
('AND', 'identifier'),
# 'DURABLE_WRITES' is not a keyword
('DURABLE_WRITES', 'identifier'),
('=', 'op'),
('false', 'identifier'),
(';', 'endtoken')])
def test_parse_drop_keyspace(self):
parsed = parse_cqlsh_statements(
'DROP KEYSPACE ks;')
self.assertSequenceEqual(tokens_with_types(parsed),
[('DROP', 'identifier'),
('KEYSPACE', 'identifier'),
('ks', 'identifier'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
'DROP SCHEMA ks;')
self.assertSequenceEqual(tokens_with_types(parsed),
[('DROP', 'identifier'),
('SCHEMA', 'identifier'),
('ks', 'identifier'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
'DROP KEYSPACE IF EXISTS "My_ks";')
self.assertSequenceEqual(tokens_with_types(parsed),
[('DROP', 'identifier'),
('KEYSPACE', 'identifier'),
('IF', 'identifier'),
('EXISTS', 'identifier'),
('"My_ks"', 'quotedName'),
(';', 'endtoken')])
    def test_parse_create_table(self):
        """Placeholder: CREATE TABLE lexing is not yet covered."""
        pass
    def test_parse_drop_table(self):
        """Placeholder: DROP TABLE lexing is not yet covered."""
        pass
    def test_parse_truncate(self):
        """Placeholder: TRUNCATE lexing is not yet covered."""
        pass
    def test_parse_alter_table(self):
        """Placeholder: ALTER TABLE lexing is not yet covered."""
        pass
    def test_parse_use(self):
        """Placeholder: USE statement lexing is not yet covered."""
        pass
def test_parse_create_index(self):
parsed = parse_cqlsh_statements(
'CREATE INDEX idx ON ks.tab (i);')
self.assertSequenceEqual(tokens_with_types(parsed),
(('CREATE', 'identifier'),
('INDEX', 'identifier'),
('idx', 'identifier'),
('ON', 'identifier'),
('ks', 'identifier'),
('.', 'op'),
('tab', 'identifier'),
('(', 'op'),
('i', 'identifier'),
(')', 'op'),
(';', 'endtoken')))
parsed = parse_cqlsh_statements(
'CREATE INDEX idx ON ks.tab (i) IF NOT EXISTS;')
self.assertSequenceEqual(tokens_with_types(parsed),
(('CREATE', 'identifier'),
('INDEX', 'identifier'),
('idx', 'identifier'),
('ON', 'identifier'),
('ks', 'identifier'),
('.', 'op'),
('tab', 'identifier'),
('(', 'op'),
('i', 'identifier'),
(')', 'op'),
('IF', 'identifier'),
('NOT', 'identifier'),
('EXISTS', 'identifier'),
(';', 'endtoken')))
parsed = parse_cqlsh_statements(
'CREATE INDEX idx ON tab (KEYS(i));')
self.assertSequenceEqual(tokens_with_types(parsed),
(('CREATE', 'identifier'),
('INDEX', 'identifier'),
('idx', 'identifier'),
('ON', 'identifier'),
('tab', 'identifier'),
('(', 'op'),
('KEYS', 'identifier'),
('(', 'op'),
('i', 'identifier'),
(')', 'op'),
(')', 'op'),
(';', 'endtoken')))
parsed = parse_cqlsh_statements(
'CREATE INDEX idx ON ks.tab FULL(i);')
self.assertSequenceEqual(tokens_with_types(parsed),
[('CREATE', 'identifier'),
('INDEX', 'identifier'),
('idx', 'identifier'),
('ON', 'identifier'),
('ks', 'identifier'),
('.', 'op'),
('tab', 'identifier'),
('FULL', 'identifier'),
('(', 'op'),
('i', 'identifier'),
(')', 'op'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
'CREATE CUSTOM INDEX idx ON ks.tab (i);')
self.assertSequenceEqual(tokens_with_types(parsed),
[('CREATE', 'identifier'),
('CUSTOM', 'identifier'),
('INDEX', 'identifier'),
('idx', 'identifier'),
('ON', 'identifier'),
('ks', 'identifier'),
('.', 'op'),
('tab', 'identifier'),
('(', 'op'),
('i', 'identifier'),
(')', 'op'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"CREATE INDEX idx ON ks.tab (i) USING "
"'org.custom.index.MyIndexClass';")
self.assertSequenceEqual(tokens_with_types(parsed),
[('CREATE', 'identifier'),
('INDEX', 'identifier'),
('idx', 'identifier'),
('ON', 'identifier'),
('ks', 'identifier'),
('.', 'op'),
('tab', 'identifier'),
('(', 'op'),
('i', 'identifier'),
(')', 'op'),
('USING', 'identifier'),
("'org.custom.index.MyIndexClass'",
'quotedStringLiteral'),
(';', 'endtoken')])
parsed = parse_cqlsh_statements(
"CREATE INDEX idx ON ks.tab (i) WITH OPTIONS = "
"{'storage': '/mnt/ssd/indexes/'};")
self.assertSequenceEqual(tokens_with_types(parsed),
[('CREATE', 'identifier'),
('INDEX', 'identifier'),
('idx', 'identifier'),
('ON', 'identifier'),
('ks', 'identifier'),
('.', 'op'),
('tab', 'identifier'),
('(', 'op'),
('i', 'identifier'),
(')', 'op'),
('WITH', 'identifier'),
('OPTIONS', 'identifier'),
('=', 'op'),
('{', 'brackets'),
("'storage'", 'quotedStringLiteral'),
(':', 'colon'),
("'/mnt/ssd/indexes/'", 'quotedStringLiteral'),
('}', 'brackets'),
(';', 'endtoken')])
    def test_parse_drop_index(self):
        """Placeholder: DROP INDEX lexing is not yet covered."""
        pass
    def test_parse_select_token(self):
        """Placeholder: SELECT TOKEN(...) lexing is not yet covered."""
        pass
def parse_cqlsh_statements(text):
    """
    Run *text* through the same parsing steps cqlsh applies to its input.

    Mirrors cqlsh's onecmd: statements are split with CqlRuleSet; for plain
    CQL commands cqlsh stops after splitting and hands the text to the cql
    engine (full parsing only happens for cqlsh-specific statements).
    Batch statements are currently not handled.
    """
    split_result = CqlRuleSet.cql_split_statements(text)
    statements = split_result[0]
    return strip_final_empty_items(statements)[0]
def tokens_with_types(lexed):
    """Reduce lexed tokens to (text, tokentype) pairs, checking token arity."""
    for token in lexed:
        assert len(token) > 2, lexed
    return tuple((token[1], token[0]) for token in lexed)
def strip_final_empty_items(xs):
    """
    Return a list copy of *xs* with any trailing run of falsey values removed.

    >>> strip_final_empty_items([[3, 4], [5, 6, 7], [], [], [1], []])
    [[3, 4], [5, 6, 7], [], [], [1]]
    """
    result = list(xs)
    while result:
        if result[-1]:
            break
        result.pop()
    return result
| apache-2.0 |
jramsay42/Artillery | src/Gun.py | 1 | 2295 | """ Class representing a single artillery piece. """
from math import cos
from math import floor
from math import radians
from math import sin

# Physical constants and gameplay defaults shared by every gun.
EXIT_VELOCITY = 120
GRAVITY = 9.8
DEFAULT_ELEVATION = 45
DEFAULT_AZIMUTH = 45
DEFAULT_AMMO = 10


class Gun(object):
    """ Class for representing a particular artillery piece. """

    def __init__(self, x, y, elevation=DEFAULT_ELEVATION, azimuth=DEFAULT_AZIMUTH,
                 ammo=DEFAULT_AMMO):
        """ Constructor for the gun class.

        Preconditons:
            0 < elevation < 90
            0 <= azimuth <= 360
            0 <= ammo
            0 <= x <= MAX_X
            0 <= y <= MAX_Y
        """
        self.x_pos = x
        self.y_pos = y
        self.elevation = elevation
        self.azimuth = azimuth
        self.ammo = ammo

    def __repr__(self):
        return "Gun with position x: %s y: %s" % (self.x_pos, self.y_pos)

    def get_azimuth(self):
        """ Returns the angle of orientation. """
        return self.azimuth

    def get_elevation(self):
        """ Returns the angle of elevation. """
        return self.elevation

    def calculate_range(self):
        """ Returns the linear range of the gun at the current elevation. """
        # Ideal projectile range: v^2 * sin(2 * theta) / g.
        launch_angle = 2 * radians(self.elevation)
        return EXIT_VELOCITY ** 2 * sin(launch_angle) / GRAVITY

    def calculate_trajectory(self):
        """ Returns the cell that the shell will land at as a tuple. """
        distance = self.calculate_range()
        heading = radians(self.azimuth)
        landing_x = self.x_pos + cos(heading) * distance
        landing_y = self.y_pos + sin(heading) * distance
        return (floor(landing_x), floor(landing_y))

    def set_azimuth(self, azimuth):
        """ Precondition: 0 <= azimuth < 360 """
        self.azimuth = azimuth

    def set_elevation(self, elevation):
        """ Precondition: 0 < elevation < 90 """
        self.elevation = elevation

    def shoot_gun(self):
        """ Returns True if the gun has ammo and updates the ammo. Otherwise,
        False is returned. """
        if self.ammo <= 0:
            return False
        self.ammo -= 1
        return True

    def move(self, x, y):
        """ Precondition: 0 <= x <= MAX_X, 0 <= y <= MAX_Y """
        self.x_pos = x
        self.y_pos = y
| gpl-3.0 |
ataylor32/django | django/contrib/admindocs/middleware.py | 477 | 1198 | from django import http
from django.conf import settings
class XViewMiddleware(object):
    """
    Adds an X-View header to internal HEAD requests -- used by the documentation system.
    """

    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        If the request method is HEAD and either the IP is internal or the
        user is a logged-in staff member, quickly return with an x-header
        indicating the view function. This is used by the documentation module
        to lookup the view function for an arbitrary page.
        """
        assert hasattr(request, 'user'), (
            "The XView middleware requires authentication middleware to be "
            "installed. Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.auth.middleware.AuthenticationMiddleware'.")
        if request.method != 'HEAD':
            return None
        # Check the address first so the (possibly lazy) user object is only
        # evaluated when needed, matching the original short-circuit order.
        internal = request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
        if internal or (request.user.is_active and request.user.is_staff):
            response = http.HttpResponse()
            response['X-View'] = "%s.%s" % (view_func.__module__,
                                            view_func.__name__)
            return response
| bsd-3-clause |
GhostThrone/django | tests/gis_tests/gdal_tests/test_driver.py | 335 | 1253 | import unittest
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import Driver, GDALException
# Driver names the GDAL/OGR bindings are expected to accept.
valid_drivers = (
    # vector
    'ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN', 'Memory', 'CSV',
    'GML', 'KML',
    # raster
    'GTiff', 'JPEG', 'MEM', 'PNG',
)

# Names that should be rejected by the Driver constructor.
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp', 'ESRI rast')

# Case-insensitive shorthand names mapped to the canonical driver name.
aliases = {
    'eSrI': 'ESRI Shapefile',
    'TigER/linE': 'TIGER',
    'SHAPE': 'ESRI Shapefile',
    'sHp': 'ESRI Shapefile',
    'tiFf': 'GTiff',
    'tIf': 'GTiff',
    'jPEg': 'JPEG',
    'jpG': 'JPEG',
}
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class DriverTest(unittest.TestCase):

    def test01_valid_driver(self):
        "Testing valid GDAL/OGR Data Source Drivers."
        for driver_name in valid_drivers:
            driver = Driver(driver_name)
            self.assertEqual(driver_name, str(driver))

    def test02_invalid_driver(self):
        "Testing invalid GDAL/OGR Data Source Drivers."
        for bad_name in invalid_drivers:
            self.assertRaises(GDALException, Driver, bad_name)

    def test03_aliases(self):
        "Testing driver aliases."
        for alias, full_name in aliases.items():
            self.assertEqual(full_name, str(Driver(alias)))
| bsd-3-clause |
gazpachoking/Flexget | flexget/components/sites/sites/rmz.py | 4 | 3817 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.components.sites.urlrewriting import UrlRewritingError
from flexget.utils.soup import get_soup
from flexget.components.sites.utils import normalize_unicode
from requests.exceptions import RequestException
log = logging.getLogger('rmz')
class UrlRewriteRmz(object):
    """
    rmz.cr (rapidmoviez.com) urlrewriter

    Version 0.1

    Configuration

    rmz:
      filehosters_re:
        - domain\.com
        - domain2\.org

    Only add links that match any of the regular expressions listed under filehosters_re.

    If more than one valid link is found, the url of the entry is rewritten to
    the first link found. The complete list of valid links is placed in the
    'urls' field of the entry.

    Therefore, it is recommended, that you configure your output to use the
    'urls' field instead of the 'url' field.

    For example, to use jdownloader 2 as output, you would use the exec plugin:
      exec:
        - echo "text={{urls}}" >> "/path/to/jd2/folderwatch/{{title}}.crawljob"
    """

    schema = {
        'type': 'object',
        'properties': {'filehosters_re': {'type': 'array', 'items': {'format': 'regexp'}}},
        'additionalProperties': False,
    }

    # Since the urlrewriter relies on a config, we need to create a default one
    config = {'filehosters_re': []}

    # grab config
    def on_task_start(self, task, config):
        """Store this task's plugin config for use during URL rewriting."""
        self.config = config

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return True if the entry's URL points at an rmz/rapidmoviez page."""
        url = entry['url']
        # Bugfix: the dot after 'www' is escaped now -- previously '(www.)?'
        # let '.' match any character (e.g. 'wwwxrmz.cr' was accepted).
        rewritable_regex = r'^https?:\/\/(www\.)?(rmz\.cr|rapidmoviez\.(com|eu))\/.*'
        return re.match(rewritable_regex, url) is not None

    @plugin.internet(log)
    # urlrewriter API
    def url_rewrite(self, task, entry):
        """Fetch the release page and rewrite the entry to its download links.

        Sets entry['urls'] to every link that matches one of the configured
        filehosters_re filters (or all found links when no filter is set),
        and entry['url'] to the first of them.

        Raises:
            UrlRewritingError: on request/parse failure, or when no usable
                links are found on the page.
        """
        try:
            page = task.requests.get(entry['url'])
        except RequestException as e:
            raise UrlRewritingError(str(e))
        try:
            soup = get_soup(page.text)
        except Exception as e:
            raise UrlRewritingError(str(e))
        link_elements = soup.find_all('pre', class_='links')
        # Preserve any links already attached to the entry.
        if 'urls' in entry:
            urls = list(entry['urls'])
        else:
            urls = []
        for element in link_elements:
            urls.extend(element.text.splitlines())
        regexps = self.config.get('filehosters_re', [])
        filtered_urls = []
        for i, url in enumerate(urls):
            urls[i] = normalize_unicode(url)
            for regexp in regexps:
                if re.search(regexp, urls[i]):
                    filtered_urls.append(urls[i])
                    log.debug('Url: "%s" matched filehoster filter: %s', urls[i], regexp)
                    break
            else:
                if regexps:
                    log.debug(
                        'Url: "%s" does not match any of the given filehoster filters: %s',
                        urls[i],
                        str(regexps),
                    )
        if regexps:
            log.debug('Using filehosters_re filters: %s', str(regexps))
            urls = filtered_urls
        else:
            log.debug('No filehoster filters configured, using all found links.')
        num_links = len(urls)
        log.verbose('Found %d links at %s.', num_links, entry['url'])
        if num_links:
            entry['urls'] = urls
            entry['url'] = urls[0]
        else:
            raise UrlRewritingError('No useable links found at %s' % entry['url'])
@event('plugin.register')
def register_plugin():
    """Register the rmz urlrewriter with FlexGet's plugin system."""
    plugin.register(UrlRewriteRmz, 'rmz', interfaces=['urlrewriter', 'task'], api_ver=2)
| mit |
guorendong/iridium-browser-ubuntu | ppapi/generators/idl_c_proto.py | 46 | 25722 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generator for C style prototypes and definitions """
import glob
import os
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_parser import ParseFiles
Option('cgen_debug', 'Debug generate.')
class CGenError(Exception):
  """Raised when the C generator cannot produce output for a node."""

  def __init__(self, msg):
    # Keep the message on .value; __str__ reports its repr.
    self.value = msg

  def __str__(self):
    return '%r' % (self.value,)
def CommentLines(lines, tabs=0):
  """Render *lines* as a C-style /* ... */ block indented by *tabs*."""
  indent = ' ' * tabs
  out = '%s/*' % indent + ('\n%s *' % indent).join(lines)
  # A trailing blank line already leaves a bare ' *', so only '/' is needed
  # to close the comment; otherwise append a full ' */'.
  if lines[-1]:
    out += ' */\n'
  else:
    out += '/\n'
  return out
def Comment(node, prefix=None, tabs=0):
  """Build a C comment block from a Comment node, optionally prefixed."""
  lines = node.GetName().split('\n')
  if prefix:
    prefix_lines = prefix.split('\n')
    # Avoid doubling the leading blank comment line ('*') when both the
    # prefix and the node's comment start with one.
    if prefix_lines[0] == '*' and lines[0] == '*':
      lines = prefix_lines + lines[1:]
    else:
      lines = prefix_lines + lines
  return CommentLines(lines, tabs)
def GetNodeComments(node, tabs=0):
  """Join the comment blocks of all Comment children of *node*."""
  return ''.join(Comment(doc, tabs=tabs)
                 for doc in node.GetListOf('Comment'))
class CGen(object):
# TypeMap
#
# TypeMap modifies how an object is stored or passed, for example pointers
# are passed as 'const' if they are 'in' parameters, and structures are
# preceeded by the keyword 'struct' as well as using a pointer.
#
TypeMap = {
'Array': {
'in': 'const %s',
'inout': '%s',
'out': '%s*',
'store': '%s',
'return': '%s',
'ref': '%s*'
},
'Callspec': {
'in': '%s',
'inout': '%s',
'out': '%s',
'store': '%s',
'return': '%s'
},
'Enum': {
'in': '%s',
'inout': '%s*',
'out': '%s*',
'store': '%s',
'return': '%s'
},
'Interface': {
'in': 'const %s*',
'inout': '%s*',
'out': '%s**',
'return': '%s*',
'store': '%s*'
},
'Struct': {
'in': 'const %s*',
'inout': '%s*',
'out': '%s*',
'return': ' %s*',
'store': '%s',
'ref': '%s*'
},
'blob_t': {
'in': 'const %s',
'inout': '%s',
'out': '%s',
'return': '%s',
'store': '%s'
},
'mem_t': {
'in': 'const %s',
'inout': '%s',
'out': '%s',
'return': '%s',
'store': '%s'
},
'mem_ptr_t': {
'in': 'const %s',
'inout': '%s',
'out': '%s',
'return': '%s',
'store': '%s'
},
'str_t': {
'in': 'const %s',
'inout': '%s',
'out': '%s',
'return': 'const %s',
'store': '%s'
},
'cstr_t': {
'in': '%s',
'inout': '%s*',
'out': '%s*',
'return': '%s',
'store': '%s'
},
'TypeValue': {
'in': '%s',
'constptr_in': 'const %s*', # So we can use const* for PP_Var sometimes.
'inout': '%s*',
'out': '%s*',
'return': '%s',
'store': '%s'
},
}
#
# RemapName
#
# A diction array of PPAPI types that are converted to language specific
# types before being returned by by the C generator
#
RemapName = {
'blob_t': 'void**',
'float_t': 'float',
'double_t': 'double',
'handle_t': 'int',
'mem_t': 'void*',
'mem_ptr_t': 'void**',
'str_t': 'char*',
'cstr_t': 'const char*',
'interface_t' : 'const void*'
}
# Tell how to handle pointers to GL types.
for gltype in ['GLbitfield', 'GLboolean', 'GLbyte', 'GLclampf',
'GLclampx', 'GLenum', 'GLfixed', 'GLfloat', 'GLint',
'GLintptr', 'GLshort', 'GLsizei', 'GLsizeiptr',
'GLubyte', 'GLuint', 'GLushort']:
ptrtype = gltype + '_ptr_t'
TypeMap[ptrtype] = {
'in': 'const %s',
'inout': '%s',
'out': '%s',
'return': 'const %s',
'store': '%s'
}
RemapName[ptrtype] = '%s*' % gltype
  def __init__(self):
    # Nesting depth used to indent debug log output (see LogEnter/LogExit).
    self.dbg_depth = 0
#
# Debug Logging functions
#
  def Log(self, txt):
    """Print a debug line indented to the current nesting depth.

    No-op unless the 'cgen_debug' option is set.  (Python 2 print syntax.)
    """
    if not GetOption('cgen_debug'): return
    tabs = ' ' * self.dbg_depth
    print '%s%s' % (tabs, txt)
  def LogEnter(self, txt):
    """Log *txt* (if non-empty) and increase the debug indent depth."""
    if txt: self.Log(txt)
    self.dbg_depth += 1
  def LogExit(self, txt):
    """Decrease the debug indent depth, then log *txt* (if non-empty)."""
    self.dbg_depth -= 1
    if txt: self.Log(txt)
def GetDefine(self, name, value):
out = '#define %s %s' % (name, value)
if len(out) > 80:
out = '#define %s \\\n %s' % (name, value)
return '%s\n' % out
#
# Interface strings
#
def GetMacroHelper(self, node):
macro = node.GetProperty('macro')
if macro: return macro
name = node.GetName()
name = name.upper()
return "%s_INTERFACE" % name
def GetInterfaceMacro(self, node, version = None):
name = self.GetMacroHelper(node)
if version is None:
return name
return '%s_%s' % (name, str(version).replace('.', '_'))
  def GetInterfaceString(self, node, version = None):
    """Return the interface lookup string: 'Name' or 'Name;version'."""
    # If an interface name is specified, use that
    name = node.GetProperty('iname')
    if not name:
      # Otherwise, the interface name is the object's name
      # With '_Dev' replaced by '(Dev)' if it's a Dev interface.
      name = node.GetName()
      if name.endswith('_Dev'):
        name = '%s(Dev)' % name[:-4]
    if version is None:
      return name
    return "%s;%s" % (name, version)
#
# Return the array specification of the object.
#
def GetArraySpec(self, node):
assert(node.cls == 'Array')
fixed = node.GetProperty('FIXED')
if fixed:
return '[%s]' % fixed
else:
return '[]'
#
# GetTypeName
#
# For any valid 'typed' object such as Member or Typedef
# the typenode object contains the typename
#
# For a given node return the type name by passing mode.
#
def GetTypeName(self, node, release, prefix=''):
self.LogEnter('GetTypeName of %s rel=%s' % (node, release))
# For Members, Params, and Typedefs get the type it refers to otherwise
# the node in question is it's own type (struct, union etc...)
if node.IsA('Member', 'Param', 'Typedef'):
typeref = node.GetType(release)
else:
typeref = node
if typeref is None:
node.Error('No type at release %s.' % release)
raise CGenError('No type for %s' % node)
# If the type is a (BuiltIn) Type then return it's name
# remapping as needed
if typeref.IsA('Type'):
name = CGen.RemapName.get(typeref.GetName(), None)
if name is None: name = typeref.GetName()
name = '%s%s' % (prefix, name)
# For Interfaces, use the name + version
elif typeref.IsA('Interface'):
rel = typeref.first_release[release]
name = 'struct %s%s' % (prefix, self.GetStructName(typeref, rel, True))
# For structures, preceed with 'struct' or 'union' as appropriate
elif typeref.IsA('Struct'):
if typeref.GetProperty('union'):
name = 'union %s%s' % (prefix, typeref.GetName())
else:
name = 'struct %s%s' % (prefix, typeref.GetName())
# If it's an enum, or typedef then return the Enum's name
elif typeref.IsA('Enum', 'Typedef'):
if not typeref.LastRelease(release):
first = node.first_release[release]
ver = '_' + node.GetVersion(first).replace('.','_')
else:
ver = ''
# The enum may have skipped having a typedef, we need prefix with 'enum'.
if typeref.GetProperty('notypedef'):
name = 'enum %s%s%s' % (prefix, typeref.GetName(), ver)
else:
name = '%s%s%s' % (prefix, typeref.GetName(), ver)
else:
raise RuntimeError('Getting name of non-type %s.' % node)
self.LogExit('GetTypeName %s is %s' % (node, name))
return name
#
# GetRootType
#
# For a given node return basic type of that object. This is
# either a 'Type', 'Callspec', or 'Array'
#
def GetRootTypeMode(self, node, release, mode):
self.LogEnter('GetRootType of %s' % node)
# If it has an array spec, then treat it as an array regardless of type
if node.GetOneOf('Array'):
rootType = 'Array'
# Or if it has a callspec, treat it as a function
elif node.GetOneOf('Callspec'):
rootType, mode = self.GetRootTypeMode(node.GetType(release), release,
'return')
# If it's a plain typedef, try that object's root type
elif node.IsA('Member', 'Param', 'Typedef'):
rootType, mode = self.GetRootTypeMode(node.GetType(release),
release, mode)
# If it's an Enum, then it's normal passing rules
elif node.IsA('Enum'):
rootType = node.cls
# If it's an Interface or Struct, we may be passing by value
elif node.IsA('Interface', 'Struct'):
if mode == 'return':
if node.GetProperty('returnByValue'):
rootType = 'TypeValue'
else:
rootType = node.cls
else:
if node.GetProperty('passByValue'):
rootType = 'TypeValue'
else:
rootType = node.cls
# If it's an Basic Type, check if it's a special type
elif node.IsA('Type'):
if node.GetName() in CGen.TypeMap:
rootType = node.GetName()
else:
rootType = 'TypeValue'
else:
raise RuntimeError('Getting root type of non-type %s.' % node)
self.LogExit('RootType is "%s"' % rootType)
return rootType, mode
def GetTypeByMode(self, node, release, mode):
self.LogEnter('GetTypeByMode of %s mode=%s release=%s' %
(node, mode, release))
name = self.GetTypeName(node, release)
ntype, mode = self.GetRootTypeMode(node, release, mode)
out = CGen.TypeMap[ntype][mode] % name
self.LogExit('GetTypeByMode %s = %s' % (node, out))
return out
# Get the passing mode of the object (in, out, inout).
def GetParamMode(self, node):
self.Log('GetParamMode for %s' % node)
if node.GetProperty('in'): return 'in'
if node.GetProperty('out'): return 'out'
if node.GetProperty('inout'): return 'inout'
if node.GetProperty('constptr_in'): return 'constptr_in'
return 'return'
#
# GetComponents
#
# Returns the signature components of an object as a tuple of
# (rtype, name, arrays, callspec) where:
# rtype - The store or return type of the object.
# name - The name of the object.
# arrays - A list of array dimensions as [] or [<fixed_num>].
# args - None if not a function, otherwise a list of parameters.
#
def GetComponents(self, node, release, mode):
self.LogEnter('GetComponents mode %s for %s %s' % (mode, node, release))
# Generate passing type by modifying root type
rtype = self.GetTypeByMode(node, release, mode)
# If this is an array output, change it from type* foo[] to type** foo.
# type* foo[] means an array of pointers to type, which is confusing.
arrayspec = [self.GetArraySpec(array) for array in node.GetListOf('Array')]
if mode == 'out' and len(arrayspec) == 1 and arrayspec[0] == '[]':
rtype += '*'
del arrayspec[0]
if node.IsA('Enum', 'Interface', 'Struct'):
rname = node.GetName()
else:
rname = node.GetType(release).GetName()
if rname in CGen.RemapName:
rname = CGen.RemapName[rname]
if '%' in rtype:
rtype = rtype % rname
name = node.GetName()
callnode = node.GetOneOf('Callspec')
if callnode:
callspec = []
for param in callnode.GetListOf('Param'):
if not param.IsRelease(release):
continue
mode = self.GetParamMode(param)
ptype, pname, parray, pspec = self.GetComponents(param, release, mode)
callspec.append((ptype, pname, parray, pspec))
else:
callspec = None
self.LogExit('GetComponents: %s, %s, %s, %s' %
(rtype, name, arrayspec, callspec))
return (rtype, name, arrayspec, callspec)
def Compose(self, rtype, name, arrayspec, callspec, prefix, func_as_ptr,
include_name, unsized_as_ptr):
self.LogEnter('Compose: %s %s' % (rtype, name))
arrayspec = ''.join(arrayspec)
# Switch unsized array to a ptr. NOTE: Only last element can be unsized.
if unsized_as_ptr and arrayspec[-2:] == '[]':
prefix += '*'
arrayspec=arrayspec[:-2]
if not include_name:
name = prefix + arrayspec
else:
name = prefix + name + arrayspec
if callspec is None:
out = '%s %s' % (rtype, name)
else:
params = []
for ptype, pname, parray, pspec in callspec:
params.append(self.Compose(ptype, pname, parray, pspec, '', True,
include_name=True,
unsized_as_ptr=unsized_as_ptr))
if func_as_ptr:
name = '(*%s)' % name
if not params:
params = ['void']
out = '%s %s(%s)' % (rtype, name, ', '.join(params))
self.LogExit('Exit Compose: %s' % out)
return out
#
# GetSignature
#
# Returns the 'C' style signature of the object
# prefix - A prefix for the object's name
# func_as_ptr - Formats a function as a function pointer
# include_name - If true, include member name in the signature.
# If false, leave it out. In any case, prefix is always
# included.
# include_version - if True, include version in the member name
#
  def GetSignature(self, node, release, mode, prefix='', func_as_ptr=True,
                   include_name=True, include_version=False):
    """Return the 'C' style signature string for *node*.

    See the comment block above for the meaning of prefix, func_as_ptr,
    include_name and include_version.
    """
    self.LogEnter('GetSignature %s %s as func=%s' %
                  (node, mode, func_as_ptr))
    rtype, name, arrayspec, callspec = self.GetComponents(node, release, mode)
    if include_version:
      name = self.GetStructName(node, release, True)
    # If not a callspec (such as a struct) use a ptr instead of []
    unsized_as_ptr = not callspec
    out = self.Compose(rtype, name, arrayspec, callspec, prefix,
                       func_as_ptr, include_name, unsized_as_ptr)
    self.LogExit('Exit GetSignature: %s' % out)
    return out
# Define a Typedef.
def DefineTypedef(self, node, releases, prefix='', comment=False):
__pychecker__ = 'unusednames=comment'
build_list = node.GetUniqueReleases(releases)
out = 'typedef %s;\n' % self.GetSignature(node, build_list[-1], 'return',
prefix, True,
include_version=False)
# Version mangle any other versions
for index, rel in enumerate(build_list[:-1]):
out += '\n'
out += 'typedef %s;\n' % self.GetSignature(node, rel, 'return',
prefix, True,
include_version=True)
self.Log('DefineTypedef: %s' % out)
return out
  # Define an Enum.
  def DefineEnum(self, node, releases, prefix='', comment=False):
    """Emit a C enum declaration for |node|.

    Supports three forms: anonymous ('unnamed'), a plain named enum
    ('notypedef'), and the default typedef'd anonymous enum.
    """
    __pychecker__ = 'unusednames=comment,releases'
    self.LogEnter('DefineEnum %s' % node)
    name = '%s%s' % (prefix, node.GetName())
    notypedef = node.GetProperty('notypedef')
    unnamed = node.GetProperty('unnamed')
    if unnamed:
      out = 'enum {'
    elif notypedef:
      out = 'enum %s {' % name
    else:
      out = 'typedef enum {'
    enumlist = []
    for child in node.GetListOf('EnumItem'):
      # An explicit VALUE property becomes an '= value' initializer.
      value = child.GetProperty('VALUE')
      comment_txt = GetNodeComments(child, tabs=1)
      if value:
        item_txt = '%s%s = %s' % (prefix, child.GetName(), value)
      else:
        item_txt = '%s%s' % (prefix, child.GetName())
      enumlist.append('%s %s' % (comment_txt, item_txt))
    self.LogExit('Exit DefineEnum')
    # Named/typedef'd enums repeat the name after the closing brace.
    if unnamed or notypedef:
      out = '%s\n%s\n};\n' % (out, ',\n'.join(enumlist))
    else:
      out = '%s\n%s\n} %s;\n' % (out, ',\n'.join(enumlist), name)
    return out
def DefineMember(self, node, releases, prefix='', comment=False):
__pychecker__ = 'unusednames=prefix,comment'
release = releases[0]
self.LogEnter('DefineMember %s' % node)
if node.GetProperty('ref'):
out = '%s;' % self.GetSignature(node, release, 'ref', '', True)
else:
out = '%s;' % self.GetSignature(node, release, 'store', '', True)
self.LogExit('Exit DefineMember')
return out
def GetStructName(self, node, release, include_version=False):
suffix = ''
if include_version:
ver_num = node.GetVersion(release)
suffix = ('_%s' % ver_num).replace('.', '_')
return node.GetName() + suffix
def DefineStructInternals(self, node, release,
include_version=False, comment=True):
channel = node.GetProperty('FILE').release_map.GetChannel(release)
if channel == 'dev':
channel_comment = ' /* dev */'
else:
channel_comment = ''
out = ''
if node.GetProperty('union'):
out += 'union %s {%s\n' % (
self.GetStructName(node, release, include_version), channel_comment)
else:
out += 'struct %s {%s\n' % (
self.GetStructName(node, release, include_version), channel_comment)
channel = node.GetProperty('FILE').release_map.GetChannel(release)
# Generate Member Functions
members = []
for child in node.GetListOf('Member'):
if channel == 'stable' and child.NodeIsDevOnly():
continue
member = self.Define(child, [release], tabs=1, comment=comment)
if not member:
continue
members.append(member)
out += '%s\n};\n' % '\n'.join(members)
return out
def DefineUnversionedInterface(self, node, rel):
out = '\n'
if node.GetProperty('force_struct_namespace'):
# Duplicate the definition to put it in struct namespace. This
# attribute is only for legacy APIs like OpenGLES2 and new APIs
# must not use this. See http://crbug.com/411799
out += self.DefineStructInternals(node, rel,
include_version=False, comment=True)
else:
# Define an unversioned typedef for the most recent version
out += 'typedef struct %s %s;\n' % (
self.GetStructName(node, rel, include_version=True),
self.GetStructName(node, rel, include_version=False))
return out
  def DefineStruct(self, node, releases, prefix='', comment=False):
    """Emit struct/interface definitions for the requested releases.

    Structs are single-version.  Interfaces additionally get versioned
    copies for earlier releases and, for the newest stable release, an
    unversioned typedef alias.
    """
    __pychecker__ = 'unusednames=comment,prefix'
    self.LogEnter('DefineStruct %s' % node)
    out = ''
    build_list = node.GetUniqueReleases(releases)
    # Track the newest stable and newest dev releases in the build list.
    newest_stable = None
    newest_dev = None
    for rel in build_list:
      channel = node.GetProperty('FILE').release_map.GetChannel(rel)
      if channel == 'stable':
        newest_stable = rel
      if channel == 'dev':
        newest_dev = rel
    last_rel = build_list[-1]
    # TODO(bradnelson) : Bug 157017 finish multiversion support
    if node.IsA('Struct'):
      if len(build_list) != 1:
        node.Error('Can not support multiple versions of node.')
      assert len(build_list) == 1
      # Build the most recent one versioned, with comments
      out = self.DefineStructInternals(node, last_rel,
                                       include_version=False, comment=True)
    if node.IsA('Interface'):
      # Build the most recent one versioned, with comments
      out = self.DefineStructInternals(node, last_rel,
                                       include_version=True, comment=True)
      if last_rel == newest_stable:
        out += self.DefineUnversionedInterface(node, last_rel)
      # Build the rest without comments and with the version number appended
      for rel in build_list[0:-1]:
        channel = node.GetProperty('FILE').release_map.GetChannel(rel)
        # Skip dev channel interface versions that are
        #   Not the newest version, and
        #   Don't have an equivalent stable version.
        if channel == 'dev' and rel != newest_dev:
          if not node.DevInterfaceMatchesStable(rel):
            continue
        out += '\n' + self.DefineStructInternals(node, rel,
                                                 include_version=True,
                                                 comment=False)
        if rel == newest_stable:
          out += self.DefineUnversionedInterface(node, rel)
    self.LogExit('Exit DefineStruct')
    return out
#
# Copyright and Comment
#
# Generate a comment or copyright block
#
def Copyright(self, node, cpp_style=False):
lines = node.GetName().split('\n')
if cpp_style:
return '//' + '\n//'.join(filter(lambda f: f != '', lines)) + '\n'
return CommentLines(lines)
  def Indent(self, data, tabs=0):
    """Handles indentation and 80-column line wrapping."""
    tab = ' ' * tabs
    lines = []
    for line in data.split('\n'):
      # Add indentation
      line = tab + line
      space_break = line.rfind(' ', 0, 80)
      if len(line) <= 80 or 'http://' in line:
        # Ignore normal line and URLs permitted by the style guide.
        lines.append(line.rstrip())
      elif not '(' in line and space_break >= 0:
        # Break long typedefs on nearest space.
        lines.append(line[0:space_break])
        lines.append(' ' + line[space_break + 1:])
      else:
        # Function-style line: break inside the parameter list, aligning
        # continuation lines under the opening paren when possible.
        left = line.rfind('(') + 1
        args = line[left:].split(',')
        orig_args = args
        orig_left = left
        # Try to split on '(arg1)' or '(arg1, arg2)', not '()'
        while args[0][0] == ')':
          left = line.rfind('(', 0, left - 1) + 1
          if left == 0:  # No more parens, take the original option
            args = orig_args
            left = orig_left
            break
          args = line[left:].split(',')
        line_max = 0
        for arg in args:
          if len(arg) > line_max: line_max = len(arg)
        # If aligning under the paren would overflow 80 columns, fall back
        # to a fixed indent relative to the current tab level.
        if left + line_max >= 80:
          indent = '%s ' % tab
          args = (',\n%s' % indent).join([arg.strip() for arg in args])
          lines.append('%s\n%s%s' % (line[:left], indent, args))
        else:
          indent = ' ' * (left - 1)
          args = (',\n%s' % indent).join(args)
          lines.append('%s%s' % (line[:left], args))
    return '\n'.join(lines)
# Define a top level object.
def Define(self, node, releases, tabs=0, prefix='', comment=False):
# If this request does not match unique release, or if the release is not
# available (possibly deprecated) then skip.
unique = node.GetUniqueReleases(releases)
if not unique or not node.InReleases(releases):
return ''
self.LogEnter('Define %s tab=%d prefix="%s"' % (node,tabs,prefix))
declmap = dict({
'Enum': CGen.DefineEnum,
'Function': CGen.DefineMember,
'Interface': CGen.DefineStruct,
'Member': CGen.DefineMember,
'Struct': CGen.DefineStruct,
'Typedef': CGen.DefineTypedef
})
out = ''
func = declmap.get(node.cls, None)
if not func:
ErrOut.Log('Failed to define %s named %s' % (node.cls, node.GetName()))
define_txt = func(self, node, releases, prefix=prefix, comment=comment)
comment_txt = GetNodeComments(node, tabs=0)
if comment_txt and comment:
out += comment_txt
out += define_txt
indented_out = self.Indent(out, tabs)
self.LogExit('Exit Define')
return indented_out
# Clean a string representing an object definition and return the string
# as a single space delimited set of tokens.
def CleanString(instr):
  # str.split() with no arguments already ignores leading/trailing
  # whitespace and collapses internal runs, so one join suffices.
  tokens = instr.split()
  return ' '.join(tokens)
# Test a file, by comparing all it's objects, with their comments.
def TestFile(filenode):
  cgen = CGen()
  errors = 0
  # The first two children are skipped (presumably file header/copyright
  # nodes, mirroring main() below) -- confirm.
  for node in filenode.GetChildren()[2:]:
    instr = node.GetOneOf('Comment')
    if not instr: continue
    instr.Dump()
    instr = CleanString(instr.GetName())
    outstr = cgen.Define(node, releases=['M14'])
    if GetOption('verbose'):
      print outstr + '\n'
    # Compare whitespace-normalized generated output against the expected
    # output embedded in the node's comment.
    outstr = CleanString(outstr)
    if instr != outstr:
      ErrOut.Log('Failed match of\n>>%s<<\nto:\n>>%s<<\nFor:\n' %
                 (instr, outstr))
      node.Dump(1, comments=True)
      errors += 1
  return errors
# Build and resolve the AST and compare each file individual.
def TestFiles(filenames):
  # With no explicit files, run against the bundled test_cgen IDL corpus
  # located next to this script.
  if not filenames:
    idldir = os.path.split(sys.argv[0])[0]
    idldir = os.path.join(idldir, 'test_cgen', '*.idl')
    filenames = glob.glob(idldir)
  filenames = sorted(filenames)
  ast = ParseFiles(filenames)
  # Accumulate per-file error counts; zero means the whole test passed.
  total_errs = 0
  for filenode in ast.GetListOf('File'):
    errs = TestFile(filenode)
    if errs:
      ErrOut.Log('%s test failed with %d error(s).' %
                 (filenode.GetName(), errs))
      total_errs += errs
  if total_errs:
    ErrOut.Log('Failed generator test.')
  else:
    InfoOut.Log('Passed generator test.')
  return total_errs
def main(args):
  """Entry point: parse options, then either self-test or dump generated
  C definitions for every parsed IDL file."""
  filenames = ParseOptions(args)
  if GetOption('test'):
    return TestFiles(filenames)
  ast = ParseFiles(filenames)
  cgen = CGen()
  for f in ast.GetListOf('File'):
    # Skip files that failed to parse.
    if f.GetProperty('ERRORS') > 0:
      print 'Skipping %s' % f.GetName()
      continue
    # The first two children are skipped (presumably header nodes) -- confirm.
    for node in f.GetChildren()[2:]:
      print cgen.Define(node, ast.releases, comment=True, prefix='tst_')
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
beermix/namebench | nb_third_party/dns/edns.py | 248 | 4312 | # Copyright (C) 2009 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""EDNS Options"""
NSID = 3
class Option(object):
    """Base class for all EDNS option types."""

    def __init__(self, otype):
        """Initialize an option.

        @param otype: The option type
        @type otype: int
        """
        self.otype = otype

    def to_wire(self, file):
        """Convert an option to wire format."""
        raise NotImplementedError

    def from_wire(cls, otype, wire, current, olen):
        """Build an EDNS option object from wire format

        @param otype: The option type
        @type otype: int
        @param wire: The wire-format message
        @type wire: string
        @param current: The offset in wire of the beginning of the rdata.
        @type current: int
        @param olen: The length of the wire-format option data
        @type olen: int
        @rtype: dns.edns.Option instance"""
        raise NotImplementedError

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        """Compare an EDNS option with another option of the same type.
        Return < 0 if self < other, 0 if self == other, and > 0 if
        self > other.
        """
        raise NotImplementedError

    def __eq__(self, other):
        if not isinstance(other, Option):
            return False
        if self.otype != other.otype:
            return False
        return self._cmp(other) == 0

    def __ne__(self, other):
        # BUG FIX: this used to return False for a non-Option operand (or a
        # different otype), which made both == and != false at the same
        # time.  Inequality is simply the negation of equality.
        return not self.__eq__(other)

    def __lt__(self, other):
        if not isinstance(other, Option) or \
           self.otype != other.otype:
            return NotImplemented
        return self._cmp(other) < 0

    def __le__(self, other):
        if not isinstance(other, Option) or \
           self.otype != other.otype:
            return NotImplemented
        return self._cmp(other) <= 0

    def __ge__(self, other):
        if not isinstance(other, Option) or \
           self.otype != other.otype:
            return NotImplemented
        return self._cmp(other) >= 0

    def __gt__(self, other):
        if not isinstance(other, Option) or \
           self.otype != other.otype:
            return NotImplemented
        return self._cmp(other) > 0
class GenericOption(Option):
    """Generic Option Class

    This class is used for EDNS option types for which we have no better
    implementation.  It simply stores (and compares) the raw option data.
    """

    def __init__(self, otype, data):
        super(GenericOption, self).__init__(otype)
        self.data = data

    def to_wire(self, file):
        file.write(self.data)

    def from_wire(cls, otype, wire, current, olen):
        return cls(otype, wire[current : current + olen])

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # cmp() was removed in Python 3; this expression is the portable
        # three-way comparison of the raw option data.
        return (self.data > other.data) - (self.data < other.data)
# Maps EDNS option codes to their implementing Option subclass; codes
# without an entry fall back to GenericOption (see get_option_class).
_type_to_class = {
}
def get_option_class(otype):
    """Return the class registered for EDNS option code *otype*, falling
    back to GenericOption when no specific implementation exists."""
    cls = _type_to_class.get(otype)
    return GenericOption if cls is None else cls
def option_from_wire(otype, wire, current, olen):
    """Build an EDNS option object from wire format
    @param otype: The option type
    @type otype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param olen: The length of the wire-format option data
    @type olen: int
    @rtype: dns.edns.Option instance"""
    # Dispatch to the registered class for this option code.
    cls = get_option_class(otype)
    return cls.from_wire(otype, wire, current, olen)
| apache-2.0 |
chrisidefix/devide | modules/user/imageCurvature.py | 7 | 2555 | import gen_utils
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import vtktud
class imageCurvature(ModuleBase, NoConfigModuleMixin):
    """Calculates image curvature with VTKTUD vtkImageCurvature filter.
    You need 9 inputs, and in the following sequence: dx, dy, dz,
    dxx, dyy, dzz, dxy, dxz, dyz.
    This will output some curvature measure. The underlying filter will
    be adapted to make the raw curvature data (principal curvatures
    and directions of the isophote surface) available as well.
    All code by Joris van Zwieten. This bit of documentation by cpbotha.
    """
    def __init__(self, module_manager):
        # initialise our base class
        ModuleBase.__init__(self, module_manager)
        NoConfigModuleMixin.__init__(self)
        # The VTKTUD filter instance that performs the actual computation.
        self._imageCurvature = vtktud.vtkImageCurvature()
        # module_utils.setup_vtk_object_progress(self, self._clipPolyData,
        # 'Calculating normals')
        self._viewFrame = self._createViewFrame(
            {'ImageCurvature' : self._imageCurvature})
        # pass the data down to the underlying logic
        self.config_to_logic()
        # and all the way up from logic -> config -> view to make sure
        self.syncViewWithLogic()
    def close(self):
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)
        # this will take care of all display thingies
        NoConfigModuleMixin.close(self)
        # get rid of our reference
        del self._imageCurvature
    def get_input_descriptions(self):
        # Nine first- and second-order derivative volumes, in this order.
        return ('dx', 'dy', 'dz', 'dxx', 'dyy', 'dzz', 'dxy', 'dxz', 'dyz')
    def set_input(self, idx, inputStream):
        self._imageCurvature.SetInput(idx, inputStream)
    def get_output_descriptions(self):
        return ('vtkImageData',)
    def get_output(self, idx):
        # Single output; idx is ignored.
        return self._imageCurvature.GetOutput()
    def logic_to_config(self):
        # This module exposes no user-visible configuration.
        pass
    def config_to_logic(self):
        pass
    def view_to_config(self):
        pass
    def config_to_view(self):
        pass
    def execute_module(self):
        self._imageCurvature.Update()
    def view(self, parent_window=None):
        # if the window was visible already. just raise it
        if not self._viewFrame.Show(True):
            self._viewFrame.Raise()
| bsd-3-clause |
tgsd96/gargnotes | venv/lib/python2.7/site-packages/requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024  # chars seen before a conclusion is trustworthy
SURE_YES = 0.99  # confidence ceiling (never claim 100%)
SURE_NO = 0.01  # confidence floor for a negative answer
MINIMUM_DATA_THRESHOLD = 3  # min frequent chars before computing confidence
class CharDistributionAnalysis:
    """Base class: accumulates character-frequency statistics for a
    two-byte encoding and converts them into a detection confidence.
    Subclasses supply the frequency table and get_order()."""
    def __init__(self):
        # Mapping table to get frequency order from char order (get from
        # GetOrder())
        self._mCharToFreqOrder = None
        self._mTableSize = None # Size of above table
        # This is a constant value which varies from language to language,
        # used in calculating confidence. See
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
        self._mTypicalDistributionRatio = None
        self.reset()
    def reset(self):
        """reset analyser, clear any state"""
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False
        self._mTotalChars = 0 # Total characters encountered
        # The number of characters whose frequency order is less than 512
        self._mFreqChars = 0
    def feed(self, aBuf, aCharLen):
        """feed a character with known length"""
        if aCharLen == 2:
            # we only care about 2-bytes character in our distribution analysis
            order = self.get_order(aBuf)
        else:
            order = -1
        if order >= 0:
            self._mTotalChars += 1
            # order is valid
            if order < self._mTableSize:
                # Orders below 512 count as "frequent" characters.
                if 512 > self._mCharToFreqOrder[order]:
                    self._mFreqChars += 1
    def get_confidence(self):
        """return confidence based on existing data"""
        # if we didn't receive any character in our consideration range,
        # return negative answer
        if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
            return SURE_NO
        if self._mTotalChars != self._mFreqChars:
            r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
                 * self._mTypicalDistributionRatio))
            if r < SURE_YES:
                return r
        # normalize confidence (we don't want to be 100% sure)
        return SURE_YES
    def got_enough_data(self):
        # It is not necessary to receive all data to draw conclusion.
        # For charset detection, certain amount of data is enough
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD
    def get_order(self, aBuf):
        # We do not handle characters based on the original encoding string,
        # but convert this encoding string to a number, here called order.
        # This allows multiple encodings of a language to share one frequency
        # table.
        return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """EUC-TW specialization of the frequency analysis."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-TW two-byte chars: lead byte 0xC4-0xFE, trail byte 0xA1-0xFE.
        # Byte-range validation was already done by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """EUC-KR specialization of the frequency analysis."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-KR two-byte chars: lead byte 0xB0-0xFE, trail byte 0xA1-0xFE.
        # Byte-range validation was already done by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """GB2312 specialization of the frequency analysis."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # GB2312 two-byte chars: lead byte 0xB0-0xFE, trail byte 0xA1-0xFE.
        # Byte-range validation was already done by the state machine.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead < 0xB0 or trail < 0xA1:
            return -1
        return 94 * (lead - 0xB0) + trail - 0xA1
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Big5 specialization of the frequency analysis."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Big5 two-byte chars: lead byte 0xA4-0xFE; trail byte is either
        # 0x40-0x7E (low range) or 0xA1-0xFE (high range).
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead < 0xA4:
            return -1
        base = 157 * (lead - 0xA4)
        if trail >= 0xA1:
            # High trail range sits after the 63 low-range slots.
            return base + trail - 0xA1 + 63
        return base + trail - 0x40
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Shift_JIS specialization of the frequency analysis."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Shift_JIS lead byte is 0x81-0x9F or 0xE0-0xEF; trail bytes above
        # 0x7F are rejected (yielding -1), matching the original behavior.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if 0x81 <= lead <= 0x9F:
            base = 188 * (lead - 0x81)
        elif 0xE0 <= lead <= 0xEF:
            base = 188 * (lead - 0xE0 + 31)
        else:
            return -1
        if trail > 0x7F:
            return -1
        return base + trail - 0x40
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """EUC-JP specialization of the frequency analysis."""
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-JP two-byte chars: lead byte 0xA0-0xFE, trail byte 0xA1-0xFE.
        # Byte-range validation was already done by the state machine.
        # (Note: the lead-byte offset 0xA1 vs. the 0xA0 bound is preserved
        # from the original implementation.)
        lead = wrap_ord(aBuf[0])
        if lead < 0xA0:
            return -1
        return 94 * (lead - 0xA1) + wrap_ord(aBuf[1]) - 0xA1
| mit |
pfmoore/invoke | tests/loader.py | 1 | 2601 | import imp
import os
import sys
from spec import Spec, eq_, raises
from invoke.loader import Loader, FilesystemLoader as FSLoader
from invoke.collection import Collection
from invoke.exceptions import CollectionNotFound
from _util import support
class _BasicLoader(Loader):
    """
    Tests top level Loader behavior with basic finder stub.
    Used when we want to make sure we're testing Loader.load and not e.g.
    FilesystemLoader's specific implementation.
    """
    def find(self, name):
        # Record the (file, pathname, description) triple returned by
        # imp.find_module so tests can inspect it (e.g. check that the
        # file object was closed by load()).
        self.fd, self.path, self.desc = t = imp.find_module(name, [support])
        return t
class Loader_(Spec):
    # Spec-style suite: each method is one test case against Loader.load.
    def adds_module_parent_dir_to_sys_path(self):
        # Crummy doesn't-explode test.
        _BasicLoader().load('namespacing')
    def doesnt_dupliate_parent_dir_addition(self):
        # Loading twice must not add the support dir to sys.path twice.
        _BasicLoader().load('namespacing')
        _BasicLoader().load('namespacing')
        # If the bug is present, this will be 2 at least (and often more, since
        # other tests will pollute it (!).
        eq_(sys.path.count(support), 1)
    def closes_opened_file_object(self):
        loader = _BasicLoader()
        loader.load('foo')
        # load() must close the file object opened by imp.find_module.
        assert loader.fd.closed
    def can_load_package(self):
        loader = _BasicLoader()
        # make sure it doesn't explode
        loader.load('package')
class FilesystemLoader_(Spec):
    # Spec-style suite for the filesystem-based collection loader.
    def setup(self):
        # Fresh loader rooted at the test-support directory for each test.
        self.l = FSLoader(start=support)
    def exposes_discovery_start_point(self):
        start = '/tmp/'
        eq_(FSLoader(start=start).start, start)
    def has_a_default_discovery_start_point(self):
        eq_(FSLoader().start, os.getcwd())
    def returns_collection_object_if_name_found(self):
        result = self.l.load('foo')
        eq_(type(result), Collection)
    @raises(CollectionNotFound)
    def raises_CollectionNotFound_if_not_found(self):
        self.l.load('nope')
    @raises(ImportError)
    def raises_ImportError_if_found_collection_cannot_be_imported(self):
        # Instead of masking with a CollectionNotFound
        self.l.load('oops')
    def searches_towards_root_of_filesystem(self):
        # Loaded while root is in same dir as .py
        directly = self.l.load('foo')
        # Loaded while root is multiple dirs deeper than the .py
        deep = os.path.join(support, 'ignoreme', 'ignoremetoo')
        indirectly = FSLoader(start=deep).load('foo')
        eq_(directly, indirectly)
    def defaults_to_tasks_collection(self):
        "defaults to 'tasks' collection"
        result = FSLoader(start=support + '/implicit/').load()
        eq_(type(result), Collection)
| bsd-2-clause |
shankisg/wye | wye/organisations/forms.py | 3 | 1988 | from django import forms
# import autocomplete_light
from .models import Organisation, User
class OrganisationForm(forms.ModelForm):
    """ModelForm for creating/editing an Organisation.

    Audit and ownership fields are excluded from user input.  The
    previous no-op __init__ override (it only forwarded to super with
    unchanged arguments) has been removed as dead code.
    """

    class Meta:
        model = Organisation
        exclude = ('user', 'created_at', 'modified_at',
                   'active', 'created_by', 'modified_by')
        # widgets = {
        #     'name': autocomplete_light.TextWidget('OrganisationAutocomplete'),
        # }
class OrganisationMemberAddForm(forms.ModelForm):
    """Form for adding a member to an Organisation.

    A member may be chosen from existing users or invited by email.  The
    previous no-op __init__ override (it only forwarded to super with
    unchanged arguments) has been removed as dead code.
    """

    class Meta:
        model = Organisation
        exclude = ('user', 'created_at', 'modified_at',
                   'name', 'organisation_type', 'description',
                   'location', 'organisation_role',
                   'active', 'created_by', 'modified_by')

    # Pick an existing user...
    existing_user = forms.ModelChoiceField(queryset=User.objects.all(),
                                           required=False)
    # ...or invite a new one by email address.
    new_user = forms.EmailField(label='Invite New User', required=False)
class UserRegistrationForm(forms.ModelForm):
    """
    Form class for completing a user's registration and activating the
    User.
    The class operates on a user model which is assumed to have the required
    fields of a BaseUserModel
    """
    first_name = forms.CharField(max_length=30)
    last_name = forms.CharField(max_length=30)
    password = forms.CharField(max_length=30, widget=forms.PasswordInput)
    # NOTE(review): password/password_confirm equality is not checked in
    # this class; presumably a clean() elsewhere or the view enforces it
    # -- confirm.
    password_confirm = forms.CharField(max_length=30,
                                       widget=forms.PasswordInput)
    def __init__(self, *args, **kwargs):
        super(UserRegistrationForm, self).__init__(*args, **kwargs)
        # Clear any model-provided initial username so the user types one.
        self.initial['username'] = ''
    class Meta:
        model = User
        exclude = ('is_staff', 'is_superuser', 'is_active', 'last_login',
                   'date_joined', 'groups', 'user_permissions')
| mit |
jtux270/translate | ovirt/3.6_source/packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/provisioning/postgres.py | 7 | 7319 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Local Postgres plugin."""
import gettext
from otopi import plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import dialog
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine_common import postgres
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
    """Local Postgres plugin."""
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
        # Whether local provisioning should run; decided during setup and
        # customization stages below.
        self._enabled = False
        # NOTE(review): _renamedDBResources appears unused in this class
        # (_closeup reads self._provisioning.databaseRenamed) -- confirm
        # before removing.
        self._renamedDBResources = False
        # Helper encapsulating local postgres provisioning for the engine DB.
        self._provisioning = postgres.Provisioning(
            plugin=self,
            dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
            defaults=oenginecons.Const.DEFAULT_ENGINE_DB_ENV_KEYS,
        )
    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Default to "undecided"; _customization asks the user later.
        self.environment.setdefault(
            oengcommcons.ProvisioningEnv.POSTGRES_PROVISIONING_ENABLED,
            None
        )
    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
        after=(
            oengcommcons.Stages.DB_CONNECTION_SETUP,
        ),
        condition=lambda self: (
            not self.environment[
                osetupcons.CoreEnv.DEVELOPER_MODE
            ] and
            self.environment[
                oenginecons.EngineDBEnv.NEW_DATABASE
            ]
        ),
    )
    def _setup(self):
        # Only offer local provisioning when the host's postgres is supported.
        self._provisioning.detectCommands()
        self._enabled = self._provisioning.supported()
    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            oengcommcons.Stages.DIALOG_TITLES_E_DATABASE,
            oengcommcons.Stages.DB_CONNECTION_CUSTOMIZATION,
        ),
        after=(
            oengcommcons.Stages.DIALOG_TITLES_S_DATABASE,
        ),
        condition=lambda self: not self.environment[
            oenginecons.CoreEnv.ENABLE
        ],
        name=oenginecons.Stages.POSTGRES_PROVISIONING_ALLOWED,
    )
    def _customization_enable(self):
        # Engine not enabled on this host: never provision a local database.
        self._enabled = False
    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            oengcommcons.Stages.DIALOG_TITLES_E_DATABASE,
            oengcommcons.Stages.DB_CONNECTION_CUSTOMIZATION,
        ),
        after=(
            oenginecons.Stages.POSTGRES_PROVISIONING_ALLOWED,
        ),
        condition=lambda self: self._enabled,
    )
    def _customization(self):
        # Ask the user (only if not answered before, e.g. via answer file)
        # whether the engine DB is local and whether setup should configure
        # postgresql automatically.
        if self.environment[
            oengcommcons.ProvisioningEnv.POSTGRES_PROVISIONING_ENABLED
        ] is None:
            local = dialog.queryBoolean(
                dialog=self.dialog,
                name='OVESETUP_PROVISIONING_POSTGRES_LOCATION',
                note=_(
                    'Where is the Engine database located? '
                    '(@VALUES@) [@DEFAULT@]: '
                ),
                prompt=True,
                true=_('Local'),
                false=_('Remote'),
                default=True,
            )
            if local:
                self.environment[oenginecons.EngineDBEnv.HOST] = 'localhost'
                self.environment[
                    oenginecons.EngineDBEnv.PORT
                ] = oenginecons.Defaults.DEFAULT_DB_PORT
                # TODO:
                # consider creating database and role
                # at engine_@RANDOM@
                self.environment[
                    oengcommcons.ProvisioningEnv.POSTGRES_PROVISIONING_ENABLED
                ] = dialog.queryBoolean(
                    dialog=self.dialog,
                    name='OVESETUP_PROVISIONING_POSTGRES_ENABLED',
                    note=_(
                        'Setup can configure the local postgresql server '
                        'automatically for the engine to run. This may '
                        'conflict with existing applications.\n'
                        'Would you like Setup to automatically configure '
                        'postgresql and create Engine database, '
                        'or prefer to perform that '
                        'manually? (@VALUES@) [@DEFAULT@]: '
                    ),
                    prompt=True,
                    true=_('Automatic'),
                    false=_('Manual'),
                    default=True,
                )
            else:
                # Remote database: nothing to provision locally.
                self.environment[
                    oengcommcons.ProvisioningEnv.POSTGRES_PROVISIONING_ENABLED
                ] = False
        self._enabled = self.environment[
            oengcommcons.ProvisioningEnv.POSTGRES_PROVISIONING_ENABLED
        ]
        if self._enabled:
            self._provisioning.applyEnvironment()
    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        priority=plugin.Stages.PRIORITY_LAST,
        condition=lambda self: (
            self.environment[
                oenginecons.EngineDBEnv.HOST
            ] == 'localhost'
        ),
    )
    def _customization_firewall(self):
        # Local DB host: register the postgres firewalld service.
        self.environment[osetupcons.NetEnv.FIREWALLD_SERVICES].extend([
            {
                'name': 'ovirt-postgres',
                'directory': 'ovirt-common'
            },
        ])
    @plugin.event(
        stage=plugin.Stages.STAGE_VALIDATION,
        condition=lambda self: self._enabled,
    )
    def _validation(self):
        self._provisioning.validate()
    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        before=(
            oengcommcons.Stages.DB_CREDENTIALS_AVAILABLE_LATE,
            oengcommcons.Stages.DB_SCHEMA,
        ),
        after=(
            osetupcons.Stages.SYSTEM_SYSCTL_CONFIG_AVAILABLE,
        ),
        condition=lambda self: self._enabled,
    )
    def _misc(self):
        # Perform the actual provisioning, before schema deployment.
        self._provisioning.provision()
    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_SUMMARY,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_SUMMARY,
        ),
        condition=lambda self: self._provisioning.databaseRenamed,
    )
    def _closeup(self):
        # If provisioning had to rename database resources, tell the admin
        # the final names in the summary.
        self.dialog.note(
            text=_(
                'Engine database resources:\n'
                ' Database name: {database}\n'
                ' Database user name: {user}\n'
            ).format(
                database=self.environment[
                    oenginecons.EngineDBEnv.DATABASE
                ],
                user=self.environment[
                    oenginecons.EngineDBEnv.USER
                ],
            )
        )
| gpl-3.0 |
python-dirbtuves/Misago | misago/models/settingmodel.py | 3 | 5724 | import base64
from django.core import validators
from django.db import models
from django.utils.translation import ugettext_lazy as _
import floppyforms as forms
from misago.utils.timezones import tzlist
try:
import cPickle as pickle
except ImportError:
import pickle
class Setting(models.Model):
    """A single named site setting stored as one database row.

    The raw serialized value lives in the ``_value`` text column; the
    ``value`` property converts it to/from Python according to
    ``normalize_to`` ('array', 'integer', 'float', 'boolean', anything
    else = plain string).  ``field``/``extra`` describe the admin form
    widget used to edit the setting.
    """
    setting = models.CharField(max_length=255, primary_key=True)
    group = models.ForeignKey('SettingsGroup', to_field='key')
    # Raw serialized value; use the `value` property instead of this field.
    _value = models.TextField(db_column='value', null=True, blank=True)
    value_default = models.TextField(null=True, blank=True)
    # One of: 'array', 'integer', 'float', 'boolean'; anything else = string.
    normalize_to = models.CharField(max_length=255)
    # Admin widget type: 'yesno', 'mlist', 'select', 'choice', 'textarea', ...
    field = models.CharField(max_length=255)
    # Base64-encoded pickle of widget options (choices, min/max, ...).
    extra = models.TextField(null=True, blank=True)
    position = models.IntegerField(default=0)
    separator = models.CharField(max_length=255, null=True, blank=True)
    name = models.CharField(max_length=255)
    description = models.TextField(null=True, blank=True)

    class Meta:
        app_label = 'misago'

    def get_extra(self):
        """Deserialize the widget options stored in ``extra``.

        NOTE(review): unpickling database content is only safe while the
        database is fully trusted; ``base64.decodestring`` is the Python 2
        spelling (removed in Python 3.9).
        """
        return pickle.loads(base64.decodestring(self.extra))

    @property
    def value(self):
        """Return ``_value`` coerced according to ``normalize_to``."""
        if self.normalize_to == 'array':
            return self._value.split(',')
        if self.normalize_to == 'integer':
            return int(self._value)
        if self.normalize_to == 'float':
            return float(self._value)
        if self.normalize_to == 'boolean':
            # Booleans are stored as the strings "1"/"0".
            return self._value == "1"
        return self._value

    @value.setter
    def value(self, value):
        # Serialize according to normalize_to before storing.
        if self.normalize_to == 'array':
            self._value = ','.join(value)
        elif self.normalize_to == 'integer':
            self._value = int(value)
        elif self.normalize_to == 'float':
            self._value = float(value)
        elif self.normalize_to == 'boolean':
            self._value = 1 if value else 0
        else:
            self._value = value
        # NOTE(review): any falsy result (0, False, '') is replaced by the
        # default here -- looks like "empty means reset to default"; confirm
        # that False/0 are never legitimate stored values.
        if not self._value and self.value_default:
            self._value = self.value_default
        return self._value

    def get_field(self):
        """Build the floppyforms form field used to edit this setting."""
        from misago.forms import YesNoSwitch
        extra = self.get_extra()
        # Set validators
        field_validators = []
        if 'min' in extra:
            # 'min' means minimum length for text-like values, minimum value
            # for numeric ones.
            if self.normalize_to in ('string', 'array'):
                field_validators.append(validators.MinLengthValidator(extra['min']))
            if self.normalize_to in ('integer', 'float'):
                field_validators.append(validators.MinValueValidator(extra['min']))
        if 'max' in extra:
            if self.normalize_to in ('string', 'array'):
                field_validators.append(validators.MaxLengthValidator(extra['max']))
            if self.normalize_to in ('integer', 'float'):
                field_validators.append(validators.MaxValueValidator(extra['max']))
        # Yes-no
        if self.field == 'yesno':
            return forms.BooleanField(
                initial=self.value,
                label=_(self.name),
                help_text=_(self.description) if self.description else None,
                required=False,
                widget=YesNoSwitch,
            )
        # Multi-list
        if self.field == 'mlist':
            return forms.MultipleChoiceField(
                initial=self.value,
                label=_(self.name),
                help_text=_(self.description) if self.description else None,
                widget=forms.CheckboxSelectMultiple,
                validators=field_validators,
                required=False,
                choices=extra['choices']
            )
        # Select or choice
        if self.field == 'select' or self.field == 'choice':
            # Timezone list?
            if extra['choices'] == '#TZ#':
                extra['choices'] = tzlist()
            return forms.ChoiceField(
                initial=self.value,
                label=_(self.name),
                help_text=_(self.description) if self.description else None,
                widget=forms.RadioSelect if self.field == 'choice' else forms.Select,
                validators=field_validators,
                required=False,
                choices=extra['choices']
            )
        # Textarea
        if self.field == 'textarea':
            return forms.CharField(
                initial=self.value,
                label=_(self.name),
                help_text=_(self.description) if self.description else None,
                validators=field_validators,
                required=False,
                widget=forms.Textarea
            )
        kwargs = {
            'initial': self.value,
            'label': _(self.name),
            'help_text': _(self.description) if self.description else None,
            'validators': field_validators,
            'required': False,
        }
        # Default input
        default_input = forms.CharField
        if self.normalize_to == 'integer':
            default_input = forms.IntegerField
        if self.normalize_to == 'float':
            default_input = forms.FloatField
        # Make text-input
        return default_input(**kwargs)
| gpl-3.0 |
ppizarror/Hero-of-Antair | bin/MySQLdb/cursors.py | 1 | 18173 | """MySQLdb Cursors
This module implements Cursors of various types for MySQLdb. By
default, MySQLdb uses the Cursor class.
"""
import re
import sys
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
try:
from types import ListType, TupleType, UnicodeType
except ImportError:
# Python 3
ListType = list
TupleType = tuple
UnicodeType = str
restr = r"""
\s
values
\s*
(
\(
[^()']*
(?:
(?:
(?:\(
# ( - editor hightlighting helper
.*
\))
|
'
[^\\']*
(?:\\.[^\\']*)*
'
)
[^()']*
)*
\)
)
"""
insert_values = re.compile(restr, re.S | re.I | re.X)
class BaseCursor(object):
    """A base for Cursor classes. Useful attributes:
    description
        A tuple of DB API 7-tuples describing the columns in
        the last executed query; see PEP-249 for details.
    description_flags
        Tuple of column flags for last query, one entry per column
        in the result set. Values correspond to those in
        MySQLdb.constants.FLAG. See MySQL documentation (C API)
        for more information. Non-standard extension.
    arraysize
        default number of rows fetchmany() will fetch
    """
    # Class-level import exposes the DB API exception classes as attributes
    # of every cursor (cursor.ProgrammingError, ...), per the optional
    # PEP-249 extensions.
    from _mysql_exceptions import MySQLError, Warning, Error, InterfaceError, \
         DatabaseError, DataError, OperationalError, IntegrityError, \
         InternalError, ProgrammingError, NotSupportedError

    # Server-side cursor subclasses set this to True to postpone warning
    # checks until the result set has been fully consumed.
    _defer_warnings = False

    def __init__(self, connection):
        from weakref import proxy
        # A weak proxy avoids a connection <-> cursor reference cycle.
        self.connection = proxy(connection)
        self.description = None
        self.description_flags = None
        self.rowcount = -1
        self.arraysize = 1
        self._executed = None
        self.lastrowid = None
        self.messages = []
        self.errorhandler = connection.errorhandler
        self._result = None
        self._warnings = 0
        self._info = None
        self.rownumber = None

    def __del__(self):
        self.close()
        self.errorhandler = None
        self._result = None

    def close(self):
        """Close the cursor. No further queries will be possible."""
        if not self.connection: return
        # Drain any remaining result sets so the connection is reusable.
        while self.nextset(): pass
        self.connection = None

    def _check_executed(self):
        # Fetch methods call this so using an unexecuted cursor fails fast.
        if not self._executed:
            self.errorhandler(self, ProgrammingError, "execute() first")

    def _warning_check(self):
        from warnings import warn
        if self._warnings:
            warnings = self._get_db().show_warnings()
            if warnings:
                # This is done in two loops in case
                # Warnings are set to raise exceptions.
                for w in warnings:
                    self.messages.append((self.Warning, w))
                for w in warnings:
                    warn(w[-1], self.Warning, 3)
            elif self._info:
                self.messages.append((self.Warning, self._info))
                warn(self._info, self.Warning, 3)

    def nextset(self):
        """Advance to the next result set.
        Returns None if there are no more result sets.
        """
        if self._executed:
            # Current result set must be exhausted before moving on.
            self.fetchall()
        del self.messages[:]
        db = self._get_db()
        nr = db.next_result()
        if nr == -1:
            # -1 from the C API means no further result sets.
            return None
        self._do_get_result()
        self._post_get_result()
        self._warning_check()
        return 1

    def _post_get_result(self):
        # Hook for mix-ins (e.g. client-side storage materializes rows here).
        pass

    def _do_get_result(self):
        # Refresh all per-result metadata after a query or nextset().
        db = self._get_db()
        self._result = self._get_result()
        self.rowcount = db.affected_rows()
        self.rownumber = 0
        self.description = self._result and self._result.describe() or None
        self.description_flags = self._result and self._result.field_flags() or None
        self.lastrowid = db.insert_id()
        self._warnings = db.warning_count()
        self._info = db.info()

    def setinputsizes(self, *args):
        """Does nothing, required by DB API."""

    def setoutputsizes(self, *args):
        """Does nothing, required by DB API."""

    def _get_db(self):
        if not self.connection:
            self.errorhandler(self, ProgrammingError, "cursor closed")
        return self.connection

    def execute(self, query, args=None):
        """Execute a query.
        query -- string, query to execute on server
        args -- optional sequence or mapping, parameters to use with query.
        Note: If args is a sequence, then %s must be used as the
        parameter placeholder in the query. If a mapping is used,
        %(key)s must be used as the placeholder.
        Returns long integer rows affected, if any
        """
        del self.messages[:]
        db = self._get_db()
        # Encode unicode queries with the connection's charset (Python 2).
        if isinstance(query, unicode):
            query = query.encode(db.unicode_literal.charset)
        if args is not None:
            # Client-side interpolation: each parameter is escaped via
            # db.literal() before being %-formatted into the query.
            if isinstance(args, dict):
                query = query % dict((key, db.literal(item))
                                     for key, item in args.iteritems())
            else:
                query = query % tuple([db.literal(item) for item in args])
        try:
            r = None
            r = self._query(query)
        except TypeError, m:
            # %-formatting raises TypeError for placeholder/argument
            # mismatches; translate those into ProgrammingError.
            if m.args[0] in ("not enough arguments for format string",
                             "not all arguments converted"):
                self.messages.append((ProgrammingError, m.args[0]))
                self.errorhandler(self, ProgrammingError, m.args[0])
            else:
                self.messages.append((TypeError, m))
                self.errorhandler(self, TypeError, m)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            exc, value, tb = sys.exc_info()
            del tb
            self.messages.append((exc, value))
            self.errorhandler(self, exc, value)
        self._executed = query
        if not self._defer_warnings: self._warning_check()
        return r

    def executemany(self, query, args):
        """Execute a multi-row query.
        query -- string, query to execute on server
        args
            Sequence of sequences or mappings, parameters to use with
            query.
        Returns long integer rows affected, if any.
        This method improves performance on multiple-row INSERT and
        REPLACE. Otherwise it is equivalent to looping over args with
        execute().
        """
        del self.messages[:]
        db = self._get_db()
        if not args: return
        if isinstance(query, unicode):
            query = query.encode(db.unicode_literal.charset)
        m = insert_values.search(query)
        if not m:
            # Not an INSERT/REPLACE ... VALUES statement: fall back to
            # executing once per parameter row.
            r = 0
            for a in args:
                r = r + self.execute(query, a)
            return r
        # Expand the VALUES (...) clause once per parameter row and send a
        # single multi-row statement.
        p = m.start(1)
        e = m.end(1)
        qv = m.group(1)
        try:
            q = []
            for a in args:
                if isinstance(a, dict):
                    q.append(qv % dict((key, db.literal(item))
                                       for key, item in a.iteritems()))
                else:
                    q.append(qv % tuple([db.literal(item) for item in a]))
        except TypeError, msg:
            if msg.args[0] in ("not enough arguments for format string",
                               "not all arguments converted"):
                self.errorhandler(self, ProgrammingError, msg.args[0])
            else:
                self.errorhandler(self, TypeError, msg)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            exc, value, tb = sys.exc_info()
            del tb
            self.errorhandler(self, exc, value)
        r = self._query('\n'.join([query[:p], ',\n'.join(q), query[e:]]))
        if not self._defer_warnings: self._warning_check()
        return r

    def callproc(self, procname, args=()):
        """Execute stored procedure procname with args
        procname -- string, name of procedure to execute on server
        args -- Sequence of parameters to use with procedure
        Returns the original args.
        Compatibility warning: PEP-249 specifies that any modified
        parameters must be returned. This is currently impossible
        as they are only available by storing them in a server
        variable and then retrieved by a query. Since stored
        procedures return zero or more result sets, there is no
        reliable way to get at OUT or INOUT parameters via callproc.
        The server variables are named @_procname_n, where procname
        is the parameter above and n is the position of the parameter
        (from zero). Once all result sets generated by the procedure
        have been fetched, you can issue a SELECT @_procname_0, ...
        query using .execute() to get any OUT or INOUT values.
        Compatibility warning: The act of calling a stored procedure
        itself creates an empty result set. This appears after any
        result sets generated by the procedure. This is non-standard
        behavior with respect to the DB-API. Be sure to use nextset()
        to advance through all result sets; otherwise you may get
        disconnected.
        """
        db = self._get_db()
        # Stage each argument in a server-side user variable @_procname_n.
        for index, arg in enumerate(args):
            q = "SET @_%s_%d=%s" % (procname, index,
                                    db.literal(arg))
            if isinstance(q, unicode):
                q = q.encode(db.unicode_literal.charset)
            self._query(q)
            self.nextset()
        q = "CALL %s(%s)" % (procname,
                             ','.join(['@_%s_%d' % (procname, i)
                                       for i in range(len(args))]))
        if type(q) is UnicodeType:
            q = q.encode(db.unicode_literal.charset)
        self._query(q)
        self._executed = q
        if not self._defer_warnings: self._warning_check()
        return args

    def _do_query(self, q):
        db = self._get_db()
        self._last_executed = q
        db.query(q)
        self._do_get_result()
        return self.rowcount

    def _query(self, q):
        return self._do_query(q)

    def _fetch_row(self, size=1):
        if not self._result:
            return ()
        # _fetch_type is supplied by the row-format mix-ins (tuple/dict).
        return self._result.fetch_row(size, self._fetch_type)

    def __iter__(self):
        return iter(self.fetchone, None)

    # Re-exported exception classes (see class-level import above).
    Warning = Warning
    Error = Error
    InterfaceError = InterfaceError
    DatabaseError = DatabaseError
    DataError = DataError
    OperationalError = OperationalError
    IntegrityError = IntegrityError
    InternalError = InternalError
    ProgrammingError = ProgrammingError
    NotSupportedError = NotSupportedError
class CursorStoreResultMixIn(object):
    """Mix-in that materializes the entire result set on the client side,
    i.e. it uses mysql_store_result().  For potentially huge result sets
    consider adding a LIMIT clause or using CursorUseResultMixIn instead."""

    def _get_result(self):
        # Pull the complete result set from the server in one go.
        return self._get_db().store_result()

    def _query(self, q):
        row_count = self._do_query(q)
        self._post_get_result()
        return row_count

    def _post_get_result(self):
        # Fetch every row up front, then release the low-level handle.
        self._rows = self._fetch_row(0)
        self._result = None

    def fetchone(self):
        """Fetches a single row from the cursor. None indicates that
        no more rows are available."""
        self._check_executed()
        if self.rownumber >= len(self._rows):
            return None
        row = self._rows[self.rownumber]
        self.rownumber += 1
        return row

    def fetchmany(self, size=None):
        """Fetch up to size rows from the cursor. Result set may be smaller
        than size. If size is not defined, cursor.arraysize is used."""
        self._check_executed()
        stop = self.rownumber + (size or self.arraysize)
        chunk = self._rows[self.rownumber:stop]
        self.rownumber = min(stop, len(self._rows))
        return chunk

    def fetchall(self):
        """Fetchs all available rows from the cursor."""
        self._check_executed()
        remaining = self._rows[self.rownumber:] if self.rownumber else self._rows
        self.rownumber = len(self._rows)
        return remaining

    def scroll(self, value, mode='relative'):
        """Scroll the cursor in the result set to a new position according
        to mode.
        If mode is 'relative' (default), value is taken as offset to
        the current position in the result set, if set to 'absolute',
        value states an absolute target position."""
        self._check_executed()
        if mode == 'relative':
            target = self.rownumber + value
        elif mode == 'absolute':
            target = value
        else:
            self.errorhandler(self, ProgrammingError,
                              "unknown scroll mode %s" % repr(mode))
        if target < 0 or target >= len(self._rows):
            self.errorhandler(self, IndexError, "out of range")
        self.rownumber = target

    def __iter__(self):
        self._check_executed()
        # Historical and/or idiom preserved: once the cursor is exhausted
        # (non-zero rownumber, empty remainder) iteration restarts over the
        # full row list, exactly as the original expression behaved.
        rest = self._rows[self.rownumber:]
        pending = rest if (self.rownumber and rest) else self._rows
        return iter(pending)
class CursorUseResultMixIn(object):
    """Mix-in that leaves the result set on the server and streams rows to
    the client one chunk at a time, i.e. it uses mysql_use_result().  You
    MUST retrieve the entire result set and close() the cursor before any
    further queries can be performed on the connection."""

    # Warnings can only be inspected once the server has sent all rows.
    _defer_warnings = True

    def _get_result(self):
        return self._get_db().use_result()

    def fetchone(self):
        """Fetches a single row from the cursor."""
        self._check_executed()
        rows = self._fetch_row(1)
        if not rows:
            self._warning_check()
            return None
        self.rownumber += 1
        return rows[0]

    def fetchmany(self, size=None):
        """Fetch up to size rows from the cursor. Result set may be smaller
        than size. If size is not defined, cursor.arraysize is used."""
        self._check_executed()
        rows = self._fetch_row(size or self.arraysize)
        self.rownumber += len(rows)
        if not rows:
            self._warning_check()
        return rows

    def fetchall(self):
        """Fetchs all available rows from the cursor."""
        self._check_executed()
        rows = self._fetch_row(0)
        self.rownumber += len(rows)
        self._warning_check()
        return rows

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; iteration ends at the first None row.
        row = self.fetchone()
        if row is None:
            raise StopIteration
        return row
class CursorTupleRowsMixIn(object):
    """This is a MixIn class that causes all rows to be returned as tuples,
    which is the standard form required by DB API."""
    # _mysql fetch_row() result type 0 = tuples.
    _fetch_type = 0
class CursorDictRowsMixIn(object):
    """Mix-in that makes every row come back as a dictionary keyed by
    column name.  This is a non-standard DB API extension."""

    # _mysql fetch_row() result type 1 = column-name keyed dictionaries.
    _fetch_type = 1

    def fetchoneDict(self):
        """Fetch a single row as a dictionary. Deprecated:
        Use fetchone() instead. Will be removed in 1.3."""
        import warnings
        warnings.warn("fetchoneDict() is non-standard and will be removed in 1.3",
                      DeprecationWarning, 2)
        return self.fetchone()

    def fetchmanyDict(self, size=None):
        """Fetch several rows as a list of dictionaries. Deprecated:
        Use fetchmany() instead. Will be removed in 1.3."""
        import warnings
        warnings.warn("fetchmanyDict() is non-standard and will be removed in 1.3",
                      DeprecationWarning, 2)
        return self.fetchmany(size)

    def fetchallDict(self):
        """Fetch all available rows as a list of dictionaries. Deprecated:
        Use fetchall() instead. Will be removed in 1.3."""
        import warnings
        warnings.warn("fetchallDict() is non-standard and will be removed in 1.3",
                      DeprecationWarning, 2)
        return self.fetchall()
class CursorOldDictRowsMixIn(CursorDictRowsMixIn):
    """This is a MixIn class that returns rows as dictionaries with
    the same key convention as the old Mysqldb (MySQLmodule). Don't
    use this."""
    # _mysql fetch_row() result type 2 = old-style 'table.column' keyed dicts.
    _fetch_type = 2
# Concrete cursor classes: each combines a result-storage strategy
# (client-side store vs. server-side use) with a row format (tuple vs. dict).
class Cursor(CursorStoreResultMixIn, CursorTupleRowsMixIn,
             BaseCursor):
    """This is the standard Cursor class that returns rows as tuples
    and stores the result set in the client."""

class DictCursor(CursorStoreResultMixIn, CursorDictRowsMixIn,
                 BaseCursor):
    """This is a Cursor class that returns rows as dictionaries and
    stores the result set in the client."""

class SSCursor(CursorUseResultMixIn, CursorTupleRowsMixIn,
               BaseCursor):
    """This is a Cursor class that returns rows as tuples and stores
    the result set in the server."""

class SSDictCursor(CursorUseResultMixIn, CursorDictRowsMixIn,
                   BaseCursor):
    """This is a Cursor class that returns rows as dictionaries and
    stores the result set in the server."""
| gpl-2.0 |
ducngtuan/my-python3-koans-solution | python3/koans/about_method_bindings.py | 1 | 2834 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
def function():
    """Module-level helper whose attributes the koans below probe."""
    return "pineapple"
def function2():
    """Second module-level helper; the koans attach attributes to it."""
    return "tractor"
class Class:
    """Minimal class whose bound method the binding koans inspect."""

    def method(self):
        return "parrot"
class AboutMethodBindings(Koan):
    """Koans exploring how functions, bound methods and descriptors bind."""
    def test_methods_are_bound_to_an_object(self):
        obj = Class()
        self.assertEqual(True, obj.method.__self__ == obj)
    def test_methods_are_also_bound_to_a_function(self):
        obj = Class()
        self.assertEqual('parrot', obj.method())
        # __func__ is the underlying plain function; it can be called with
        # an explicit instance argument.
        self.assertEqual('parrot', obj.method.__func__(obj))
    def test_functions_have_attributes(self):
        obj = Class()
        # NOTE(review): the attribute counts (34 here, 26 below) depend on
        # the exact CPython version these koans were solved on.
        self.assertEqual(34, len(dir(function)))
        self.assertEqual(True, dir(function) == dir(obj.method.__func__))
    def test_methods_have_different_attributes(self):
        obj = Class()
        self.assertEqual(26, len(dir(obj.method)))
    def test_setting_attributes_on_an_unbound_function(self):
        function.cherries = 3
        self.assertEqual(3, function.cherries)
    def test_setting_attributes_on_a_bound_method_directly(self):
        obj = Class()
        # Bound method objects are transient wrappers; they reject new
        # attributes.
        with self.assertRaises(AttributeError): obj.method.cherries = 3
    def test_setting_attributes_on_methods_by_accessing_the_inner_function(self):
        obj = Class()
        obj.method.__func__.cherries = 3
        self.assertEqual(3, obj.method.cherries)
    def test_functions_can_have_inner_functions(self):
        function2.get_fruit = function
        self.assertEqual('pineapple', function2.get_fruit())
    def test_inner_functions_are_unbound(self):
        function2.get_fruit = function
        # A function stored as an attribute of another function is never
        # bound, so it has no __self__.
        with self.assertRaises(AttributeError): cls = function2.get_fruit.__self__
    # ------------------------------------------------------------------
    class BoundClass:
        # Non-data descriptor: attribute access on the owner runs __get__.
        def __get__(self, obj, cls):
            return (self, obj, cls)
    binding = BoundClass()
    def test_get_descriptor_resolves_attribute_binding(self):
        bound_obj, binding_owner, owner_type = self.binding
        # Look at BoundClass.__get__():
        # bound_obj = self
        # binding_owner = obj
        # owner_type = cls
        self.assertEqual('BoundClass', type(bound_obj).__name__)
        self.assertEqual('AboutMethodBindings', type(binding_owner).__name__)
        self.assertEqual(AboutMethodBindings, owner_type)
    # ------------------------------------------------------------------
    class SuperColor:
        # Data descriptor: assignment to the attribute runs __set__ instead
        # of rebinding the attribute.
        def __init__(self):
            self.choice = None
        def __set__(self, obj, val):
            self.choice = val
    color = SuperColor()
    def test_set_descriptor_changes_behavior_of_attribute_assignment_changes(self):
        self.assertEqual(None, self.color.choice)
        self.color = 'purple'
        self.assertEqual('purple', self.color.choice)
| mit |
Catstyle/mongoengine | tests/queryset/visitor.py | 31 | 11227 | import sys
sys.path[0:0] = [""]
import unittest
from bson import ObjectId
from datetime import datetime
from mongoengine import *
from mongoengine.queryset import Q
from mongoengine.errors import InvalidQueryError
__all__ = ("QTest",)
class QTest(unittest.TestCase):
    """Integration tests for mongoengine Q-object query composition.

    NOTE(review): these tests talk to a live MongoDB ('mongoenginetest' on
    the default local port) and are written for Python 2 (xrange below).
    """
    def setUp(self):
        connect(db='mongoenginetest')
        class Person(Document):
            name = StringField()
            age = IntField()
            meta = {'allow_inheritance': True}
        Person.drop_collection()
        self.Person = Person
    def test_empty_q(self):
        """Ensure that empty Q objects won't hurt.
        """
        q1 = Q()
        q2 = Q(age__gte=18)
        q3 = Q()
        q4 = Q(name='test')
        q5 = Q()
        class Person(Document):
            name = StringField()
            age = IntField()
        # Empty Qs must vanish from both OR and AND combinations.
        query = {'$or': [{'age': {'$gte': 18}}, {'name': 'test'}]}
        self.assertEqual((q1 | q2 | q3 | q4 | q5).to_query(Person), query)
        query = {'age': {'$gte': 18}, 'name': 'test'}
        self.assertEqual((q1 & q2 & q3 & q4 & q5).to_query(Person), query)
    def test_q_with_dbref(self):
        """Ensure Q objects handle DBRefs correctly"""
        connect(db='mongoenginetest')
        class User(Document):
            pass
        class Post(Document):
            created_user = ReferenceField(User)
        user = User.objects.create()
        Post.objects.create(created_user=user)
        self.assertEqual(Post.objects.filter(created_user=user).count(), 1)
        self.assertEqual(Post.objects.filter(Q(created_user=user)).count(), 1)
    def test_and_combination(self):
        """Ensure that Q-objects correctly AND together.
        """
        class TestDoc(Document):
            x = IntField()
            y = StringField()
        # Two conditions on the same field need an explicit $and.
        query = (Q(x__lt=7) & Q(x__lt=3)).to_query(TestDoc)
        self.assertEqual(query, {'$and': [{'x': {'$lt': 7}}, {'x': {'$lt': 3}}]})
        query = (Q(y="a") & Q(x__lt=7) & Q(x__lt=3)).to_query(TestDoc)
        self.assertEqual(query, {'$and': [{'y': "a"}, {'x': {'$lt': 7}}, {'x': {'$lt': 3}}]})
        # Check normal cases work without an error
        query = Q(x__lt=7) & Q(x__gt=3)
        q1 = Q(x__lt=7)
        q2 = Q(x__gt=3)
        # Distinct operators on one field merge into a single document.
        query = (q1 & q2).to_query(TestDoc)
        self.assertEqual(query, {'x': {'$lt': 7, '$gt': 3}})
        # More complex nested example
        query = Q(x__lt=100) & Q(y__ne='NotMyString')
        query &= Q(y__in=['a', 'b', 'c']) & Q(x__gt=-100)
        mongo_query = {
            'x': {'$lt': 100, '$gt': -100},
            'y': {'$ne': 'NotMyString', '$in': ['a', 'b', 'c']},
        }
        self.assertEqual(query.to_query(TestDoc), mongo_query)
    def test_or_combination(self):
        """Ensure that Q-objects correctly OR together.
        """
        class TestDoc(Document):
            x = IntField()
        q1 = Q(x__lt=3)
        q2 = Q(x__gt=7)
        query = (q1 | q2).to_query(TestDoc)
        self.assertEqual(query, {
            '$or': [
                {'x': {'$lt': 3}},
                {'x': {'$gt': 7}},
            ]
        })
    def test_and_or_combination(self):
        """Ensure that Q-objects handle ANDing ORed components.
        """
        class TestDoc(Document):
            x = IntField()
            y = BooleanField()
        TestDoc.drop_collection()
        query = (Q(x__gt=0) | Q(x__exists=False))
        query &= Q(x__lt=100)
        self.assertEqual(query.to_query(TestDoc), {'$and': [
            {'$or': [{'x': {'$gt': 0}},
                     {'x': {'$exists': False}}]},
            {'x': {'$lt': 100}}]
        })
        q1 = (Q(x__gt=0) | Q(x__exists=False))
        q2 = (Q(x__lt=100) | Q(y=True))
        query = (q1 & q2).to_query(TestDoc)
        TestDoc(x=101).save()
        TestDoc(x=10).save()
        TestDoc(y=True).save()
        self.assertEqual(query,
                         {'$and': [
                             {'$or': [{'x': {'$gt': 0}}, {'x': {'$exists': False}}]},
                             {'$or': [{'x': {'$lt': 100}}, {'y': True}]}
                         ]})
        # x=101 fails q2's x<100 but matches y-missing?  No: only the two
        # docs satisfying both OR groups are expected.
        self.assertEqual(2, TestDoc.objects(q1 & q2).count())
    def test_or_and_or_combination(self):
        """Ensure that Q-objects handle ORing ANDed ORed components. :)
        """
        class TestDoc(Document):
            x = IntField()
            y = BooleanField()
        TestDoc.drop_collection()
        TestDoc(x=-1, y=True).save()
        TestDoc(x=101, y=True).save()
        TestDoc(x=99, y=False).save()
        TestDoc(x=101, y=False).save()
        q1 = (Q(x__gt=0) & (Q(y=True) | Q(y__exists=False)))
        q2 = (Q(x__lt=100) & (Q(y=False) | Q(y__exists=False)))
        query = (q1 | q2).to_query(TestDoc)
        self.assertEqual(query,
                         {'$or': [
                             {'$and': [{'x': {'$gt': 0}},
                                       {'$or': [{'y': True}, {'y': {'$exists': False}}]}]},
                             {'$and': [{'x': {'$lt': 100}},
                                       {'$or': [{'y': False}, {'y': {'$exists': False}}]}]}
                         ]}
                         )
        self.assertEqual(2, TestDoc.objects(q1 | q2).count())
    def test_multiple_occurence_in_field(self):
        class Test(Document):
            name = StringField(max_length=40)
            title = StringField(max_length=40)
        q1 = Q(name__contains='te') | Q(title__contains='te')
        q2 = Q(name__contains='12') | Q(title__contains='12')
        q3 = q1 & q2
        # ANDed OR-groups must keep each group intact under $and.
        query = q3.to_query(Test)
        self.assertEqual(query["$and"][0], q1.to_query(Test))
        self.assertEqual(query["$and"][1], q2.to_query(Test))
    def test_q_clone(self):
        class TestDoc(Document):
            x = IntField()
        TestDoc.drop_collection()
        # Python 2 only (xrange).
        for i in xrange(1, 101):
            t = TestDoc(x=i)
            t.save()
        # Check normal cases work without an error
        test = TestDoc.objects(Q(x__lt=7) & Q(x__gt=3))
        self.assertEqual(test.count(), 3)
        test2 = test.clone()
        self.assertEqual(test2.count(), 3)
        # A clone is an independent queryset object.
        self.assertFalse(test2 == test)
        test3 = test2.filter(x=6)
        self.assertEqual(test3.count(), 1)
        self.assertEqual(test.count(), 3)
    def test_q(self):
        """Ensure that Q objects may be used to query for documents.
        """
        class BlogPost(Document):
            title = StringField()
            publish_date = DateTimeField()
            published = BooleanField()
        BlogPost.drop_collection()
        post1 = BlogPost(title='Test 1', publish_date=datetime(2010, 1, 8), published=False)
        post1.save()
        post2 = BlogPost(title='Test 2', publish_date=datetime(2010, 1, 15), published=True)
        post2.save()
        post3 = BlogPost(title='Test 3', published=True)
        post3.save()
        post4 = BlogPost(title='Test 4', publish_date=datetime(2010, 1, 8))
        post4.save()
        post5 = BlogPost(title='Test 1', publish_date=datetime(2010, 1, 15))
        post5.save()
        post6 = BlogPost(title='Test 1', published=False)
        post6.save()
        # Check ObjectId lookup works
        obj = BlogPost.objects(id=post1.id).first()
        self.assertEqual(obj, post1)
        # Check Q object combination with one does not exist
        q = BlogPost.objects(Q(title='Test 5') | Q(published=True))
        posts = [post.id for post in q]
        published_posts = (post2, post3)
        self.assertTrue(all(obj.id in posts for obj in published_posts))
        q = BlogPost.objects(Q(title='Test 1') | Q(published=True))
        posts = [post.id for post in q]
        published_posts = (post1, post2, post3, post5, post6)
        self.assertTrue(all(obj.id in posts for obj in published_posts))
        # Check Q object combination
        date = datetime(2010, 1, 10)
        q = BlogPost.objects(Q(publish_date__lte=date) | Q(published=True))
        posts = [post.id for post in q]
        published_posts = (post1, post2, post3, post4)
        self.assertTrue(all(obj.id in posts for obj in published_posts))
        self.assertFalse(any(obj.id in posts for obj in [post5, post6]))
        BlogPost.drop_collection()
        # Check the 'in' operator
        self.Person(name='user1', age=20).save()
        self.Person(name='user2', age=20).save()
        self.Person(name='user3', age=30).save()
        self.Person(name='user4', age=40).save()
        self.assertEqual(self.Person.objects(Q(age__in=[20])).count(), 2)
        self.assertEqual(self.Person.objects(Q(age__in=[20, 30])).count(), 3)
        # Test invalid query objs
        def wrong_query_objs():
            self.Person.objects('user1')
        # NOTE(review): identical body to wrong_query_objs -- presumably this
        # was meant to call .filter('user1'); verify upstream intent.
        def wrong_query_objs_filter():
            self.Person.objects('user1')
        self.assertRaises(InvalidQueryError, wrong_query_objs)
        self.assertRaises(InvalidQueryError, wrong_query_objs_filter)
    def test_q_regex(self):
        """Ensure that Q objects can be queried using regexes.
        """
        person = self.Person(name='Guido van Rossum')
        person.save()
        import re
        obj = self.Person.objects(Q(name=re.compile('^Gui'))).first()
        self.assertEqual(obj, person)
        obj = self.Person.objects(Q(name=re.compile('^gui'))).first()
        self.assertEqual(obj, None)
        # Case-insensitive flag is honoured.
        obj = self.Person.objects(Q(name=re.compile('^gui', re.I))).first()
        self.assertEqual(obj, person)
        obj = self.Person.objects(Q(name__not=re.compile('^bob'))).first()
        self.assertEqual(obj, person)
        obj = self.Person.objects(Q(name__not=re.compile('^Gui'))).first()
        self.assertEqual(obj, None)
    def test_q_lists(self):
        """Ensure that Q objects query ListFields correctly.
        """
        class BlogPost(Document):
            tags = ListField(StringField())
        BlogPost.drop_collection()
        BlogPost(tags=['python', 'mongo']).save()
        BlogPost(tags=['python']).save()
        self.assertEqual(BlogPost.objects(Q(tags='mongo')).count(), 1)
        self.assertEqual(BlogPost.objects(Q(tags='python')).count(), 2)
        BlogPost.drop_collection()
    def test_q_merge_queries_edge_case(self):
        class User(Document):
            email = EmailField(required=False)
            name = StringField()
        User.drop_collection()
        pk = ObjectId()
        User(email='example@example.com', pk=pk).save()
        # filter() applied after limit() must still merge with the Q query.
        self.assertEqual(1, User.objects.filter(Q(email='example@example.com') |
                                                Q(name='John Doe')).limit(2).filter(pk=pk).count())
    def test_chained_q_or_filtering(self):
        class Post(EmbeddedDocument):
            name = StringField(required=True)
        class Item(Document):
            postables = ListField(EmbeddedDocumentField(Post))
        Item.drop_collection()
        Item(postables=[Post(name="a"), Post(name="b")]).save()
        Item(postables=[Post(name="a"), Post(name="c")]).save()
        Item(postables=[Post(name="a"), Post(name="b"), Post(name="c")]).save()
        # ANDing embedded-document conditions and chaining filter() must be
        # equivalent.
        self.assertEqual(Item.objects(Q(postables__name="a") & Q(postables__name="b")).count(), 2)
        self.assertEqual(Item.objects.filter(postables__name="a").filter(postables__name="b").count(), 2)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| mit |
zshanwei/zshanwei.github.io | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 1812 | 9537 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# Maps each supported gyp target type to the file extension of its primary
# Windows output (both DLL-producing types share 'dll').
TARGET_TYPE_EXT = {
  'executable': 'exe',
  'loadable_module': 'dll',
  'shared_library': 'dll',
  'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
  """Add a shard number to the end of a target.
  Arguments:
    name: name of the target (foo#target)
    number: shard number
  Returns:
    Target name with shard added (foo_1#target)
  """
  shard_suffix = str(number)
  return _SuffixName(name, shard_suffix)
def ShardTargets(target_list, target_dicts):
  """Shard some targets apart to work around the linkers limits.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  Returns:
    Tuple of the new sharded versions of the inputs.
  """
  def shard_name(name, number):
    # foo#target -> foo_<number>#target (inlined _ShardName).
    pieces = name.rsplit('#', 1)
    pieces[0] = '%s_%s' % (pieces[0], number)
    return '#'.join(pieces)

  # Gather the targets to shard, and how many pieces each wants.
  shard_counts = {}
  for name in target_dicts:
    count = int(target_dicts[name].get('msvs_shard', 0))
    if count:
      shard_counts[name] = count

  # Expand the target list, replacing each sharded target with its shards.
  new_target_list = []
  for name in target_list:
    if name in shard_counts:
      new_target_list.extend(
          shard_name(name, i) for i in range(shard_counts[name]))
    else:
      new_target_list.append(name)

  # Expand the target dicts, distributing sources round-robin over shards.
  new_target_dicts = {}
  for name, props in target_dicts.items():
    if name not in shard_counts:
      new_target_dicts[name] = props
      continue
    count = shard_counts[name]
    for i in range(count):
      sharded = shard_name(name, i)
      shard_dict = copy.copy(props)
      shard_dict['target_name'] = shard_name(shard_dict['target_name'], i)
      # Every shard gets the i-th, (i+count)-th, ... source file.
      shard_dict['sources'] = shard_dict.get('sources', [])[i::count]
      new_target_dicts[sharded] = shard_dict

  # Rewrite dependencies on a sharded target to point at every shard.
  for props in new_target_dicts.values():
    for deptype in ('dependencies', 'dependencies_original'):
      new_deps = []
      for dep in copy.copy(props.get(deptype, [])):
        if dep in shard_counts:
          new_deps.extend(
              shard_name(dep, i) for i in range(shard_counts[dep]))
        else:
          new_deps.append(dep)
      props[deptype] = new_deps

  return (new_target_list, new_target_dicts)
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
def InsertLargePdbShims(target_list, target_dicts, vars):
  """Insert a shim target that forces the linker to use 4KB pagesize PDBs.

  This is a workaround for targets with PDBs greater than 1GB in size, the
  limit for the 1KB pagesize PDBs created by the linker by default.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    vars: A dictionary of common GYP variables with generator-specific values.
  Returns:
    Tuple of the shimmed version of the inputs.
  """
  # Determine which targets need shimming.
  targets_to_shim = []
  for t in target_dicts:
    target_dict = target_dicts[t]
    # We only want to shim targets that have msvs_large_pdb enabled.
    if not int(target_dict.get('msvs_large_pdb', 0)):
      continue
    # This is intended for executable, shared_library and loadable_module
    # targets where every configuration is set up to produce a PDB output.
    # If any of these conditions is not true then the shim logic will fail
    # below.
    targets_to_shim.append(t)
  large_pdb_shim_cc = _GetLargePdbShimCcPath()
  for t in targets_to_shim:
    target_dict = target_dicts[t]
    target_name = target_dict.get('target_name')
    base_dict = _DeepCopySomeKeys(target_dict,
        ['configurations', 'default_configuration', 'toolset'])
    # This is the dict for copying the source file (part of the GYP tree)
    # to the intermediate directory of the project. This is necessary because
    # we can't always build a relative path to the shim source file (on Windows
    # GYP and the project may be on different drives), and Ninja hates absolute
    # paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYPs tree).
    copy_suffix = 'large_pdb_copy'
    copy_target_name = target_name + '_' + copy_suffix
    full_copy_target_name = _SuffixName(t, copy_suffix)
    shim_cc_basename = os.path.basename(large_pdb_shim_cc)
    shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
    shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
    copy_dict = copy.deepcopy(base_dict)
    copy_dict['target_name'] = copy_target_name
    copy_dict['type'] = 'none'
    copy_dict['sources'] = [ large_pdb_shim_cc ]
    copy_dict['copies'] = [{
      'destination': shim_cc_dir,
      'files': [ large_pdb_shim_cc ]
    }]
    # This is the dict for the PDB generating shim target. It depends on the
    # copy target.
    shim_suffix = 'large_pdb_shim'
    shim_target_name = target_name + '_' + shim_suffix
    full_shim_target_name = _SuffixName(t, shim_suffix)
    shim_dict = copy.deepcopy(base_dict)
    shim_dict['target_name'] = shim_target_name
    shim_dict['type'] = 'static_library'
    shim_dict['sources'] = [ shim_cc_path ]
    shim_dict['dependencies'] = [ full_copy_target_name ]
    # Set up the shim to output its PDB to the same location as the final
    # linker target.
    # BUG FIX: this previously used dict.iteritems(), which does not exist on
    # Python 3 and raised AttributeError there. dict.items() iterates the
    # same pairs on both Python 2 and Python 3.
    for config_name, config in shim_dict.get('configurations').items():
      pdb_path = _GetPdbPath(target_dict, config_name, vars)
      # A few keys that we don't want to propagate.
      for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
        config.pop(key, None)
      msvs = config.setdefault('msvs_settings', {})
      # Update the compiler directives in the shim target.
      compiler = msvs.setdefault('VCCLCompilerTool', {})
      compiler['DebugInformationFormat'] = '3'
      compiler['ProgramDataBaseFileName'] = pdb_path
      # Set the explicit PDB path in the appropriate configuration of the
      # original target.
      config = target_dict['configurations'][config_name]
      msvs = config.setdefault('msvs_settings', {})
      linker = msvs.setdefault('VCLinkerTool', {})
      linker['GenerateDebugInformation'] = 'true'
      linker['ProgramDatabaseFile'] = pdb_path
    # Add the new targets. They must go to the beginning of the list so that
    # the dependency generation works as expected in ninja.
    target_list.insert(0, full_copy_target_name)
    target_list.insert(0, full_shim_target_name)
    target_dicts[full_copy_target_name] = copy_dict
    target_dicts[full_shim_target_name] = shim_dict
    # Update the original target to depend on the shim target.
    target_dict.setdefault('dependencies', []).append(full_shim_target_name)
  return (target_list, target_dicts)
| mit |
Azure/azure-sdk-for-python | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2015_04_01/operations/_operations.py | 1 | 3943 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
    """Operations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    NOTE: this class is AutoRest-generated code; manual edits will be lost
    when the client is regenerated.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~$(python-base-namespace).v2015_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to issue the HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.OperationListResult"
        """Lists all of the available operations from Microsoft.Insights provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OperationListResult, or the result of cls(response)
        :rtype: ~$(python-base-namespace).v2015_04_01.models.OperationListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OperationListResult"]
        # Map well-known auth/404/409 status codes to typed exceptions; the
        # caller may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-04-01"
        accept = "application/json"

        # Construct URL
        url = self.list.metadata['url']  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Issue the GET through the client's HTTP pipeline (synchronous).
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an ARM-formatted error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('OperationListResult', pipeline_response)

        # Optional response hook: lets the caller post-process the raw response.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/providers/microsoft.insights/operations'}  # type: ignore
| mit |
themiken/mtasa-blue | vendor/google-breakpad/src/third_party/protobuf/protobuf/gtest/run_tests.py | 199 | 2336 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Runs the specified tests for Google Test.
This script requires Python 2.3 or higher. To learn the usage, run it
with -h.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(__file__) or '.'
sys.path.append(os.path.join(SCRIPT_DIR, 'test'))
import run_tests_util
def _Main():
  """Runs all tests for Google Test."""
  options, args = run_tests_util.ParseArgs('gtest')
  runner = run_tests_util.TestRunner(script_dir=SCRIPT_DIR)
  selected = runner.GetTestsToRun(args,
                                  options.configurations,
                                  options.built_configurations)
  if not selected:
    sys.exit(1)  # Incorrect parameters given, abort execution.
  sys.exit(runner.RunTests(selected[0], selected[1]))


if __name__ == '__main__':
  _Main()
| gpl-3.0 |
myarjunar/QGIS | python/plugins/processing/tools/system.py | 2 | 4090 | # -*- coding: utf-8 -*-
"""
***************************************************************************
py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import time
import sys
import uuid
import math
from qgis.PyQt.QtCore import QDir
from qgis.core import QgsApplication
numExported = 1
def userFolder():
    """Return the per-user 'processing' settings folder, creating it on demand."""
    user_dir = os.path.join(QgsApplication.qgisSettingsDirPath(), 'processing')
    if not QDir(user_dir).exists():
        QDir().mkpath(user_dir)
    return str(QDir.toNativeSeparators(user_dir))
def defaultOutputFolder():
    """Return the default 'outputs' folder under the user folder, creating it on demand."""
    out_dir = os.path.join(userFolder(), 'outputs')
    if not QDir(out_dir).exists():
        QDir().mkpath(out_dir)
    return str(QDir.toNativeSeparators(out_dir))
def isWindows():
    """Return True when running on a native Windows ('nt') Python."""
    return os.name == 'nt'
def isMac():
    """Return True when running on macOS."""
    return sys.platform == 'darwin'
_tempFolderSuffix = uuid.uuid4().hex
def tempFolder():
    """Return this session's processing temp folder, creating it on demand.

    The folder name embeds a per-process random suffix so concurrent QGIS
    sessions do not share temporary outputs.
    """
    temp_dir = os.path.join(str(QDir.tempPath()),
                            'processing' + _tempFolderSuffix)
    if not QDir(temp_dir).exists():
        QDir().mkpath(temp_dir)
    return str(os.path.abspath(temp_dir))
def setTempOutput(out, alg):
    """Point the output *out* of algorithm *alg* at a fresh temporary location."""
    if hasattr(out, 'directory'):
        # Directory-style outputs get a whole temp directory.
        out.value = getTempDirInTempFolder()
        return
    ext = out.getDefaultFileExtension(alg)
    out.value = getTempFilenameInTempFolder(out.name + '.' + ext)
def getTempFilename(ext=None):
    """Return a unique filename (with optional extension *ext*) inside the temp folder.

    Uniqueness comes from the current time (seconds + microseconds, hex)
    plus a monotonically increasing export counter.
    """
    now = time.time()
    whole = math.floor(now)
    uid = '{:8x}{:05x}'.format(whole, int((now - whole) * 1000000))
    suffix = '' if ext is None else '.' + ext
    return os.path.join(tempFolder(),
                        '{}{}{}'.format(uid, getNumExportedLayers(), suffix))
def getTempFilenameInTempFolder(basename):
    """Returns a temporary filename for a given file, putting it into
    a temp folder but not changing its basename.

    A fresh uuid-named subdirectory guarantees the sanitized basename
    never collides with an earlier export.
    """
    unique_dir = os.path.join(tempFolder(), uuid.uuid4().hex)
    mkdir(unique_dir)
    return os.path.join(unique_dir, removeInvalidChars(basename))
def getTempDirInTempFolder():
    """Returns a temporary directory, putting it into a temp folder."""
    unique_dir = os.path.join(tempFolder(), uuid.uuid4().hex)
    mkdir(unique_dir)
    return unique_dir
def removeInvalidChars(string):
    """Strip every character that is not ASCII alphanumeric, ':' or '.'."""
    allowed = \
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:.'
    return ''.join(ch for ch in string if ch in allowed)
def getNumExportedLayers():
    """Increment and return the module-global exported-layers counter."""
    global numExported
    numExported = numExported + 1
    return numExported
def mkdir(newdir):
    """Recursively create *newdir*, stripping stray whitespace/newlines first.

    Does nothing when the directory already exists.
    """
    target = newdir.strip('\n\r ')
    if os.path.isdir(target):
        return
    head, tail = os.path.split(target)
    if head and not os.path.isdir(head):
        mkdir(head)
    if tail:
        os.mkdir(target)
def escapeAndJoin(strList):
    """Join command-line tokens into a single string, quoting as needed.

    Tokens containing a space (and not starting with '-', i.e. not options)
    are wrapped in double quotes, with backslashes and embedded quotes
    escaped.

    :param strList: list of string tokens.
    :return: the joined command-line string.
    """
    joined = ''
    for s in strList:
        # BUG FIX: guard with a truthiness check -- the original indexed
        # s[0] unconditionally and raised IndexError on an empty token.
        if s and s[0] != '-' and ' ' in s:
            escaped = '"' + s.replace('\\', '\\\\').replace('"', '\\"') \
                + '"'
        else:
            escaped = s
        joined += escaped + ' '
    return joined.strip()
| gpl-2.0 |
SnappleCap/oh-mainline | vendor/packages/Jinja2/jinja2/environment.py | 614 | 47244 | # -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.nodes import EvalContext
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound, TemplateRuntimeError
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode
from jinja2._compat import imap, ifilter, string_types, iteritems, \
text_type, reraise, implements_iterator, implements_to_string, \
get_next, encode_filename, PY2, PYPY
from functools import reduce
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
    """Return a new spontaneous environment.  A spontaneous environment is an
    unnamed and unaccessible (in theory) environment that is used for
    templates generated from a string and not from the file system.
    """
    # Unhashable constructor arguments can never be cached.
    try:
        env = _spontaneous_environments.get(args)
    except TypeError:
        return Environment(*args)
    if env is not None:
        return env
    env = Environment(*args)
    _spontaneous_environments[args] = env
    env.shared = True
    return env
def create_cache(size):
    """Return the cache class for the given size.

    A negative size means an unbounded dict cache, zero disables caching,
    and a positive size yields a bounded LRU cache.
    """
    if size < 0:
        return {}
    if size == 0:
        return None
    return LRUCache(size)
def copy_cache(cache):
    """Create an empty copy of the given cache, preserving its kind and capacity."""
    if cache is None:
        return None
    if type(cache) is dict:
        return {}
    return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
    """Load the extensions from the list and bind it to the environment.
    Returns a dict of instantiated environments.
    """
    result = {}
    for ext in extensions:
        # Import-path strings are resolved to the actual extension class.
        if isinstance(ext, string_types):
            ext = import_string(ext)
        result[ext.identifier] = ext(environment)
    return result
def _environment_sanity_check(environment):
    """Perform a sanity check on the environment.

    Returns the environment unchanged so callers can tail-call it.
    """
    assert issubclass(environment.undefined, Undefined), 'undefined must ' \
        'be a subclass of undefined because filters depend on it.'
    # BUG FIX: the original used a chained comparison (a != b != c), which
    # only checks a != b and b != c -- block_start_string equal to
    # comment_start_string slipped through, contradicting the message below.
    assert environment.block_start_string != \
        environment.variable_start_string and \
        environment.block_start_string != \
        environment.comment_start_string and \
        environment.variable_start_string != \
        environment.comment_start_string, 'block, variable and comment ' \
        'start strings must be different'
    assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
        'newline_sequence set to unknown line ending string.'
    return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here the possible initialization parameters:
`block_start_string`
The string marking the begin of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the begin of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the begin of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
based comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (ie: file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
             block_start_string=BLOCK_START_STRING,
             block_end_string=BLOCK_END_STRING,
             variable_start_string=VARIABLE_START_STRING,
             variable_end_string=VARIABLE_END_STRING,
             comment_start_string=COMMENT_START_STRING,
             comment_end_string=COMMENT_END_STRING,
             line_statement_prefix=LINE_STATEMENT_PREFIX,
             line_comment_prefix=LINE_COMMENT_PREFIX,
             trim_blocks=TRIM_BLOCKS,
             lstrip_blocks=LSTRIP_BLOCKS,
             newline_sequence=NEWLINE_SEQUENCE,
             keep_trailing_newline=KEEP_TRAILING_NEWLINE,
             extensions=(),
             optimized=True,
             undefined=Undefined,
             finalize=None,
             autoescape=False,
             loader=None,
             cache_size=50,
             auto_reload=True,
             bytecode_cache=None):
    """Initialize the environment.  See the class docstring for the
    meaning of every parameter.
    """
    # !!Important notice!!
    # The constructor accepts quite a few arguments that should be
    # passed by keyword rather than position.  However it's important to
    # not change the order of arguments because it's used at least
    # internally in those cases:
    #     -   spontaneous environments (i18n extension and Template)
    #     -   unittests
    # If parameter changes are required only add parameters at the end
    # and don't change the arguments (or the defaults!) of the arguments
    # existing already.

    # lexer / parser information
    self.block_start_string = block_start_string
    self.block_end_string = block_end_string
    self.variable_start_string = variable_start_string
    self.variable_end_string = variable_end_string
    self.comment_start_string = comment_start_string
    self.comment_end_string = comment_end_string
    self.line_statement_prefix = line_statement_prefix
    self.line_comment_prefix = line_comment_prefix
    self.trim_blocks = trim_blocks
    self.lstrip_blocks = lstrip_blocks
    self.newline_sequence = newline_sequence
    self.keep_trailing_newline = keep_trailing_newline

    # runtime information
    self.undefined = undefined
    self.optimized = optimized
    self.finalize = finalize
    self.autoescape = autoescape

    # defaults: copies so per-environment mutation never leaks into the
    # shared module-level default tables.
    self.filters = DEFAULT_FILTERS.copy()
    self.tests = DEFAULT_TESTS.copy()
    self.globals = DEFAULT_NAMESPACE.copy()

    # set the loader provided
    self.loader = loader
    self.cache = create_cache(cache_size)
    self.bytecode_cache = bytecode_cache
    self.auto_reload = auto_reload

    # load extensions
    self.extensions = load_extensions(self, extensions)

    _environment_sanity_check(self)
def add_extension(self, extension):
    """Adds an extension after the environment was created.

    .. versionadded:: 2.5
    """
    loaded = load_extensions(self, [extension])
    self.extensions.update(loaded)
def extend(self, **attributes):
    """Add the items to the instance of the environment if they do not exist
    yet.  This is used by :ref:`extensions <writing-extensions>` to register
    callbacks and configuration values without breaking inheritance.
    """
    for key, value in iteritems(attributes):
        # Existing attributes always win so subclasses/overlays keep theirs.
        if hasattr(self, key):
            continue
        setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
            variable_start_string=missing, variable_end_string=missing,
            comment_start_string=missing, comment_end_string=missing,
            line_statement_prefix=missing, line_comment_prefix=missing,
            trim_blocks=missing, lstrip_blocks=missing,
            extensions=missing, optimized=missing,
            undefined=missing, finalize=missing, autoescape=missing,
            loader=missing, cache_size=missing, auto_reload=missing,
            bytecode_cache=missing):
    """Create a new overlay environment that shares all the data with the
    current environment except of cache and the overridden attributes.
    Extensions cannot be removed for an overlayed environment.  An overlayed
    environment automatically gets all the extensions of the environment it
    is linked to plus optional extra extensions.

    Creating overlays should happen after the initial environment was set
    up completely.  Not all attributes are truly linked, some are just
    copied over so modifications on the original environment may not shine
    through.
    """
    # NOTE: locals() must capture exactly the keyword arguments above --
    # do not introduce any local variable before this line, or it would be
    # copied onto the overlay as an attribute.
    args = dict(locals())
    del args['self'], args['cache_size'], args['extensions']

    # Clone without running __init__; the overlay starts as a shallow copy
    # of this environment's attribute dict.
    rv = object.__new__(self.__class__)
    rv.__dict__.update(self.__dict__)
    rv.overlayed = True
    rv.linked_to = self

    # Only attributes explicitly passed (not left at the `missing`
    # sentinel) override the inherited values.
    for key, value in iteritems(args):
        if value is not missing:
            setattr(rv, key, value)

    # The template cache is never shared with the parent environment.
    if cache_size is not missing:
        rv.cache = create_cache(cache_size)
    else:
        rv.cache = copy_cache(self.cache)

    # Re-bind every inherited extension to the overlay, then load extras.
    rv.extensions = {}
    for key, value in iteritems(self.extensions):
        rv.extensions[key] = value.bind(rv)
    if extensions is not missing:
        rv.extensions.update(load_extensions(rv, extensions))

    return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
    """Iterates over the extensions by priority."""
    by_priority = sorted(self.extensions.values(),
                         key=lambda ext: ext.priority)
    return iter(by_priority)
def getitem(self, obj, argument):
    """Get an item or attribute of an object but prefer the item."""
    try:
        return obj[argument]
    except (TypeError, LookupError):
        pass
    # Item lookup failed: fall back to attribute access for string keys.
    if isinstance(argument, string_types):
        try:
            attr = str(argument)
        except Exception:
            pass
        else:
            try:
                return getattr(obj, attr)
            except AttributeError:
                pass
    return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
    """Get an item or attribute of an object but prefer the attribute.
    Unlike :meth:`getitem` the attribute *must* be a bytestring.
    """
    try:
        # Builtin getattr -- the method name does not shadow it here.
        return getattr(obj, attribute)
    except AttributeError:
        try:
            return obj[attribute]
        except (TypeError, LookupError, AttributeError):
            return self.undefined(obj=obj, name=attribute)
def call_filter(self, name, value, args=None, kwargs=None,
                context=None, eval_ctx=None):
    """Invokes a filter on a value the same way the compiler does it.

    .. versionadded:: 2.7
    """
    func = self.filters.get(name)
    if func is None:
        raise TemplateRuntimeError('no filter named %r' % name)
    call_args = [value]
    call_args.extend(args or ())
    # Context-aware filters get their extra first argument prepended.
    if getattr(func, 'contextfilter', False):
        if context is None:
            raise TemplateRuntimeError('Attempted to invoke context '
                                       'filter without context')
        call_args.insert(0, context)
    elif getattr(func, 'evalcontextfilter', False):
        if eval_ctx is None:
            eval_ctx = context.eval_ctx if context is not None \
                else EvalContext(self)
        call_args.insert(0, eval_ctx)
    elif getattr(func, 'environmentfilter', False):
        call_args.insert(0, self)
    return func(*call_args, **(kwargs or {}))
def call_test(self, name, value, args=None, kwargs=None):
    """Invokes a test on a value the same way the compiler does it.

    .. versionadded:: 2.7
    """
    func = self.tests.get(name)
    if func is None:
        raise TemplateRuntimeError('no test named %r' % name)
    call_args = args or ()
    call_kwargs = kwargs or {}
    return func(value, *call_args, **call_kwargs)
@internalcode
def parse(self, source, name=None, filename=None):
    """Parse the sourcecode and return the abstract syntax tree.  This
    tree of nodes is used by the compiler to convert the template into
    executable source- or bytecode.  This is useful for debugging or to
    extract information from templates.

    If you are :ref:`developing Jinja2 extensions <writing-extensions>`
    this gives you a good overview of the node tree generated.
    """
    try:
        return self._parse(source, name, filename)
    except TemplateSyntaxError:
        # Delegate to the (possibly user-installed) exception handler.
        details = sys.exc_info()
        self.handle_exception(details, source_hint=source)
def _parse(self, source, name, filename):
    """Internal parsing function used by `parse` and `compile`."""
    parser = Parser(self, source, name, encode_filename(filename))
    return parser.parse()
def lex(self, source, name=None, filename=None):
    """Lex the given sourcecode and return a generator that yields
    tokens as tuples in the form ``(lineno, token_type, value)``.
    This can be useful for :ref:`extension development <writing-extensions>`
    and debugging templates.

    This does not perform preprocessing.  If you want the preprocessing
    of the extensions to be applied you have to filter source through
    the :meth:`preprocess` method.
    """
    source = text_type(source)
    try:
        return self.lexer.tokeniter(source, name, filename)
    except TemplateSyntaxError:
        # Delegate to the (possibly user-installed) exception handler.
        details = sys.exc_info()
        self.handle_exception(details, source_hint=source)
def preprocess(self, source, name=None, filename=None):
    """Preprocesses the source with all extensions.  This is automatically
    called for all parsing and compiling methods but *not* for :meth:`lex`
    because there you usually only want the actual source tokenized.
    """
    # Thread the source through every extension's preprocess hook in order.
    return reduce(lambda s, e: e.preprocess(s, name, filename),
                  self.iter_extensions(), text_type(source))
def _tokenize(self, source, name, filename=None, state=None):
    """Called by the parser to do the preprocessing and filtering
    for all the extensions.  Returns a :class:`~jinja2.lexer.TokenStream`.
    """
    source = self.preprocess(source, name, filename)
    stream = self.lexer.tokenize(source, name, filename, state)
    for ext in self.iter_extensions():
        stream = ext.filter_stream(stream)
        # Extensions may return a plain iterable; normalize back to a
        # TokenStream so downstream filters and the parser get one API.
        if not isinstance(stream, TokenStream):
            stream = TokenStream(stream, name, filename)
    return stream
def _generate(self, source, name, filename, defer_init=False):
    """Internal hook that can be overridden to hook a different generate
    method in.

    .. versionadded:: 2.5
    """
    return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec')
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
            defer_init=False):
    """Compile a node or template source code.  The `name` parameter is
    the load name of the template after it was joined using
    :meth:`join_path` if necessary, not the filename on the file system.
    the `filename` parameter is the estimated filename of the template on
    the file system.  If the template came from a database or memory this
    can be omitted.

    The return value of this method is a python code object.  If the `raw`
    parameter is `True` the return value will be a string with python
    code equivalent to the bytecode returned otherwise.  This method is
    mainly used internally.

    `defer_init` is use internally to aid the module code generator.  This
    causes the generated code to be able to import without the global
    environment variable to be set.

    .. versionadded:: 2.4
       `defer_init` parameter added.
    """
    source_hint = None
    try:
        if isinstance(source, string_types):
            # Only remember the raw text for error reporting when we were
            # actually handed source text (not an already-parsed node).
            source_hint = source
            source = self._parse(source, name, filename)
        if self.optimized:
            source = optimize(source, self)
        source = self._generate(source, name, filename,
                                defer_init=defer_init)
        if raw:
            return source
        if filename is None:
            filename = '<template>'
        else:
            filename = encode_filename(filename)
        return self._compile(source, filename)
    except TemplateSyntaxError:
        # handle_exception() re-raises a rewritten exception.
        exc_info = sys.exc_info()
    self.handle_exception(exc_info, source_hint=source_hint)
def compile_expression(self, source, undefined_to_none=True):
    """A handy helper method that returns a callable that accepts keyword
    arguments that appear as variables in the expression.  If called it
    returns the result of the expression.

    This is useful if applications want to use the same rules as Jinja
    in template "configuration files" or similar situations.

    Example usage:

    >>> env = Environment()
    >>> expr = env.compile_expression('foo == 42')
    >>> expr(foo=23)
    False
    >>> expr(foo=42)
    True

    Per default the return value is converted to `None` if the
    expression returns an undefined value.  This can be changed
    by setting `undefined_to_none` to `False`.

    >>> env.compile_expression('var')() is None
    True
    >>> env.compile_expression('var', undefined_to_none=False)()
    Undefined

    .. versionadded:: 2.1
    """
    parser = Parser(self, source, state='variable')
    exc_info = None
    try:
        expr = parser.parse_expression()
        # Anything left in the stream means the input was more than a
        # single expression (e.g. ``"1 2"``) -- reject it.
        if not parser.stream.eos:
            raise TemplateSyntaxError('chunk after expression',
                                      parser.stream.current.lineno,
                                      None, None)
        expr.set_environment(self)
    except TemplateSyntaxError:
        exc_info = sys.exc_info()
    if exc_info is not None:
        self.handle_exception(exc_info, source_hint=source)
    # Wrap the expression in a minimal template that stores the result in
    # a context variable, which TemplateExpression later reads back.
    body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
    template = self.from_string(nodes.Template(body, lineno=1))
    return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
                      zip='deflated', log_function=None,
                      ignore_errors=True, py_compile=False):
    """Finds all the templates the loader can find, compiles them
    and stores them in `target`.  If `zip` is `None`, instead of in a
    zipfile, the templates will be will be stored in a directory.
    By default a deflate zip algorithm is used, to switch to
    the stored algorithm, `zip` can be set to ``'stored'``.

    `extensions` and `filter_func` are passed to :meth:`list_templates`.
    Each template returned will be compiled to the target folder or
    zipfile.

    By default template compilation errors are ignored.  In case a
    log function is provided, errors are logged.  If you want template
    syntax errors to abort the compilation you can set `ignore_errors`
    to `False` and you will get an exception on syntax errors.

    If `py_compile` is set to `True` .pyc files will be written to the
    target instead of standard .py files.  This flag does not do anything
    on pypy and Python 3 where pyc files are not picked up by itself and
    don't give much benefit.

    .. versionadded:: 2.4
    """
    from jinja2.loaders import ModuleLoader

    if log_function is None:
        log_function = lambda x: None

    if py_compile:
        if not PY2 or PYPY:
            from warnings import warn
            warn(Warning('py_compile has no effect on pypy or Python 3'))
            py_compile = False
        else:
            import imp, marshal
            # A .pyc file is the import magic followed by the source mtime;
            # 0xffffffff forces the cached module to always be considered
            # up to date.
            py_header = imp.get_magic() + \
                u'\xff\xff\xff\xff'.encode('iso-8859-15')

            # Python 3.3 added a source filesize to the header
            if sys.version_info >= (3, 3):
                py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')

    def write_file(filename, data, mode):
        # Writes either into the open zip archive or into the target
        # directory, depending on the `zip` mode chosen above.
        if zip:
            info = ZipInfo(filename)
            info.external_attr = 0o755 << 16
            zip_file.writestr(info, data)
        else:
            f = open(os.path.join(target, filename), mode)
            try:
                f.write(data)
            finally:
                f.close()

    if zip is not None:
        from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
        zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
                                             stored=ZIP_STORED)[zip])
        log_function('Compiling into Zip archive "%s"' % target)
    else:
        if not os.path.isdir(target):
            os.makedirs(target)
        log_function('Compiling into folder "%s"' % target)

    try:
        for name in self.list_templates(extensions, filter_func):
            source, filename, _ = self.loader.get_source(self, name)
            try:
                code = self.compile(source, name, filename, True, True)
            except TemplateSyntaxError as e:
                if not ignore_errors:
                    raise
                log_function('Could not compile "%s": %s' % (name, e))
                continue

            filename = ModuleLoader.get_module_filename(name)

            if py_compile:
                c = self._compile(code, encode_filename(filename))
                write_file(filename + 'c', py_header +
                           marshal.dumps(c), 'wb')
                log_function('Byte-compiled "%s" as %s' %
                             (name, filename + 'c'))
            else:
                write_file(filename, code, 'w')
                log_function('Compiled "%s" as %s' % (name, filename))
    finally:
        if zip:
            zip_file.close()

    log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
    """Returns a list of templates for this environment.  This requires
    that the loader supports the loader's
    :meth:`~BaseLoader.list_templates` method.

    If there are other files in the template folder besides the
    actual templates, the returned list can be filtered.  There are two
    ways: either `extensions` is set to a list of file extensions for
    templates, or a `filter_func` can be provided which is a callable that
    is passed a template name and should return `True` if it should end up
    in the result list.

    If the loader does not support that, a :exc:`TypeError` is raised.

    .. versionadded:: 2.4
    """
    names = self.loader.list_templates()

    if extensions is not None:
        if filter_func is not None:
            raise TypeError('either extensions or filter_func '
                            'can be passed, but not both')
        # Translate the extension list into an equivalent filter_func.
        filter_func = lambda x: '.' in x and \
            x.rsplit('.', 1)[1] in extensions

    if filter_func is not None:
        names = ifilter(filter_func, names)

    return names
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
    """Exception handling helper.  This is used internally to either raise
    rewritten exceptions or return a rendered traceback for the template.
    """
    global _make_traceback

    if exc_info is None:
        exc_info = sys.exc_info()

    # the debugging module is imported when it's used for the first time.
    # we're doing a lot of stuff there and for applications that do not
    # get any exceptions in template rendering there is no need to load
    # all of that.
    if _make_traceback is None:
        from jinja2.debug import make_traceback as _make_traceback

    traceback = _make_traceback(exc_info, source_hint)
    if rendered and self.exception_formatter is not None:
        return self.exception_formatter(traceback)
    if self.exception_handler is not None:
        self.exception_handler(traceback)
    exc_type, exc_value, tb = traceback.standard_exc_info
    reraise(exc_type, exc_value, tb)
def join_path(self, template, parent):
    """Join a template with the parent.  By default all the lookups are
    relative to the loader root so this method returns the `template`
    parameter unchanged, but if the paths should be relative to the
    parent template, this function can be used to calculate the real
    template name.

    Subclasses may override this method and implement template path
    joining here.
    """
    return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
    """Load a template from the loader.  If a loader is configured this
    method ask the loader for the template and returns a :class:`Template`.
    If the `parent` parameter is not `None`, :meth:`join_path` is called
    to get the real template name before loading.

    The `globals` parameter can be used to provide template wide globals.
    These variables are available in the context at render time.

    If the template does not exist a :exc:`TemplateNotFound` exception is
    raised.

    .. versionchanged:: 2.4
       If `name` is a :class:`Template` object it is returned from the
       function unchanged.
    """
    if isinstance(name, Template):
        return name
    if parent is not None:
        name = self.join_path(name, parent)
    return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
    """Works like :meth:`get_template` but tries a number of templates
    before it fails.  If it cannot find any of the templates, it will
    raise a :exc:`TemplatesNotFound` exception.

    .. versionadded:: 2.3

    .. versionchanged:: 2.4
       If `names` contains a :class:`Template` object it is returned
       from the function unchanged.
    """
    if not names:
        raise TemplatesNotFound(message=u'Tried to select from an empty list '
                                        u'of templates.')
    globals = self.make_globals(globals)
    for name in names:
        if isinstance(name, Template):
            return name
        if parent is not None:
            name = self.join_path(name, parent)
        try:
            return self._load_template(name, globals)
        except TemplateNotFound:
            # Try the next candidate; only fail after all names missed.
            pass
    raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
                           parent=None, globals=None):
    """Does a typecheck and dispatches to :meth:`select_template`
    if an iterable of template names is given, otherwise to
    :meth:`get_template`.

    .. versionadded:: 2.3
    """
    if isinstance(template_name_or_list, string_types):
        return self.get_template(template_name_or_list, parent, globals)
    elif isinstance(template_name_or_list, Template):
        return template_name_or_list
    return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
    """Load a template from a string.  This parses the source given and
    returns a :class:`Template` object.
    """
    globals = self.make_globals(globals)
    cls = template_class or self.template_class
    return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
    """Return a dict for the globals.

    The environment-wide globals are merged with `d`; entries in `d`
    shadow equally named environment globals.  If `d` is empty or
    `None`, the environment's globals dict itself is returned (shared,
    not copied) to avoid an unnecessary allocation.
    """
    if not d:
        return self.globals
    return dict(self.globals, **d)
class Template(object):
    """The central template object.  This class represents a compiled template
    and is used to evaluate it.

    Normally the template object is generated from an :class:`Environment` but
    it also has a constructor that makes it possible to create a template
    instance directly using the constructor.  It takes the same arguments as
    the environment constructor but it's not possible to specify a loader.

    Every template object has a few methods and members that are guaranteed
    to exist.  However it's important that a template object should be
    considered immutable.  Modifications on the object are not supported.

    Template objects created from the constructor rather than an environment
    do have an `environment` attribute that points to a temporary environment
    that is probably shared with other templates created with the constructor
    and compatible settings.

    >>> template = Template('Hello {{ name }}!')
    >>> template.render(name='John Doe')
    u'Hello John Doe!'

    >>> stream = template.stream(name='John Doe')
    >>> stream.next()
    u'Hello John Doe!'
    >>> stream.next()
    Traceback (most recent call last):
        ...
    StopIteration
    """

    def __new__(cls, source,
                block_start_string=BLOCK_START_STRING,
                block_end_string=BLOCK_END_STRING,
                variable_start_string=VARIABLE_START_STRING,
                variable_end_string=VARIABLE_END_STRING,
                comment_start_string=COMMENT_START_STRING,
                comment_end_string=COMMENT_END_STRING,
                line_statement_prefix=LINE_STATEMENT_PREFIX,
                line_comment_prefix=LINE_COMMENT_PREFIX,
                trim_blocks=TRIM_BLOCKS,
                lstrip_blocks=LSTRIP_BLOCKS,
                newline_sequence=NEWLINE_SEQUENCE,
                keep_trailing_newline=KEEP_TRAILING_NEWLINE,
                extensions=(),
                optimized=True,
                undefined=Undefined,
                finalize=None,
                autoescape=False):
        # A stand-alone Template borrows a shared, memoized environment
        # with compatible settings instead of building a fresh one.
        env = get_spontaneous_environment(
            block_start_string, block_end_string, variable_start_string,
            variable_end_string, comment_start_string, comment_end_string,
            line_statement_prefix, line_comment_prefix, trim_blocks,
            lstrip_blocks, newline_sequence, keep_trailing_newline,
            frozenset(extensions), optimized, undefined, finalize, autoescape,
            None, 0, False, None)
        return env.from_string(source, template_class=cls)

    @classmethod
    def from_code(cls, environment, code, globals, uptodate=None):
        """Creates a template object from compiled code and the globals.  This
        is used by the loaders and environment to create a template object.
        """
        namespace = {
            'environment': environment,
            '__file__': code.co_filename
        }
        exec(code, namespace)
        rv = cls._from_namespace(environment, namespace, globals)
        rv._uptodate = uptodate
        return rv

    @classmethod
    def from_module_dict(cls, environment, module_dict, globals):
        """Creates a template object from a module.  This is used by the
        module loader to create a template object.

        .. versionadded:: 2.4
        """
        return cls._from_namespace(environment, module_dict, globals)

    @classmethod
    def _from_namespace(cls, environment, namespace, globals):
        # Builds a Template without invoking __new__ (which would compile
        # source); all state comes from the executed module namespace.
        t = object.__new__(cls)
        t.environment = environment
        t.globals = globals
        t.name = namespace['name']
        t.filename = namespace['__file__']
        t.blocks = namespace['blocks']

        # render function and module
        t.root_render_func = namespace['root']
        t._module = None

        # debug and loader helpers
        t._debug_info = namespace['debug_info']
        t._uptodate = None

        # store the reference
        namespace['environment'] = environment
        namespace['__jinja_template__'] = t

        return t

    def render(self, *args, **kwargs):
        """This method accepts the same arguments as the `dict` constructor:
        A dict, a dict subclass or some keyword arguments.  If no arguments
        are given the context will be empty.  These two calls do the same::

            template.render(knights='that say nih')
            template.render({'knights': 'that say nih'})

        This will return the rendered template as unicode string.
        """
        vars = dict(*args, **kwargs)
        try:
            return concat(self.root_render_func(self.new_context(vars)))
        except Exception:
            exc_info = sys.exc_info()
        return self.environment.handle_exception(exc_info, True)

    def stream(self, *args, **kwargs):
        """Works exactly like :meth:`generate` but returns a
        :class:`TemplateStream`.
        """
        return TemplateStream(self.generate(*args, **kwargs))

    def generate(self, *args, **kwargs):
        """For very large templates it can be useful to not render the whole
        template at once but evaluate each statement after another and yield
        piece for piece.  This method basically does exactly that and returns
        a generator that yields one item after another as unicode strings.

        It accepts the same arguments as :meth:`render`.
        """
        vars = dict(*args, **kwargs)
        try:
            for event in self.root_render_func(self.new_context(vars)):
                yield event
        except Exception:
            exc_info = sys.exc_info()
        else:
            return
        # Only reached on error: yield whatever the exception handler
        # decides to emit (normally it re-raises a rewritten exception).
        yield self.environment.handle_exception(exc_info, True)

    def new_context(self, vars=None, shared=False, locals=None):
        """Create a new :class:`Context` for this template.  The vars
        provided will be passed to the template.  Per default the globals
        are added to the context.  If shared is set to `True` the data
        is passed as it to the context without adding the globals.

        `locals` can be a dict of local variables for internal usage.
        """
        return new_context(self.environment, self.name, self.blocks,
                           vars, shared, self.globals, locals)

    def make_module(self, vars=None, shared=False, locals=None):
        """This method works like the :attr:`module` attribute when called
        without arguments but it will evaluate the template on every call
        rather than caching it.  It's also possible to provide
        a dict which is then used as context.  The arguments are the same
        as for the :meth:`new_context` method.
        """
        return TemplateModule(self, self.new_context(vars, shared, locals))

    @property
    def module(self):
        """The template as module.  This is used for imports in the
        template runtime but is also useful if one wants to access
        exported template variables from the Python layer:

        >>> t = Template('{% macro foo() %}42{% endmacro %}23')
        >>> unicode(t.module)
        u'23'
        >>> t.module.foo()
        u'42'
        """
        if self._module is not None:
            return self._module
        self._module = rv = self.make_module()
        return rv

    def get_corresponding_lineno(self, lineno):
        """Return the source line number of a line number in the
        generated bytecode as they are not in sync.
        """
        for template_line, code_line in reversed(self.debug_info):
            if code_line <= lineno:
                return template_line
        return 1

    @property
    def is_up_to_date(self):
        """If this variable is `False` there is a newer version available."""
        if self._uptodate is None:
            return True
        return self._uptodate()

    @property
    def debug_info(self):
        """The debug info mapping."""
        return [tuple(imap(int, x.split('='))) for x in
                self._debug_info.split('&')]

    def __repr__(self):
        if self.name is None:
            name = 'memory:%x' % id(self)
        else:
            name = repr(self.name)
        return '<%s %s>' % (self.__class__.__name__, name)
@implements_to_string
class TemplateModule(object):
    """Represents an imported template.  All the exported names of the
    template are available as attributes on this object.  Additionally
    converting it into an unicode- or bytestrings renders the contents.
    """

    def __init__(self, template, context):
        # Rendering happens eagerly here; the body is kept as a list of
        # events so it can be concatenated repeatedly without re-rendering.
        self._body_stream = list(template.root_render_func(context))
        self.__dict__.update(context.get_exported())
        self.__name__ = template.name

    def __html__(self):
        return Markup(concat(self._body_stream))

    def __str__(self):
        return concat(self._body_stream)

    def __repr__(self):
        if self.__name__ is None:
            name = 'memory:%x' % id(self)
        else:
            name = repr(self.__name__)
        return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
    """The :meth:`jinja2.Environment.compile_expression` method returns an
    instance of this object.  It encapsulates the expression-like access
    to the template with an expression it wraps.
    """

    def __init__(self, template, undefined_to_none):
        self._template = template
        self._undefined_to_none = undefined_to_none

    def __call__(self, *args, **kwargs):
        # The wrapped template stores the expression result into the
        # context variable ``result`` (see compile_expression).
        context = self._template.new_context(dict(*args, **kwargs))
        consume(self._template.root_render_func(context))
        rv = context.vars['result']
        if self._undefined_to_none and isinstance(rv, Undefined):
            rv = None
        return rv
@implements_iterator
class TemplateStream(object):
    """A template stream works pretty much like an ordinary python generator
    but it can buffer multiple items to reduce the number of total iterations.
    Per default the output is unbuffered which means that for every unbuffered
    instruction in the template one unicode string is yielded.

    If buffering is enabled with a buffer size of 5, five items are combined
    into a new unicode string.  This is mainly useful if you are streaming
    big templates to a client via WSGI which flushes after each iteration.
    """

    def __init__(self, gen):
        self._gen = gen
        self.disable_buffering()

    def dump(self, fp, encoding=None, errors='strict'):
        """Dump the complete stream into a file or file-like object.
        Per default unicode strings are written, if you want to encode
        before writing specify an `encoding`.

        Example usage::

            Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
        """
        close = False
        if isinstance(fp, string_types):
            # Binary mode is required when an encoding is requested since
            # we then write bytes, not unicode strings.
            fp = open(fp, encoding is None and 'w' or 'wb')
            close = True
        try:
            if encoding is not None:
                iterable = (x.encode(encoding, errors) for x in self)
            else:
                iterable = self
            if hasattr(fp, 'writelines'):
                fp.writelines(iterable)
            else:
                for item in iterable:
                    fp.write(item)
        finally:
            if close:
                fp.close()

    def disable_buffering(self):
        """Disable the output buffering."""
        self._next = get_next(self._gen)
        self.buffered = False

    def enable_buffering(self, size=5):
        """Enable buffering.  Buffer `size` items before yielding them."""
        if size <= 1:
            raise ValueError('buffer size too small')

        def generator(next):
            buf = []
            c_size = 0
            push = buf.append

            while 1:
                try:
                    while c_size < size:
                        c = next()
                        push(c)
                        # Empty strings do not count towards the buffer
                        # size so a flush always carries real content.
                        if c:
                            c_size += 1
                except StopIteration:
                    if not c_size:
                        return
                yield concat(buf)
                del buf[:]
                c_size = 0

        self.buffered = True
        self._next = get_next(generator(get_next(self._gen)))

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()
# Hook in the default template class.  Applications can still override
# ``Environment.template_class`` (or pass ``template_class=`` to
# ``from_string``) to use a custom Template subclass.
Environment.template_class = Template
| agpl-3.0 |
rpp0/peapwn | mods/hostap/tests/hwsim/test_p2p_invitation.py | 2 | 3747 | #!/usr/bin/python
#
# P2P invitation test cases
# Copyright (c) 2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import hwsim_utils
def test_p2p_go_invite(dev):
    """P2P GO inviting a client to join"""
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()

    logger.info("Generate BSS table entry for old group")
    # this adds more coverage to testing by forcing the GO to be found with an
    # older entry in the BSS table and with that entry having a different
    # operating channel.
    dev[0].p2p_start_go(freq=2422)
    dev[1].scan()
    dev[0].remove_group()

    logger.info("Discover peer")
    dev[1].p2p_listen()
    if not dev[0].discover_peer(addr1, social=True):
        raise Exception("Peer " + addr1 + " not found")

    logger.info("Start GO on non-social channel")
    res = dev[0].p2p_start_go(freq=2417)
    logger.debug("res: " + str(res))

    logger.info("Invite peer to join the group")
    dev[0].global_request("P2P_INVITE group=" + dev[0].group_ifname + " peer=" + addr1)
    ev = dev[1].wait_global_event(["P2P-INVITATION-RECEIVED"], timeout=10)
    if ev is None:
        raise Exception("Timeout on invitation on peer")
    ev = dev[0].wait_global_event(["P2P-INVITATION-RESULT"], timeout=10)
    if ev is None:
        raise Exception("Timeout on invitation on GO")
    # status=1 == "information currently unavailable": the peer has no
    # persistent group data, which is the expected response here.
    if "status=1" not in ev:
        raise Exception("Unexpected invitation result")

    logger.info("Join the group")
    pin = dev[1].wps_read_pin()
    dev[0].p2p_go_authorize_client(pin)
    dev[1].p2p_connect_group(addr0, pin, timeout=60)
    logger.info("Client connected")
    hwsim_utils.test_connectivity_p2p(dev[0], dev[1])

    logger.info("Terminate group")
    dev[0].remove_group()
    dev[1].wait_go_ending_session()
def test_p2p_go_invite_auth(dev):
    """P2P GO inviting a client to join (authorized invitation)"""
    addr0 = dev[0].p2p_dev_addr()
    addr1 = dev[1].p2p_dev_addr()

    logger.info("Generate BSS table entry for old group")
    # this adds more coverage to testing by forcing the GO to be found with an
    # older entry in the BSS table and with that entry having a different
    # operating channel.
    dev[0].p2p_start_go(freq=2432)
    dev[1].scan()
    dev[0].remove_group()
    dev[0].dump_monitor()
    dev[1].dump_monitor()

    logger.info("Discover peer")
    dev[1].p2p_listen()
    if not dev[0].discover_peer(addr1, social=True):
        raise Exception("Peer " + addr1 + " not found")
    dev[0].p2p_listen()
    if not dev[1].discover_peer(addr0, social=True):
        raise Exception("Peer " + addr0 + " not found")
    dev[1].p2p_listen()

    logger.info("Authorize invitation")
    pin = dev[1].wps_read_pin()
    # Pre-authorize joining so the later invitation is accepted without
    # prompting the user (no P2P-INVITATION-RECEIVED expected).
    dev[1].global_request("P2P_CONNECT " + addr0 + " " + pin + " join auth")

    logger.info("Start GO on non-social channel")
    res = dev[0].p2p_start_go(freq=2427)
    logger.debug("res: " + str(res))

    logger.info("Invite peer to join the group")
    dev[0].p2p_go_authorize_client(pin)
    dev[0].global_request("P2P_INVITE group=" + dev[0].group_ifname + " peer=" + addr1)
    ev = dev[1].wait_global_event(["P2P-INVITATION-RECEIVED",
                                   "P2P-GROUP-STARTED"], timeout=20)
    if ev is None:
        raise Exception("Timeout on invitation on peer")
    if "P2P-INVITATION-RECEIVED" in ev:
        raise Exception("Unexpected request to accept pre-authorized invitation")
    dev[0].dump_monitor()

    logger.info("Client connected")
    hwsim_utils.test_connectivity_p2p(dev[0], dev[1])

    logger.info("Terminate group")
    dev[0].remove_group()
    dev[1].wait_go_ending_session()
| gpl-2.0 |
jeasoft/odoo | marcos_addons/marcos_stock/models.py | 2 | 9346 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, exceptions, api
from openerp.tools.translate import _
from openerp.osv import osv, fields as old_fields
import time
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
class stock_inventory(osv.Model):
    """Extend ``stock.inventory`` with two code-import based inventory modes
    (add to current stock / replace current stock)."""
    _inherit = "stock.inventory"

    def _get_available_filters(self, cr, uid, context=None):
        # Keep the standard filters and append the two import-based modes.
        # NOTE: the key "invenory_plus" is misspelled but is persisted in the
        # database and matched in prepare_inventory() below -- renaming it
        # would require a data migration, so it is kept as-is.
        res = super(stock_inventory, self)._get_available_filters(cr, uid, context=context)
        mode_to_add = [("invenory_plus", "Importar desde los codigos y sumar al inventario actual"),
                       ("inventory_update", "Importar desde los codigos y actualizar el inventario actual")
                       ]
        res += mode_to_add
        return res

    _columns = {
        'filter': old_fields.selection(_get_available_filters, 'Inventory of', required=True,
                                       help="If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock. If you only do some products "\
                                            "(e.g. Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything. You can also let the "\
                                            "system propose for a single product / lot /... "),
    }

    def prepare_inventory(self, cr, uid, ids, context=None):
        """For the import-based modes just confirm the inventory without
        prefilling lines; otherwise defer to the standard behaviour."""
        invetory = self.browse(cr, uid, ids, context=context)
        if invetory.filter in ["invenory_plus", "inventory_update"]:
            return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
        else:
            return super(stock_inventory, self).prepare_inventory(cr, uid, ids, context=context)
class stock_quant(models.Model):
    """Extend ``stock.quant`` with helpers that report on-hand quantities per
    internal location via direct SQL aggregation."""
    _inherit = "stock.quant"

    @api.model
    def get_product_qty_by_location(self, product_id):
        """Return ``(parent_location_id, location_name, product_id,
        product_name, qty)`` tuples for `product_id`, one per internal
        location holding it.  Empty list when nothing is on hand."""
        self._cr.execute("""
            SELECT "stock_location"."location_id",
                   "stock_location"."name",
                   "product_product"."id",
                   "product_product"."name_template",
                   SUM("stock_quant"."qty")
            FROM "stock_quant"
            INNER JOIN "product_product" ON "stock_quant"."product_id" = "product_product"."id"
            INNER JOIN "stock_location" ON "stock_quant"."location_id" = "stock_location"."id"
            WHERE "product_product"."id" = %(id)s
              AND "stock_location"."usage" = 'internal'
            GROUP BY "stock_location"."location_id",
                     "stock_location"."name",
                     "product_product"."id",
                     "product_product"."name_template"
            """, {"id": product_id})
        result = self._cr.fetchall()
        if result:
            return result
        else:
            return []

    @api.model
    def get_product_qty_in_location(self, product_id, location_id):
        """Same as :meth:`get_product_qty_by_location` but restricted to a
        single internal location."""
        self._cr.execute("""
            SELECT "stock_location"."location_id",
                   "stock_location"."name",
                   "product_product"."id",
                   "product_product"."name_template",
                   SUM("stock_quant"."qty")
            FROM "stock_quant"
            INNER JOIN "product_product" ON "stock_quant"."product_id" = "product_product"."id"
            INNER JOIN "stock_location" ON "stock_quant"."location_id" = "stock_location"."id"
            WHERE "product_product"."id" = %(product_id)s
              AND "stock_location"."id" = %(location_id)s
              AND "stock_location"."usage" = 'internal'
            GROUP BY "stock_location"."location_id",
                     "stock_location"."name",
                     "product_product"."id",
                     "product_product"."name_template"
            """, {"product_id": product_id, "location_id": location_id})
        result = self._cr.fetchall()
        if result:
            return result
        return []
class stock_picking(models.Model):
    """Extend ``stock.picking`` to link a transferred picking back to the
    invoice of its originating sale or purchase order."""
    _inherit = "stock.picking"

    # Invoice this picking affects, when set manually.
    afecta = fields.Many2one("account.invoice")

    @api.model
    def do_transfer(self):
        res = super(stock_picking, self).do_transfer()
        for picking in self:
            if picking.group_id:
                # The procurement group name matches the originating SO/PO
                # name; look both up by name.
                origin = None
                so_id = self.pool.get("sale.order").search(self._cr, self._uid, [("name", "=", picking.group_id.name)])
                po_id = self.pool.get("purchase.order").search(self._cr, self._uid,
                                                               [("name", "=", picking.group_id.name)])
                if so_id:
                    origin = self.pool.get("sale.order").browse(self._cr, self._uid, so_id, context=self._context)
                elif po_id:
                    origin = self.pool.get("purchase.order").browse(self._cr, self._uid, po_id, context=self._context)
                # Bug fix: origin used to be referenced unconditionally and
                # raised NameError when the group matched neither an SO nor
                # a PO; guard it explicitly.
                if origin and origin.invoice_ids and not picking.invoice_id:
                    if not picking.afecta:
                        picking.invoice_id = origin.invoice_ids[0].id
        return res
class stock_location(models.Model):
    """Extend ``stock.location`` with a per-location valuation account that
    can override the product category's valuation account."""
    _inherit = "stock.location"

    property_stock_valuation_account_id = fields.Many2one(
        'account.account', company_dependent=True,
        string="Stock Valuation Account",
        help="When real-time inventory valuation is enabled on a product, this account will hold the current value of the products.", )
class product_template(osv.osv):
    """Override account resolution to honor location-level valuation accounts."""
    _inherit = 'product.template'

    def get_product_accounts(self, cr, uid, product_id, context=None):
        """Return the stock accounting configuration for ``product_id``.

        Resolution order for each account: product property first, then the
        product category's property.  The valuation account additionally
        checks the picking's source/destination location when called from a
        stock.picking context.  Raises when any piece is missing.
        """
        if context is None:
            context = {}
        product_obj = self.browse(cr, uid, product_id, context=context)
        # Input/output accounts: product-level property wins over category.
        stock_input_acc = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id or False
        if not stock_input_acc:
            stock_input_acc = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id or False
        stock_output_acc = product_obj.property_stock_account_output and product_obj.property_stock_account_output.id or False
        if not stock_output_acc:
            stock_output_acc = product_obj.categ_id.property_stock_account_output_categ and product_obj.categ_id.property_stock_account_output_categ.id or False
        journal_id = product_obj.categ_id.property_stock_journal and product_obj.categ_id.property_stock_journal.id or False
        # marcos check if account is declare in location
        # When invoked from a picking form, prefer the valuation account set
        # on the source location, then the destination location.
        # NOTE(review): assumes context["active_id"] is present whenever
        # active_model == "stock.picking" -- confirm with callers.
        account_valuation = False
        if context.get("active_model", False) == "stock.picking":
            picking_id = self.pool.get("stock.picking").browse(cr, uid, context["active_id"])
            account_valuation = picking_id.location_id.property_stock_valuation_account_id.id or picking_id.location_dest_id.property_stock_valuation_account_id.id
        if not account_valuation:
            account_valuation = product_obj.categ_id.property_stock_valuation_account_id and product_obj.categ_id.property_stock_valuation_account_id.id or False
        # All four pieces are mandatory for real-time valuation entries.
        if not all([stock_input_acc, stock_output_acc, account_valuation, journal_id]):
            raise osv.except_osv(_('Error!'), _('''One of the following information is missing on the product or product category and prevents the accounting valuation entries to be created:
                Product: %s
                Stock Input Account: %s
                Stock Output Account: %s
                Stock Valuation Account: %s
                Stock Journal: %s
                ''') % (product_obj.name, stock_input_acc, stock_output_acc, account_valuation, journal_id))
        return {
            'stock_account_input': stock_input_acc,
            'stock_account_output': stock_output_acc,
            'stock_journal': journal_id,
            'property_stock_valuation_account_id': account_valuation
        }
| agpl-3.0 |
HiroIshikawa/21playground | visualizer/_app_boilerplate/venv/lib/python3.5/site-packages/setuptools/site-patch.py | 720 | 2389 | def __boot():
    import sys
    import os

    # Normalize PYTHONPATH into a list of entries.  On win32 an empty value
    # is treated the same as unset.
    PYTHONPATH = os.environ.get('PYTHONPATH')
    if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
        PYTHONPATH = []
    else:
        PYTHONPATH = PYTHONPATH.split(os.pathsep)

    pic = getattr(sys,'path_importer_cache',{})
    stdpath = sys.path[len(PYTHONPATH):]
    mydir = os.path.dirname(__file__)
    #print "searching",stdpath,sys.path

    # Find and load the *real* 'site' module from the standard path, skipping
    # this directory (which shadows it with this patched copy).
    for item in stdpath:
        if item==mydir or not item:
            continue    # skip if current dir. on Windows, or my own directory
        importer = pic.get(item)
        if importer is not None:
            loader = importer.find_module('site')
            if loader is not None:
                # This should actually reload the current module
                loader.load_module('site')
                break
        else:
            try:
                import imp # Avoid import loop in Python >= 3.3
                stream, path, descr = imp.find_module('site',[item])
            except ImportError:
                continue
            if stream is None:
                continue
            try:
                # This should actually reload the current module
                imp.load_module('site',stream,path,descr)
            finally:
                stream.close()
            break
    else:
        raise ImportError("Couldn't find the real 'site' module")
    #print "loaded", __file__

    # NOTE(review): makepath/addsitedir presumably come from the real 'site'
    # module reloaded into this namespace above -- confirm.
    known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp

    oldpos = getattr(sys,'__egginsert',0)   # save old insertion position
    sys.__egginsert = 0                     # and reset the current one

    # Re-run addsitedir() over PYTHONPATH entries so their .pth files get
    # processed (eggs inserted), then restore the insertion cursor.
    for item in PYTHONPATH:
        addsitedir(item)

    sys.__egginsert += oldpos   # restore effective old position

    d, nd = makepath(stdpath[0])
    insert_at = None
    new_path = []

    # Rebuild sys.path so entries added by addsitedir() land just before the
    # first "system" path entry instead of at the end.
    for item in sys.path:
        p, np = makepath(item)

        if np==nd and insert_at is None:
            # We've hit the first 'system' path entry, so added entries go here
            insert_at = len(new_path)

        if np in known_paths or insert_at is None:
            new_path.append(item)
        else:
            # new path after the insert point, back-insert it
            new_path.insert(insert_at, item)
            insert_at += 1

    sys.path[:] = new_path

if __name__=='site':
    __boot()
    del __boot
| mit |
alexanderturner/ansible | lib/ansible/modules/cloud/google/gcdns_zone.py | 25 | 12939 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 CallFire Inc.
#
# This file is part of Ansible.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: gcdns_zone
short_description: Creates or removes zones in Google Cloud DNS
description:
- Creates or removes managed zones in Google Cloud DNS.
version_added: "2.2"
author: "William Albert (@walbert947)"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.19.0"
options:
state:
description:
- Whether the given zone should or should not be present.
required: false
choices: ["present", "absent"]
default: "present"
zone:
description:
- The DNS domain name of the zone.
- This is NOT the Google Cloud DNS zone ID (e.g., example-com). If
you attempt to specify a zone ID, this module will attempt to
create a TLD and will fail.
required: true
aliases: ['name']
description:
description:
- An arbitrary text string to use for the zone description.
required: false
default: ""
service_account_email:
description:
- The e-mail address for a service account with access to Google
Cloud DNS.
required: false
default: null
pem_file:
description:
- The path to the PEM file associated with the service account
email.
- This option is deprecated and may be removed in a future release.
Use I(credentials_file) instead.
required: false
default: null
credentials_file:
description:
- The path to the JSON file associated with the service account
email.
required: false
default: null
project_id:
description:
- The Google Cloud Platform project ID to use.
required: false
default: null
notes:
- See also M(gcdns_record).
- Zones that are newly created must still be set up with a domain registrar
before they can be used.
'''
EXAMPLES = '''
# Basic zone creation example.
- name: Create a basic zone with the minimum number of parameters.
gcdns_zone: zone=example.com
# Zone removal example.
- name: Remove a zone.
gcdns_zone: zone=example.com state=absent
# Zone creation with description
- name: Creating a zone with a description
gcdns_zone: zone=example.com description="This is an awesome zone"
'''
RETURN = '''
description:
description: The zone's description
returned: success
type: string
sample: This is an awesome zone
state:
description: Whether the zone is present or absent
returned: success
type: string
sample: present
zone:
description: The zone's DNS name
returned: success
type: string
sample: example.com.
'''
################################################################################
# Imports
################################################################################
from distutils.version import LooseVersion
try:
from libcloud import __version__ as LIBCLOUD_VERSION
from libcloud.common.google import InvalidRequestError
from libcloud.common.google import ResourceExistsError
from libcloud.common.google import ResourceNotFoundError
from libcloud.dns.types import Provider
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
################################################################################
# Constants
################################################################################
# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
# v1 API. Earlier versions contained the beta v1 API, which has since been
# deprecated and decommissioned.
MINIMUM_LIBCLOUD_VERSION = '0.19.0'

# The libcloud Google Cloud DNS provider.
PROVIDER = Provider.GOOGLE

# The URL used to verify ownership of a zone in Google Cloud DNS.
# (PEP 8 fix: added the missing space around '='.)
ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
################################################################################
# Functions
################################################################################
def create_zone(module, gcdns, zone):
    """Create a new Google Cloud DNS zone.

    Returns True when a zone was (or, in check mode, would be) created and
    False when the zone already exists.  Fails the module on known invalid
    requests; unknown API errors propagate.
    """
    zone_name = module.params['zone']
    extra = dict(description=module.params['description'])

    # Google Cloud DNS wants the trailing dot on the domain name.
    if zone_name[-1] != '.':
        zone_name = zone_name + '.'

    # A zone object was found earlier, so the domain already exists.
    if zone is not None:
        return False

    # The zone doesn't exist yet.
    try:
        if not module.check_mode:
            gcdns.create_zone(domain=zone_name, extra=extra)
        return True

    except ResourceExistsError:
        # We checked for existence already, so either Google is lying, or a
        # concurrent actor created the zone between our check and this call.
        # Either way it now exists, so there is nothing left to do.
        return False

    except InvalidRequestError as error:
        # Map the known failure modes to user-facing messages.
        failures = {
            # Typically caused by an illegal DNS name (e.g. foo..com).
            'invalid':
                "zone name is not a valid DNS name: %s" % zone_name,
            # Google refuses TLDs, ccTLDs, and special names like example.com.
            'managedZoneDnsNameNotAvailable':
                "zone name is reserved or already in use: %s" % zone_name,
            # The name must be verified first, e.g. when it shares a domain
            # with a zone hosted elsewhere in Google Cloud DNS.
            'verifyManagedZoneDnsNameOwnership':
                "ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
        }
        if error.code in failures:
            module.fail_json(msg=failures[error.code], changed=False)
        else:
            # Unknown error: re-raise for the caller to report.
            raise
def remove_zone(module, gcdns, zone):
    """Remove an existing Google Cloud DNS zone.

    Returns True when the zone was (or, in check mode, would be) removed and
    False when there was nothing to remove.  Fails the module when the zone
    still contains user records.
    """
    # No zone object means nothing to delete.
    if zone is None:
        return False

    # An empty zone still carries exactly two records (the authoritative NS
    # record and the SOA record); anything beyond those is user data and
    # Google Cloud DNS will refuse the delete.
    if len(zone.list_records()) > 2:
        module.fail_json(
            msg = "zone is not empty and cannot be removed: %s" % zone.domain,
            changed = False
        )

    try:
        if not module.check_mode:
            gcdns.delete_zone(zone)
        return True

    except ResourceNotFoundError:
        # The zone vanished between our check and the delete call; it is
        # gone either way.
        return False

    except InvalidRequestError as error:
        if error.code != 'containerNotEmpty':
            # Unknown error: re-raise for the caller to report.
            raise
        # Records were added between our emptiness check and the delete.
        module.fail_json(
            msg = "zone is not empty and cannot be removed: %s" % zone.domain,
            changed = False
        )
def _get_zone(gcdns, zone_name):
"""Gets the zone object for a given domain name."""
# To create a zone, we need to supply a zone name. However, to delete a
# zone, we need to supply a zone ID. Zone ID's are often based on zone
# names, but that's not guaranteed, so we'll iterate through the list of
# zones to see if we can find a matching name.
available_zones = gcdns.iterate_zones()
found_zone = None
for zone in available_zones:
if zone.domain == zone_name:
found_zone = zone
break
return found_zone
def _sanity_check(module):
    """Fail fast on a missing/old libcloud or an obviously bad zone name."""
    zone_name = module.params['zone']

    # Apache libcloud must be installed and at least the minimum version.
    # Short-circuit keeps LIBCLOUD_VERSION from being touched when the
    # import failed entirely.
    if (not HAS_LIBCLOUD
            or LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION):
        module.fail_json(
            msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
            changed = False
        )

    # Google Cloud DNS does not support the creation of TLDs.
    labels = [label for label in zone_name.split('.') if label]
    if '.' not in zone_name or len(labels) == 1:
        module.fail_json(
            msg = 'cannot create top-level domain: %s' % zone_name,
            changed = False
        )
################################################################################
# Main
################################################################################
def main():
    """Entry point: create or remove the requested zone and report a diff."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent'], type='str'),
            zone = dict(required=True, aliases=['name'], type='str'),
            description = dict(default='', type='str'),
            service_account_email = dict(type='str'),
            pem_file = dict(type='path'),
            credentials_file = dict(type='path'),
            project_id = dict(type='str')
        ),
        supports_check_mode = True
    )

    # Aborts via fail_json on missing libcloud or an invalid zone name.
    _sanity_check(module)

    zone_name = module.params['zone']
    state = module.params['state']

    # Google Cloud DNS wants the trailing dot on the domain name.
    if zone_name[-1] != '.':
        zone_name = zone_name + '.'

    json_output = dict(
        state = state,
        zone = zone_name,
        description = module.params['description']
    )

    # Build a connection object that we can use to connect with Google
    # Cloud DNS.
    gcdns = gcdns_connect(module, provider=PROVIDER)

    # We need to check if the zone we're attempting to create already exists.
    zone = _get_zone(gcdns, zone_name)

    diff = dict()

    # Build the 'before' diff
    if zone is None:
        diff['before'] = ''
        diff['before_header'] = '<absent>'
    else:
        diff['before'] = dict(
            zone = zone.domain,
            description = zone.extra['description']
        )
        diff['before_header'] = zone_name

    # Create or remove the zone.
    if state == 'present':
        diff['after'] = dict(
            zone = zone_name,
            description = module.params['description']
        )
        diff['after_header'] = zone_name

        changed = create_zone(module, gcdns, zone)

    elif state == 'absent':
        diff['after'] = ''
        diff['after_header'] = '<absent>'

        changed = remove_zone(module, gcdns, zone)

    module.exit_json(changed=changed, diff=diff, **json_output)
from ansible.module_utils.basic import *
from ansible.module_utils.gcdns import *
if __name__ == '__main__':
main()
| gpl-3.0 |
lpirl/ansible | lib/ansible/plugins/callback/tree.py | 16 | 2413 | # (c) 2012-2014, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.plugins.callback import CallbackBase
from ansible.utils.path import makedirs_safe
from ansible.utils.unicode import to_bytes
from ansible.constants import TREE_DIR
class CallbackModule(CallbackBase):
    '''
    This callback puts results into a host specific file in a directory in json format.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'tree'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self):
        super(CallbackModule, self).__init__()

        self.tree = TREE_DIR
        if not self.tree:
            # Bug fix: the original interpolated self.tree *after* it had
            # been overwritten with the fallback, so the warning displayed
            # the default path as the "invalid directory".  Capture the
            # configured value first and report both.
            configured = self.tree
            self.tree = os.path.expanduser("~/.ansible/tree")
            self._display.warning(
                "The tree callback is defaulting to %s, as an invalid directory was provided: %r"
                % (self.tree, configured))

    def write_tree_file(self, hostname, buf):
        ''' write something into treedir/hostname '''
        buf = to_bytes(buf)
        try:
            makedirs_safe(self.tree)
            path = os.path.join(self.tree, hostname)
            # 'wb+' truncates like 'wb'; kept for backward compatibility.
            with open(path, 'wb+') as fd:
                fd.write(buf)
        except (OSError, IOError) as e:
            self._display.warning("Unable to write to %s's file: %s" % (hostname, str(e)))

    def result_to_tree(self, result):
        # Best-effort: skip silently when no tree directory is configured.
        if self.tree:
            self.write_tree_file(result._host.get_name(), self._dump_results(result._result))

    def v2_runner_on_ok(self, result):
        self.result_to_tree(result)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.result_to_tree(result)

    def v2_runner_on_unreachable(self, result):
        self.result_to_tree(result)
| gpl-3.0 |
luogangyi/Ceilometer-oVirt | build/lib/ceilometer/tests/storage/test_impl_log.py | 6 | 1104 | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_log.py
"""
from oslotest import base
from ceilometer.storage import impl_log
class ConnectionTest(base.BaseTestCase):
    """Smoke test for the log-only metering storage driver."""

    def test_get_connection(self):
        connection = impl_log.Connection(None)
        sample = {
            'counter_name': 'test',
            'resource_id': __name__,
            'counter_volume': 1,
        }
        # Should accept the sample without raising.
        connection.record_metering_data(sample)
| apache-2.0 |
Khan/git-bigfile | vendor/boto/glacier/exceptions.py | 185 | 2195 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
class UnexpectedHTTPResponseError(Exception):
    """Raised when Glacier returns a status other than the expected one(s).

    Exposes ``status`` (HTTP status), ``body`` (raw response body) and
    ``code`` (Glacier error code parsed from the JSON body, or None when the
    body is not parseable JSON).
    """

    def __init__(self, expected_responses, response):
        self.status = response.status
        self.body = response.read()
        self.code = None
        try:
            parsed = json.loads(self.body)
            self.code = parsed["code"]
            msg = ('Expected %s, got ' % expected_responses
                   + '(%d, code=%s, message=%s)' % (response.status,
                                                    self.code,
                                                    parsed["message"]))
        except Exception:
            # Body was not JSON (or lacked the expected keys): fall back to
            # reporting the raw body.
            msg = 'Expected %s, got (%d, %s)' % (expected_responses,
                                                 response.status,
                                                 self.body)
        super(UnexpectedHTTPResponseError, self).__init__(msg)
class ArchiveError(Exception):
    """Base class for errors raised while working with Glacier archives."""
    pass


class UploadArchiveError(ArchiveError):
    """Raised when uploading an archive fails."""
    pass


class DownloadArchiveError(ArchiveError):
    """Raised when downloading an archive fails."""
    pass


class TreeHashDoesNotMatchError(ArchiveError):
    """Raised when a downloaded archive's tree hash does not match the expected hash."""
    pass
| mit |
systembugtj/omaha | third_party/gmock/scripts/generator/cpp/tokenize.py | 679 | 9703 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenize C++ source code."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
from cpp import utils
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
# Add $ as a valid identifier char since so much code uses it.
_letters = 'abcdefghijklmnopqrstuvwxyz'
VALID_IDENTIFIER_CHARS = set(_letters + _letters.upper() + '_0123456789$')
HEX_DIGITS = set('0123456789abcdefABCDEF')
INT_OR_FLOAT_DIGITS = set('01234567890eE-+')
# C++0x string preffixes.
_STR_PREFIXES = set(('R', 'u8', 'u8R', 'u', 'uR', 'U', 'UR', 'L', 'LR'))
# Token types.
UNKNOWN = 'UNKNOWN'
SYNTAX = 'SYNTAX'
CONSTANT = 'CONSTANT'
NAME = 'NAME'
PREPROCESSOR = 'PREPROCESSOR'
# Where the token originated from. This can be used for backtracking.
# It is always set to WHENCE_STREAM in this code.
WHENCE_STREAM, WHENCE_QUEUE = range(2)
class Token(object):
    """Data container to represent a C++ token.

    Tokens can be identifiers, syntax char(s), constants, or
    pre-processor directives.

    ``start``/``end`` are the indices of the first char and one past the
    last char of the token in the source string.
    """

    def __init__(self, token_type, name, start, end):
        self.token_type = token_type
        self.name = name
        self.start = start
        self.end = end
        self.whence = WHENCE_STREAM

    def __str__(self):
        # Verbose form (with positions) only in debug mode.
        if utils.DEBUG:
            return 'Token(%r, %s, %s)' % (self.name, self.start, self.end)
        return 'Token(%r)' % self.name

    __repr__ = __str__
def _GetString(source, start, i):
i = source.find('"', i+1)
while source[i-1] == '\\':
# Count the trailing backslashes.
backslash_count = 1
j = i - 2
while source[j] == '\\':
backslash_count += 1
j -= 1
# When trailing backslashes are even, they escape each other.
if (backslash_count % 2) == 0:
break
i = source.find('"', i+1)
return i + 1
def _GetChar(source, start, i):
# NOTE(nnorwitz): may not be quite correct, should be good enough.
i = source.find("'", i+1)
while source[i-1] == '\\':
# Need to special case '\\'.
if (i - 2) > start and source[i-2] == '\\':
break
i = source.find("'", i+1)
# Try to handle unterminated single quotes (in a #if 0 block).
if i < 0:
i = start
return i + 1
def GetTokens(source):
    """Returns a sequence of Tokens.

    Single-pass, index-driven scanner over C++ source.  ``i`` is the cursor;
    each loop iteration classifies the character at ``i`` and advances past
    one whole token, which is then yielded.

    Args:
      source: string of C++ source code.

    Yields:
      Token that represents the next token in the source.
    """
    # Cache various valid character sets for speed.
    valid_identifier_chars = VALID_IDENTIFIER_CHARS
    hex_digits = HEX_DIGITS
    int_or_float_digits = INT_OR_FLOAT_DIGITS
    int_or_float_digits2 = int_or_float_digits | set('.')

    # Only ignore errors while in a #if 0 block.
    ignore_errors = False
    count_ifs = 0

    i = 0
    end = len(source)
    while i < end:
        # Skip whitespace.
        while i < end and source[i].isspace():
            i += 1
        if i >= end:
            return

        token_type = UNKNOWN
        start = i
        c = source[i]
        if c.isalpha() or c == '_':              # Find a string token.
            token_type = NAME
            while source[i] in valid_identifier_chars:
                i += 1
            # String and character constants can look like a name if
            # they are something like L"".
            if (source[i] == "'" and (i - start) == 1 and
                source[start:i] in 'uUL'):
                # u, U, and L are valid C++0x character preffixes.
                token_type = CONSTANT
                i = _GetChar(source, start, i)
            elif source[i] == "'" and source[start:i] in _STR_PREFIXES:
                token_type = CONSTANT
                i = _GetString(source, start, i)
        elif c == '/' and source[i+1] == '/':    # Find // comments.
            i = source.find('\n', i)
            if i == -1:  # Handle EOF.
                i = end
            continue
        elif c == '/' and source[i+1] == '*':    # Find /* comments. */
            i = source.find('*/', i) + 2
            continue
        elif c in ':+-<>&|*=':                   # : or :: (plus other chars).
            token_type = SYNTAX
            i += 1
            new_ch = source[i]
            # Fold two-char operators (::, ++, ->, +=, ...) into one token.
            if new_ch == c:
                i += 1
            elif c == '-' and new_ch == '>':
                i += 1
            elif new_ch == '=':
                i += 1
        elif c in '()[]{}~!?^%;/.,':             # Handle single char tokens.
            token_type = SYNTAX
            i += 1
            if c == '.' and source[i].isdigit():
                # A leading-dot float such as .5f.
                token_type = CONSTANT
                i += 1
                while source[i] in int_or_float_digits:
                    i += 1
                # Handle float suffixes.
                for suffix in ('l', 'f'):
                    if suffix == source[i:i+1].lower():
                        i += 1
                        break
        elif c.isdigit():                        # Find integer.
            token_type = CONSTANT
            if c == '0' and source[i+1] in 'xX':
                # Handle hex digits.
                i += 2
                while source[i] in hex_digits:
                    i += 1
            else:
                while source[i] in int_or_float_digits2:
                    i += 1
            # Handle integer (and float) suffixes.
            for suffix in ('ull', 'll', 'ul', 'l', 'f', 'u'):
                size = len(suffix)
                if suffix == source[i:i+size].lower():
                    i += size
                    break
        elif c == '"':                           # Find string.
            token_type = CONSTANT
            i = _GetString(source, start, i)
        elif c == "'":                           # Find char.
            token_type = CONSTANT
            i = _GetChar(source, start, i)
        elif c == '#':                           # Find pre-processor command.
            token_type = PREPROCESSOR
            got_if = source[i:i+3] == '#if' and source[i+3:i+4].isspace()
            if got_if:
                count_ifs += 1
            elif source[i:i+6] == '#endif':
                count_ifs -= 1
                if count_ifs == 0:
                    ignore_errors = False

            # TODO(nnorwitz): handle preprocessor statements (\ continuations).
            # Consume up to the logical end of the directive, honoring quoted
            # strings and backslash line continuations.
            while 1:
                i1 = source.find('\n', i)
                i2 = source.find('//', i)
                i3 = source.find('/*', i)
                i4 = source.find('"', i)
                # NOTE(nnorwitz): doesn't handle comments in #define macros.
                # Get the first important symbol (newline, comment, EOF/end).
                i = min([x for x in (i1, i2, i3, i4, end) if x != -1])

                # Handle #include "dir//foo.h" properly.
                if source[i] == '"':
                    i = source.find('"', i+1) + 1
                    assert i > 0
                    continue
                # Keep going if end of the line and the line ends with \.
                if not (i == i1 and source[i-1] == '\\'):
                    if got_if:
                        condition = source[start+4:i].lstrip()
                        if (condition.startswith('0') or
                            condition.startswith('(0)')):
                            # Entering a '#if 0' block: tolerate bogus tokens
                            # until the matching #endif resets ignore_errors.
                            ignore_errors = True
                    break
                i += 1
        elif c == '\\':                          # Handle \ in code.
            # This is different from the pre-processor \ handling.
            i += 1
            continue
        elif ignore_errors:
            # The tokenizer seems to be in pretty good shape.  This
            # raise is conditionally disabled so that bogus code
            # in an #if 0 block can be handled.  Since we will ignore
            # it anyways, this is probably fine.  So disable the
            # exception and return the bogus char.
            i += 1
        else:
            sys.stderr.write('Got invalid token in %s @ %d token:%s: %r\n' %
                             ('?', i, c, source[i-10:i+10]))
            raise RuntimeError('unexpected token')

        if i <= 0:
            print('Invalid index, exiting now.')
            return
        yield Token(token_type, source[start:i], start, i)
if __name__ == '__main__':

    def main(argv):
        """Driver mostly for testing purposes."""
        # Tokenize each file named on the command line and dump the tokens.
        for filename in argv[1:]:
            source = utils.ReadFile(filename)
            if source is None:
                continue
            for token in GetTokens(source):
                print('%-12s: %s' % (token.token_type, token.name))
                # print('\r%6.2f%%' % (100.0 * index / token.end),)
            sys.stdout.write('\n')

    main(sys.argv)
| apache-2.0 |
newsteinking/docker | tests/functional/test_install_vcs_svn.py | 40 | 1055 | import pytest
from mock import patch
from pip.vcs.subversion import Subversion
@patch('pip.vcs.call_subprocess')
@pytest.mark.network
def test_obtain_should_recognize_auth_info_url(call_subprocess_mock, script):
    """obtain() must split URL credentials into --username/--password flags."""
    url = 'svn+http://username:password@svn.example.com/'
    svn = Subversion(url=url)
    svn.obtain(script.scratch_path / 'test')
    expected = [
        svn.name, 'checkout', '-q', '--username', 'username', '--password',
        'password', 'http://username:password@svn.example.com/',
        script.scratch_path / 'test',
    ]
    assert call_subprocess_mock.call_args[0][0] == expected
@patch('pip.vcs.call_subprocess')
@pytest.mark.network
def test_export_should_recognize_auth_info_url(call_subprocess_mock, script):
    """export() must split URL credentials into --username/--password flags."""
    url = 'svn+http://username:password@svn.example.com/'
    svn = Subversion(url=url)
    svn.export(script.scratch_path / 'test')
    expected = [
        svn.name, 'export', '--username', 'username', '--password',
        'password', 'http://username:password@svn.example.com/',
        script.scratch_path / 'test',
    ]
    assert call_subprocess_mock.call_args[0][0] == expected
| mit |
flos-club/synergy | ext/gmock/scripts/generator/cpp/tokenize.py | 679 | 9703 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenize C++ source code."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
from cpp import utils
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
# Add $ as a valid identifier char since so much code uses it.
_letters = 'abcdefghijklmnopqrstuvwxyz'
VALID_IDENTIFIER_CHARS = set(_letters + _letters.upper() + '_0123456789$')
HEX_DIGITS = set('0123456789abcdefABCDEF')
INT_OR_FLOAT_DIGITS = set('01234567890eE-+')
# C++0x string preffixes.
_STR_PREFIXES = set(('R', 'u8', 'u8R', 'u', 'uR', 'U', 'UR', 'L', 'LR'))
# Token types.
UNKNOWN = 'UNKNOWN'
SYNTAX = 'SYNTAX'
CONSTANT = 'CONSTANT'
NAME = 'NAME'
PREPROCESSOR = 'PREPROCESSOR'
# Where the token originated from. This can be used for backtracking.
# It is always set to WHENCE_STREAM in this code.
WHENCE_STREAM, WHENCE_QUEUE = range(2)
class Token(object):
    """Data container to represent a C++ token.

    Tokens can be identifiers, syntax char(s), constants, or
    pre-processor directives.

    ``start``/``end`` are the indices of the first char and one past the
    last char of the token in the source string.
    """

    def __init__(self, token_type, name, start, end):
        self.token_type = token_type
        self.name = name
        self.start = start
        self.end = end
        self.whence = WHENCE_STREAM

    def __str__(self):
        # Verbose form (with positions) only in debug mode.
        if utils.DEBUG:
            return 'Token(%r, %s, %s)' % (self.name, self.start, self.end)
        return 'Token(%r)' % self.name

    __repr__ = __str__
def _GetString(source, start, i):
i = source.find('"', i+1)
while source[i-1] == '\\':
# Count the trailing backslashes.
backslash_count = 1
j = i - 2
while source[j] == '\\':
backslash_count += 1
j -= 1
# When trailing backslashes are even, they escape each other.
if (backslash_count % 2) == 0:
break
i = source.find('"', i+1)
return i + 1
def _GetChar(source, start, i):
# NOTE(nnorwitz): may not be quite correct, should be good enough.
i = source.find("'", i+1)
while source[i-1] == '\\':
# Need to special case '\\'.
if (i - 2) > start and source[i-2] == '\\':
break
i = source.find("'", i+1)
# Try to handle unterminated single quotes (in a #if 0 block).
if i < 0:
i = start
return i + 1
def GetTokens(source):
    """Returns a sequence of Tokens.

    Args:
      source: string of C++ source code.

    Yields:
      Token that represents the next token in the source.
    """
    # Cache various valid character sets for speed.
    valid_identifier_chars = VALID_IDENTIFIER_CHARS
    hex_digits = HEX_DIGITS
    int_or_float_digits = INT_OR_FLOAT_DIGITS
    int_or_float_digits2 = int_or_float_digits | set('.')

    # Only ignore errors while in a #if 0 block.
    ignore_errors = False
    count_ifs = 0

    i = 0
    end = len(source)
    while i < end:
        # Skip whitespace.
        while i < end and source[i].isspace():
            i += 1
        if i >= end:
            return

        token_type = UNKNOWN
        start = i
        c = source[i]
        if c.isalpha() or c == '_':              # Find a string token.
            token_type = NAME
            while source[i] in valid_identifier_chars:
                i += 1
            # String and character constants can look like a name if
            # they are something like L"".
            if (source[i] == "'" and (i - start) == 1 and
                source[start:i] in 'uUL'):
                # u, U, and L are valid C++0x character prefixes.
                token_type = CONSTANT
                i = _GetChar(source, start, i)
            elif source[i] == "'" and source[start:i] in _STR_PREFIXES:
                # NOTE(review): this branch tests for a single quote but then
                # parses a double-quoted string via _GetString, so prefixed
                # strings such as R"..." can never match here -- it looks like
                # the test should be '"'; confirm before relying on it.
                token_type = CONSTANT
                i = _GetString(source, start, i)
        elif c == '/' and source[i+1] == '/':    # Find // comments.
            i = source.find('\n', i)
            if i == -1:  # Handle EOF.
                i = end
            continue
        elif c == '/' and source[i+1] == '*':    # Find /* comments. */
            i = source.find('*/', i) + 2
            continue
        elif c in ':+-<>&|*=':                   # : or :: (plus other chars).
            token_type = SYNTAX
            i += 1
            new_ch = source[i]
            # Recognize two-character operators: '::', '++', '->', '<=', etc.
            if new_ch == c:
                i += 1
            elif c == '-' and new_ch == '>':
                i += 1
            elif new_ch == '=':
                i += 1
        elif c in '()[]{}~!?^%;/.,':             # Handle single char tokens.
            token_type = SYNTAX
            i += 1
            if c == '.' and source[i].isdigit():
                # A leading '.' followed by a digit is a float constant.
                token_type = CONSTANT
                i += 1
                while source[i] in int_or_float_digits:
                    i += 1
                # Handle float suffixes.
                for suffix in ('l', 'f'):
                    if suffix == source[i:i+1].lower():
                        i += 1
                        break
        elif c.isdigit():                        # Find integer.
            token_type = CONSTANT
            if c == '0' and source[i+1] in 'xX':
                # Handle hex digits.
                i += 2
                while source[i] in hex_digits:
                    i += 1
            else:
                while source[i] in int_or_float_digits2:
                    i += 1
            # Handle integer (and float) suffixes.
            for suffix in ('ull', 'll', 'ul', 'l', 'f', 'u'):
                size = len(suffix)
                if suffix == source[i:i+size].lower():
                    i += size
                    break
        elif c == '"':                           # Find string.
            token_type = CONSTANT
            i = _GetString(source, start, i)
        elif c == "'":                           # Find char.
            token_type = CONSTANT
            i = _GetChar(source, start, i)
        elif c == '#':                           # Find pre-processor command.
            token_type = PREPROCESSOR
            got_if = source[i:i+3] == '#if' and source[i+3:i+4].isspace()
            if got_if:
                count_ifs += 1
            elif source[i:i+6] == '#endif':
                count_ifs -= 1
                if count_ifs == 0:
                    ignore_errors = False

            # TODO(nnorwitz): handle preprocessor statements (\ continuations).
            while 1:
                i1 = source.find('\n', i)
                i2 = source.find('//', i)
                i3 = source.find('/*', i)
                i4 = source.find('"', i)
                # NOTE(nnorwitz): doesn't handle comments in #define macros.
                # Get the first important symbol (newline, comment, EOF/end).
                i = min([x for x in (i1, i2, i3, i4, end) if x != -1])

                # Handle #include "dir//foo.h" properly.
                if source[i] == '"':
                    i = source.find('"', i+1) + 1
                    assert i > 0
                    continue
                # Keep going if end of the line and the line ends with \.
                if not (i == i1 and source[i-1] == '\\'):
                    if got_if:
                        condition = source[start+4:i].lstrip()
                        if (condition.startswith('0') or
                            condition.startswith('(0)')):
                            # Entering a '#if 0' region: tolerate bogus tokens
                            # until the matching #endif.
                            ignore_errors = True
                    break
                i += 1
        elif c == '\\':                          # Handle \ in code.
            # This is different from the pre-processor \ handling.
            i += 1
            continue
        elif ignore_errors:
            # The tokenizer seems to be in pretty good shape.  This
            # raise is conditionally disabled so that bogus code
            # in an #if 0 block can be handled.  Since we will ignore
            # it anyways, this is probably fine.  So disable the
            # exception and return the bogus char.
            i += 1
        else:
            sys.stderr.write('Got invalid token in %s @ %d token:%s: %r\n' %
                             ('?', i, c, source[i-10:i+10]))
            raise RuntimeError('unexpected token')

        if i <= 0:
            print('Invalid index, exiting now.')
            return
        yield Token(token_type, source[start:i], start, i)
if __name__ == '__main__':
    def main(argv):
        """Driver mostly for testing purposes."""
        # Tokenize each file named on the command line and dump its tokens.
        for filename in argv[1:]:
            source = utils.ReadFile(filename)
            if source is None:
                continue

            for token in GetTokens(source):
                print('%-12s: %s' % (token.token_type, token.name))
                # print('\r%6.2f%%' % (100.0 * index / token.end),)
            sys.stdout.write('\n')

    main(sys.argv)
| gpl-2.0 |
hoosteeno/kuma | vendor/packages/pygments/lexers/markup.py | 72 | 16886 | # -*- coding: utf-8 -*-
"""
pygments.lexers.markup
~~~~~~~~~~~~~~~~~~~~~~
Lexers for non-HTML markup languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers.html import HtmlLexer, XmlLexer
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.css import CssLexer
from pygments.lexer import RegexLexer, DelegatingLexer, include, bygroups, \
using, this, do_insertions, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Other
from pygments.util import get_bool_opt, ClassNotFound
__all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer',
'MozPreprocHashLexer', 'MozPreprocPercentLexer',
'MozPreprocXulLexer', 'MozPreprocJavascriptLexer',
'MozPreprocCssLexer']
class BBCodeLexer(RegexLexer):
    """
    A lexer that highlights BBCode(-like) syntax.

    .. versionadded:: 0.6
    """

    name = 'BBCode'
    aliases = ['bbcode']
    mimetypes = ['text/x-bbcode']

    # Two states: 'root' for plain text, 'tag' entered after '[' or '[/'.
    # Rule order matters within each state.
    tokens = {
        'root': [
            (r'[^[]+', Text),
            # tag/end tag begin
            (r'\[/?\w+', Keyword, 'tag'),
            # stray bracket
            (r'\[', Text),
        ],
        'tag': [
            (r'\s+', Text),
            # attribute with value
            (r'(\w+)(=)("?[^\s"\]]+"?)',
             bygroups(Name.Attribute, Operator, String)),
            # tag argument (a la [color=green])
            (r'(=)("?[^\s"\]]+"?)',
             bygroups(Operator, String)),
            # tag end
            (r'\]', Keyword, '#pop'),
        ],
    }
class MoinWikiLexer(RegexLexer):
    """
    For MoinMoin (and Trac) Wiki markup.

    .. versionadded:: 0.7
    """

    name = 'MoinMoin/Trac Wiki markup'
    aliases = ['trac-wiki', 'moin']
    filenames = []
    mimetypes = ['text/x-trac-wiki']
    flags = re.MULTILINE | re.IGNORECASE

    # Rule order matters: earlier patterns win within each state.
    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'(!)(\S+)', bygroups(Keyword, Text)),  # Ignore-next
            # Titles
            (r'^(=+)([^=]+)(=+)(\s*#.+)?$',
             bygroups(Generic.Heading, using(this), Generic.Heading, String)),
            # Literal code blocks, with optional shebang
            (r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
            (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment),  # Formatting
            # Lists
            (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
            (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
            # Other Formatting
            (r'\[\[\w+.*?\]\]', Keyword),  # Macro
            (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
             bygroups(Keyword, String, Keyword)),  # Link
            (r'^----+$', Keyword),  # Horizontal rules
            (r'[^\n\'\[{!_~^,|]+', Text),
            (r'\n', Text),
            (r'.', Text),
        ],
        'codeblock': [
            (r'\}\}\}', Name.Builtin, '#pop'),
            # these blocks are allowed to be nested in Trac, but not MoinMoin
            (r'\{\{\{', Text, '#push'),
            (r'[^{}]+', Comment.Preproc),  # slurp boring text
            (r'.', Comment.Preproc),  # allow loose { or }
        ],
    }
class RstLexer(RegexLexer):
    """
    For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.

    .. versionadded:: 0.7

    Additional options accepted:

    `handlecodeblocks`
        Highlight the contents of ``.. sourcecode:: language``,
        ``.. code:: language`` and ``.. code-block:: language``
        directives with a lexer for the given language (default:
        ``True``).

        .. versionadded:: 0.8
    """
    name = 'reStructuredText'
    aliases = ['rst', 'rest', 'restructuredtext']
    filenames = ['*.rst', '*.rest']
    mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
    flags = re.MULTILINE

    def _handle_sourcecode(self, match):
        """Callback for sourcecode/code-block directives: emits the directive
        header tokens, then delegates the indented code body to the lexer
        named in the directive (when found and enabled)."""
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), Punctuation, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator.Word, match.group(3)
        yield match.start(4), Punctuation, match.group(4)
        yield match.start(5), Text, match.group(5)
        yield match.start(6), Keyword, match.group(6)
        yield match.start(7), Text, match.group(7)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(6).strip())
            except ClassNotFound:
                pass
        indention = match.group(8)
        indention_size = len(indention)
        code = (indention + match.group(9) + match.group(10) + match.group(11))

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(8), String, code
            return

        # highlight the lines with the lexer.
        # Strip the common indentation before sub-lexing, and re-insert it
        # afterwards via do_insertions.
        ins = []
        codelines = code.splitlines(True)
        code = ''
        for line in codelines:
            if len(line) > indention_size:
                ins.append((len(code), [(0, Text, line[:indention_size])]))
                code += line[indention_size:]
            else:
                code += line

        for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
            yield item

    # from docutils.parsers.rst.states
    closers = u'\'")]}>\u2019\u201d\xbb!?'
    unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
    end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
                         % (re.escape(unicode_delimiters),
                            re.escape(closers)))

    # Rule order matters: earlier patterns win within each state.
    tokens = {
        'root': [
            # Heading with overline
            (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
             r'(.+)(\n)(\1)(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading,
                      Text, Generic.Heading, Text)),
            # Plain heading
            (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
             r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # Bulleted lists
            (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered lists
            (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered, but keep words at BOL from becoming lists
            (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Line blocks
            (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
             bygroups(Text, Operator, using(this, state='inline'))),
            # Sourcecode directives
            (r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)'
             r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
             _handle_sourcecode),
            # A directive
            (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
                      using(this, state='inline'))),
            # A reference target
            (r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A footnote/citation target
            (r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A substitution def
            (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
                      Punctuation, Text, using(this, state='inline'))),
            # Comments
            (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
            # Field list
            (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
            (r'^( *)(:.*?:)([ \t]+)(.*?)$',
             bygroups(Text, Name.Class, Text, Name.Function)),
            # Definition list
            (r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)',
             bygroups(using(this, state='inline'), using(this, state='inline'))),
            # Code blocks
            (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
             bygroups(String.Escape, Text, String, String, Text, String)),
            include('inline'),
        ],
        'inline': [
            (r'\\.', Text),  # escape
            (r'``', String, 'literal'),  # code
            (r'(`.+?)(<.+?>)(`__?)',  # reference with inline target
             bygroups(String, String.Interpol, String)),
            (r'`.+?`__?', String),  # reference
            (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
             bygroups(Name.Variable, Name.Attribute)),  # role
            (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
             bygroups(Name.Attribute, Name.Variable)),  # role (content first)
            (r'\*\*.+?\*\*', Generic.Strong),  # Strong emphasis
            (r'\*.+?\*', Generic.Emph),  # Emphasis
            (r'\[.*?\]_', String),  # Footnote or citation
            (r'<.+?>', Name.Tag),  # Hyperlink
            (r'[^\\\n\[*`:]+', Text),
            (r'.', Text),
        ],
        'literal': [
            (r'[^`]+', String),
            (r'``' + end_string_suffix, String, '#pop'),
            (r'`', String),
        ]
    }

    def __init__(self, **options):
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)

    # NOTE: defined without 'self', consistent with the other analyse_text
    # heuristics in this module.
    def analyse_text(text):
        if text[:2] == '..' and text[2:3] != '.':
            return 0.3
        p1 = text.find("\n")
        p2 = text.find("\n", p1 + 1)
        if (p2 > -1 and              # has two lines
            p1 * 2 + 1 == p2 and     # they are the same length
            text[p1+1] in '-=' and   # the next line both starts and ends with
            text[p1+1] == text[p2-1]):  # ...a sufficiently high header
            return 0.5
class TexLexer(RegexLexer):
    """
    Lexer for the TeX and LaTeX typesetting languages.
    """

    name = 'TeX'
    aliases = ['tex', 'latex']
    filenames = ['*.tex', '*.aux', '*.toc']
    mimetypes = ['text/x-tex', 'text/x-latex']

    tokens = {
        # Shared rules included from both text and math states.
        'general': [
            (r'%.*?\n', Comment),
            (r'[{}]', Name.Builtin),
            (r'[&_^]', Name.Builtin),
        ],
        'root': [
            (r'\\\[', String.Backtick, 'displaymath'),
            (r'\\\(', String, 'inlinemath'),
            (r'\$\$', String.Backtick, 'displaymath'),
            (r'\$', String, 'inlinemath'),
            (r'\\([a-zA-Z]+|.)', Keyword, 'command'),
            (r'\\$', Keyword),
            include('general'),
            (r'[^\\$%&_^{}]+', Text),
        ],
        'math': [
            (r'\\([a-zA-Z]+|.)', Name.Variable),
            include('general'),
            (r'[0-9]+', Number),
            (r'[-=!+*/()\[\]]', Operator),
            (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
        ],
        'inlinemath': [
            (r'\\\)', String, '#pop'),
            (r'\$', String, '#pop'),
            include('math'),
        ],
        'displaymath': [
            (r'\\\]', String, '#pop'),
            (r'\$\$', String, '#pop'),
            (r'\$', Name.Builtin),
            include('math'),
        ],
        # After a command: optional [...] argument and a star variant.
        'command': [
            (r'\[.*?\]', Name.Attribute),
            (r'\*', Keyword),
            default('#pop'),
        ],
    }

    # NOTE: defined without 'self', consistent with the other analyse_text
    # heuristics in this module.  Returns None implicitly when no prefix
    # matches.
    def analyse_text(text):
        for start in ("\\documentclass", "\\input", "\\documentstyle",
                      "\\relax"):
            if text[:len(start)] == start:
                return True
class GroffLexer(RegexLexer):
    """
    Lexer for the (g)roff typesetting language, supporting groff
    extensions. Mainly useful for highlighting manpage sources.

    .. versionadded:: 0.6
    """

    name = 'Groff'
    aliases = ['groff', 'nroff', 'man']
    filenames = ['*.[1234567]', '*.man']
    mimetypes = ['application/x-troff', 'text/troff']

    tokens = {
        'root': [
            (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
            (r'\.', Punctuation, 'request'),
            # Regular characters, slurp till we find a backslash or newline
            (r'[^\\\n]+', Text, 'textline'),
            default('textline'),
        ],
        'textline': [
            include('escapes'),
            (r'[^\\\n]+', Text),
            (r'\n', Text, '#pop'),
        ],
        'escapes': [
            # groff has many ways to write escapes.
            (r'\\"[^\n]*', Comment),
            (r'\\[fn]\w', String.Escape),
            (r'\\\(.{2}', String.Escape),
            (r'\\.\[.*\]', String.Escape),
            (r'\\.', String.Escape),
            (r'\\\n', Text, 'request'),
        ],
        'request': [
            (r'\n', Text, '#pop'),
            include('escapes'),
            (r'"[^\n"]+"', String.Double),
            (r'\d+', Number),
            (r'\S+', String),
            (r'\s+', Text),
        ],
    }

    # NOTE: defined without 'self', consistent with the other analyse_text
    # heuristics in this module.
    def analyse_text(text):
        if text[:1] != '.':
            return False
        if text[:3] == '.\\"':
            return True
        if text[:4] == '.TH ':
            return True
        # NOTE(review): text[3] raises IndexError for a 3-char input that
        # starts with '.' -- presumably inputs are whole files; confirm this
        # heuristic never sees such short strings.
        if text[1:3].isalnum() and text[3].isspace():
            return 0.9
class MozPreprocHashLexer(RegexLexer):
    """
    Lexer for Mozilla Preprocessor files (with '#' as the marker).

    Other data is left untouched.

    .. versionadded:: 2.0
    """
    name = 'mozhashpreproc'
    aliases = [name]
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            # A '#' at line start opens a directive: first the keyword
            # ('exprstart'), then the rest of the expression ('expr').
            (r'^#', Comment.Preproc, ('expr', 'exprstart')),
            (r'.+', Other),
        ],
        'exprstart': [
            # 'literal' consumes the rest of the line and leaves both states.
            (r'(literal)(.*)', bygroups(Comment.Preproc, Text), '#pop:2'),
            (words((
                'define', 'undef', 'if', 'ifdef', 'ifndef', 'else', 'elif',
                'elifdef', 'elifndef', 'endif', 'expand', 'filter', 'unfilter',
                'include', 'includesubst', 'error')),
             Comment.Preproc, '#pop'),
        ],
        'expr': [
            (words(('!', '!=', '==', '&&', '||')), Operator),
            (r'(defined)(\()', bygroups(Keyword, Punctuation)),
            (r'\)', Punctuation),
            (r'[0-9]+', Number.Decimal),
            (r'__\w+?__', Name.Variable),
            (r'@\w+?@', Name.Class),
            (r'\w+', Name),
            (r'\n', Text, '#pop'),
            (r'\s+', Text),
            (r'\S', Punctuation),
        ],
    }
class MozPreprocPercentLexer(MozPreprocHashLexer):
    """
    Lexer for Mozilla Preprocessor files (with '%' as the marker).

    Other data is left untouched.

    .. versionadded:: 2.0
    """
    name = 'mozpercentpreproc'
    aliases = [name]
    filenames = []
    mimetypes = []

    # Same grammar as the hash variant; only the 'root' marker differs,
    # the 'expr'/'exprstart' states are inherited.
    tokens = {
        'root': [
            (r'^%', Comment.Preproc, ('expr', 'exprstart')),
            (r'.+', Other),
        ],
    }
class MozPreprocXulLexer(DelegatingLexer):
    """
    Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
    `XmlLexer`.

    .. versionadded:: 2.0
    """
    name = "XUL+mozpreproc"
    aliases = ['xul+mozpreproc']
    filenames = ['*.xul.in']
    mimetypes = []

    def __init__(self, **options):
        # Delegate: XmlLexer handles the plain data, MozPreprocHashLexer
        # handles the preprocessor directives.
        super(MozPreprocXulLexer, self).__init__(
            XmlLexer, MozPreprocHashLexer, **options)
class MozPreprocJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
    `JavascriptLexer`.

    .. versionadded:: 2.0
    """
    name = "Javascript+mozpreproc"
    aliases = ['javascript+mozpreproc']
    filenames = ['*.js.in']
    mimetypes = []

    def __init__(self, **options):
        # Delegate: JavascriptLexer handles the plain data,
        # MozPreprocHashLexer handles the preprocessor directives.
        super(MozPreprocJavascriptLexer, self).__init__(
            JavascriptLexer, MozPreprocHashLexer, **options)
class MozPreprocCssLexer(DelegatingLexer):
    """
    Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
    `CssLexer`.

    .. versionadded:: 2.0
    """
    name = "CSS+mozpreproc"
    aliases = ['css+mozpreproc']
    filenames = ['*.css.in']
    mimetypes = []

    def __init__(self, **options):
        # Note: unlike the XUL/JS variants this uses the percent-marker
        # preprocessor lexer, not the hash one.
        super(MozPreprocCssLexer, self).__init__(
            CssLexer, MozPreprocPercentLexer, **options)
| mpl-2.0 |
maciekcc/tensorflow | tensorflow/contrib/image/__init__.py | 38 | 1673 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""##Ops for image manipulation.
### API
This module provides functions for image manipulation; currently, only
projective transforms (including rotation) are supported.
## Image `Ops`
@@angles_to_projective_transforms
@@compose_transforms
@@rotate
@@transform
@@bipartite_match
@@single_image_random_dot_stereograms
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=line-too-long
from tensorflow.contrib.image.python.ops.image_ops import angles_to_projective_transforms
from tensorflow.contrib.image.python.ops.image_ops import compose_transforms
from tensorflow.contrib.image.python.ops.image_ops import rotate
from tensorflow.contrib.image.python.ops.image_ops import transform
from tensorflow.contrib.image.python.ops.single_image_random_dot_stereograms import single_image_random_dot_stereograms
from tensorflow.python.util.all_util import remove_undocumented
# Prune the module namespace -- presumably keeping only the @@-listed
# symbols in the docstring above (see tensorflow.python.util.all_util).
remove_undocumented(__name__)
| apache-2.0 |
jpedroan/megua | megua/megsiacua.py | 1 | 4289 | # coding=utf-8
"""
MegSiacua -- Functions to work with one or a list of ExSiacua exercises.
READ THIS: MegBook inherits this class! MegBook is the author front-end.
AUTHORS:
- Pedro Cruz (2016-01): first modifications for use in SMC.
TESTS: check MegBook
"""
#*****************************************************************************
# Copyright (C) 2016 Pedro Cruz <PedroCruz@ua.pt>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
class MegSiacua:
    r"""
    MegSiacua -- functions to work with one or a list of ExSiacua exercises.

    MegBook inherits this class and is the author front-end, so ``self``
    is expected to provide ``megbook_store``, ``exerciseinstance`` and
    ``_current_unique_name``.
    """

    # TODO: review all of this (orig: "rever isto tudo")
    def siacua(self,
               # new fields
               targetmachine=None,
               targetusername="(no username)",
               ekeys=None,
               course="calculo3",
               # siacua
               level=1,
               slip=0.05,
               guess=0.25,
               discr=0.5,
               concepts=None,
               # pmate
               idtree=None,
               # auxiliary
               grid2x2=False,
               verbose=False,
               # old fields
               usernamesiacua="(no username)",
               siacuatest=None,
               sendpost=True
               ):
        r"""Send instances of the current exercise to the siacua system.

        INPUT:

        - ``ekeys`` -- list of integer keys, each generating one instance
          (defaults to an empty list).
        - ``concepts`` -- list of ``(concept_id, weight)`` pairs
          (defaults to ``[(0, 1)]``).
        - the remaining arguments are forwarded unchanged to
          ``ExSiacua.siacua``.

        OUTPUT:

        - prints the list of exercises sent to the siacua system.

        NOTE:

        - you can export between 3 and 6 wrong options and 1 right.

        TODO: securitykey: implement in a megua-server configuration file.
        """
        # Avoid the mutable-default-argument pitfall: the previous defaults
        # ([] and [(0, 1)]) were shared between calls; normalize here instead.
        if ekeys is None:
            ekeys = []
        if concepts is None:
            concepts = [(0, 1)]

        # Get summary, problem, answer and class_text.
        row = self.megbook_store.get_classrow(self._current_unique_name)
        if not row:
            # print() with a single argument is valid in both Python 2 and 3
            # (the original used a py2-only print statement).
            print("megsiacua module: %s cannot be accessed on database." % self._current_unique_name)
            return

        # Create an instance (ekey=0 because it needs one).
        ex_instance = self.exerciseinstance(row=row, ekey=0)

        # The exercise instance sends the generated instances to siacua.
        ex_instance.siacua(
            targetmachine,
            targetusername,
            ekeys,
            course,
            # siacua
            level,
            slip,
            guess,
            discr,
            concepts,
            # pmate
            idtree,
            # auxiliary
            grid2x2,
            verbose,
            # old fields
            usernamesiacua,
            siacuatest,
            sendpost
        )
        # done

    def siacuapreview(self, ekeys, unique_name=None):
        r"""Write an html file previewing instances of an exercise.

        INPUT:

        - ``ekeys`` -- list of numbers, each generating one problem instance.
        - ``unique_name`` -- exercise name (name in
          "class E12X34_something_001(Exercise):"); defaults to the current
          exercise.

        OUTPUT:

        - writes an html file with all the instances.

        EXAMPLE::

            sage: ex.siacuapreview(ekeys=[1,2,5])
        """
        if not unique_name:
            unique_name = self._current_unique_name

        # Get summary, problem, answer and class_text.
        row = self.megbook_store.get_classrow(unique_name)
        if not row:
            print("megsiacua module: %s cannot be accessed on database." % unique_name)
            return

        # Create an instance (ekey=0 because it needs one).
        ex_instance = self.exerciseinstance(row=row, ekey=0)

        # The exercise instance writes the preview file.
        ex_instance.siacuapreview(ekeys)

# end class MegSiacua
| gpl-3.0 |
idea4bsd/idea4bsd | python/helpers/profiler/thriftpy/protocol/binary.py | 35 | 10595 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import struct
from ..thrift import TType
from .exc import TProtocolException
# Strict-protocol framing constants.  The version values are written as the
# (negative) Python ints whose 32-bit two's-complement bit patterns match
# the hex values shown in the adjacent comments.
# VERSION_MASK = 0xffff0000
VERSION_MASK = -65536
# VERSION_1 = 0x80010000
VERSION_1 = -2147418112
TYPE_MASK = 0x000000ff
# Pre-compiled Struct objects: hoists format-string parsing/lookup out of
# the per-call path of these hot serialization helpers.
_PACK_I8 = struct.Struct("!b").pack
_PACK_I16 = struct.Struct("!h").pack
_PACK_I32 = struct.Struct("!i").pack
_PACK_I64 = struct.Struct("!q").pack
_PACK_DOUBLE = struct.Struct("!d").pack


def pack_i8(byte):
    """Serialize a signed 8-bit integer to big-endian bytes."""
    return _PACK_I8(byte)


def pack_i16(i16):
    """Serialize a signed 16-bit integer to big-endian bytes."""
    return _PACK_I16(i16)


def pack_i32(i32):
    """Serialize a signed 32-bit integer to big-endian bytes."""
    return _PACK_I32(i32)


def pack_i64(i64):
    """Serialize a signed 64-bit integer to big-endian bytes."""
    return _PACK_I64(i64)


def pack_double(dub):
    """Serialize a float as a big-endian IEEE-754 double."""
    return _PACK_DOUBLE(dub)


def pack_string(string):
    """Serialize *string* (bytes) as an i32 length prefix plus raw bytes."""
    # Format depends on the length, so this one cannot be pre-compiled.
    return struct.pack("!i%ds" % len(string), len(string), string)
# Pre-compiled Struct objects: hoists format-string parsing/lookup out of
# the per-call path of these hot deserialization helpers.
_UNPACK_I8 = struct.Struct("!b").unpack
_UNPACK_I16 = struct.Struct("!h").unpack
_UNPACK_I32 = struct.Struct("!i").unpack
_UNPACK_I64 = struct.Struct("!q").unpack
_UNPACK_DOUBLE = struct.Struct("!d").unpack


def unpack_i8(buf):
    """Deserialize a big-endian signed 8-bit integer from *buf*."""
    return _UNPACK_I8(buf)[0]


def unpack_i16(buf):
    """Deserialize a big-endian signed 16-bit integer from *buf*."""
    return _UNPACK_I16(buf)[0]


def unpack_i32(buf):
    """Deserialize a big-endian signed 32-bit integer from *buf*."""
    return _UNPACK_I32(buf)[0]


def unpack_i64(buf):
    """Deserialize a big-endian signed 64-bit integer from *buf*."""
    return _UNPACK_I64(buf)[0]


def unpack_double(buf):
    """Deserialize a big-endian IEEE-754 double from *buf*."""
    return _UNPACK_DOUBLE(buf)[0]
def write_message_begin(outbuf, name, ttype, seqid, strict=True):
    """Write a Thrift message header (strict or old-style framing)."""
    encoded_name = name.encode('utf-8')
    if strict:
        # Strict framing: version word with the message type OR-ed in,
        # followed by the method name.
        outbuf.write(pack_i32(VERSION_1 | ttype))
        outbuf.write(pack_string(encoded_name))
    else:
        # Old framing: name first, then a one-byte message type.
        outbuf.write(pack_string(encoded_name))
        outbuf.write(pack_i8(ttype))
    outbuf.write(pack_i32(seqid))
def write_field_begin(outbuf, ttype, fid):
    """Write a field header: a type byte followed by the i16 field id."""
    header = pack_i8(ttype) + pack_i16(fid)
    outbuf.write(header)


def write_field_stop(outbuf):
    """Write the STOP sentinel that terminates a struct's field list."""
    outbuf.write(pack_i8(TType.STOP))


def write_list_begin(outbuf, etype, size):
    """Write a list/set header: element type byte plus i32 element count."""
    header = pack_i8(etype) + pack_i32(size)
    outbuf.write(header)


def write_map_begin(outbuf, ktype, vtype, size):
    """Write a map header: key/value type bytes plus i32 entry count."""
    header = pack_i8(ktype) + pack_i8(vtype) + pack_i32(size)
    outbuf.write(header)
def write_val(outbuf, ttype, val, spec=None):
    """Recursively serialize *val* of thrift type *ttype* to *outbuf*.

    *spec* carries the nested element-type information for containers and
    is unused for scalar types; struct values carry their own layout in
    ``thrift_spec``.
    """
    if ttype == TType.BOOL:
        if val:
            outbuf.write(pack_i8(1))
        else:
            outbuf.write(pack_i8(0))

    elif ttype == TType.BYTE:
        outbuf.write(pack_i8(val))

    elif ttype == TType.I16:
        outbuf.write(pack_i16(val))

    elif ttype == TType.I32:
        outbuf.write(pack_i32(val))

    elif ttype == TType.I64:
        outbuf.write(pack_i64(val))

    elif ttype == TType.DOUBLE:
        outbuf.write(pack_double(val))

    elif ttype == TType.STRING:
        # Text is encoded to utf-8; bytes pass through untouched.
        if not isinstance(val, bytes):
            val = val.encode('utf-8')
        outbuf.write(pack_string(val))

    elif ttype == TType.SET or ttype == TType.LIST:
        # spec is either (element_type, element_spec) or a bare type code.
        if isinstance(spec, tuple):
            e_type, t_spec = spec[0], spec[1]
        else:
            e_type, t_spec = spec, None

        val_len = len(val)
        write_list_begin(outbuf, e_type, val_len)
        for e_val in val:
            write_val(outbuf, e_type, e_val, t_spec)

    elif ttype == TType.MAP:
        # Each of spec[0]/spec[1] is either a bare type code (int) or a
        # (type, nested_spec) pair.
        if isinstance(spec[0], int):
            k_type = spec[0]
            k_spec = None
        else:
            k_type, k_spec = spec[0]

        if isinstance(spec[1], int):
            v_type = spec[1]
            v_spec = None
        else:
            v_type, v_spec = spec[1]

        write_map_begin(outbuf, k_type, v_type, len(val))
        for k in iter(val):
            write_val(outbuf, k_type, k, k_spec)
            write_val(outbuf, v_type, val[k], v_spec)

    elif ttype == TType.STRUCT:
        # thrift_spec entries are (type, name, required) or
        # (type, name, container_spec, required).
        for fid in iter(val.thrift_spec):
            f_spec = val.thrift_spec[fid]
            if len(f_spec) == 3:
                f_type, f_name, f_req = f_spec
                f_container_spec = None
            else:
                f_type, f_name, f_container_spec, f_req = f_spec

            v = getattr(val, f_name)
            if v is None:
                # Unset fields are simply omitted from the wire format.
                continue

            write_field_begin(outbuf, f_type, fid)
            write_val(outbuf, f_type, v, f_container_spec)
        write_field_stop(outbuf)
def read_message_begin(inbuf, strict=True):
    """Read a message header and return ``(name, message_type, seqid)``.

    Accepts strict framing (version word first); when ``strict`` is False
    it also accepts the old name-first framing.
    """
    sz = unpack_i32(inbuf.read(4))
    if sz < 0:
        # A negative first word is the strict-framing version word.
        version = sz & VERSION_MASK
        if version != VERSION_1:
            raise TProtocolException(
                type=TProtocolException.BAD_VERSION,
                message='Bad version in read_message_begin: %d' % (sz))
        name_sz = unpack_i32(inbuf.read(4))
        name = inbuf.read(name_sz).decode('utf-8')

        # The low byte of the version word carries the message type.
        type_ = sz & TYPE_MASK
    else:
        if strict:
            raise TProtocolException(type=TProtocolException.BAD_VERSION,
                                     message='No protocol version header')
        # Old-style framing: the first word was the name length.
        name = inbuf.read(sz).decode('utf-8')
        type_ = unpack_i8(inbuf.read(1))

    seqid = unpack_i32(inbuf.read(4))
    return name, type_, seqid
def read_field_begin(inbuf):
    """Read a field header; returns (ftype, fid), with fid == 0 for STOP."""
    ftype = unpack_i8(inbuf.read(1))
    if ftype == TType.STOP:
        # STOP has no field id on the wire.
        return ftype, 0
    return ftype, unpack_i16(inbuf.read(2))


def read_list_begin(inbuf):
    """Read a list/set header; returns (element_type, size)."""
    etype = unpack_i8(inbuf.read(1))
    size = unpack_i32(inbuf.read(4))
    return etype, size


def read_map_begin(inbuf):
    """Read a map header; returns (key_type, value_type, size)."""
    ktype = unpack_i8(inbuf.read(1))
    vtype = unpack_i8(inbuf.read(1))
    size = unpack_i32(inbuf.read(4))
    return ktype, vtype, size
def read_val(inbuf, ttype, spec=None, decode_response=True):
    """Recursively deserialize one value of thrift type *ttype* from *inbuf*.

    *spec* mirrors the container/struct spec accepted by ``write_val``;
    *decode_response* controls whether STRING payloads are decoded from
    utf-8 (falling back to raw bytes on decode failure).
    """
    if ttype == TType.BOOL:
        return bool(unpack_i8(inbuf.read(1)))

    elif ttype == TType.BYTE:
        return unpack_i8(inbuf.read(1))

    elif ttype == TType.I16:
        return unpack_i16(inbuf.read(2))

    elif ttype == TType.I32:
        return unpack_i32(inbuf.read(4))

    elif ttype == TType.I64:
        return unpack_i64(inbuf.read(8))

    elif ttype == TType.DOUBLE:
        return unpack_double(inbuf.read(8))

    elif ttype == TType.STRING:
        sz = unpack_i32(inbuf.read(4))
        byte_payload = inbuf.read(sz)

        # Since we cannot tell if we're getting STRING or BINARY
        # if not asked not to decode, try both
        if decode_response:
            try:
                return byte_payload.decode('utf-8')
            except UnicodeDecodeError:
                pass
        return byte_payload

    elif ttype == TType.SET or ttype == TType.LIST:
        if isinstance(spec, tuple):
            v_type, v_spec = spec[0], spec[1]
        else:
            v_type, v_spec = spec, None

        result = []
        r_type, sz = read_list_begin(inbuf)
        # the v_type is useless here since we already get it from spec
        if r_type != v_type:
            # Declared element type mismatch: drain the payload and return
            # an empty list instead of mistyped data.
            for _ in range(sz):
                skip(inbuf, r_type)
            return []

        for i in range(sz):
            result.append(read_val(inbuf, v_type, v_spec, decode_response))
        return result

    elif ttype == TType.MAP:
        if isinstance(spec[0], int):
            k_type = spec[0]
            k_spec = None
        else:
            k_type, k_spec = spec[0]

        if isinstance(spec[1], int):
            v_type = spec[1]
            v_spec = None
        else:
            v_type, v_spec = spec[1]

        result = {}
        sk_type, sv_type, sz = read_map_begin(inbuf)
        if sk_type != k_type or sv_type != v_type:
            # Declared key/value type mismatch: drain and return empty.
            for _ in range(sz):
                skip(inbuf, sk_type)
                skip(inbuf, sv_type)
            return {}

        for i in range(sz):
            k_val = read_val(inbuf, k_type, k_spec, decode_response)
            v_val = read_val(inbuf, v_type, v_spec, decode_response)
            result[k_val] = v_val
        return result

    elif ttype == TType.STRUCT:
        # For structs, spec is the struct class itself.
        obj = spec()
        read_struct(inbuf, obj, decode_response)
        return obj
def read_struct(inbuf, obj, decode_response=True):
    """Populate *obj* (a thrift struct instance) from *inbuf* in place.

    Reads fields until the STOP sentinel, skipping any field id or field
    type the object's ``thrift_spec`` does not declare.
    """
    while True:
        f_type, fid = read_field_begin(inbuf)
        if f_type == TType.STOP:
            break

        if fid not in obj.thrift_spec:
            # Unknown field id: discard its payload and keep going.
            skip(inbuf, f_type)
            continue

        if len(obj.thrift_spec[fid]) == 3:
            sf_type, f_name, f_req = obj.thrift_spec[fid]
            f_container_spec = None
        else:
            sf_type, f_name, f_container_spec, f_req = obj.thrift_spec[fid]

        # it really should equal here. but since we already wasted
        # space storing the duplicate info, let's check it.
        if f_type != sf_type:
            skip(inbuf, f_type)
            continue

        setattr(obj, f_name,
                read_val(inbuf, f_type, f_container_spec, decode_response))
def skip(inbuf, ftype):
    """Consume and discard one value of type *ftype* from *inbuf*."""
    if ftype == TType.BOOL or ftype == TType.BYTE:
        inbuf.read(1)

    elif ftype == TType.I16:
        inbuf.read(2)

    elif ftype == TType.I32:
        inbuf.read(4)

    elif ftype == TType.I64:
        inbuf.read(8)

    elif ftype == TType.DOUBLE:
        inbuf.read(8)

    elif ftype == TType.STRING:
        # Length-prefixed: read the i32 length, then skip that many bytes.
        inbuf.read(unpack_i32(inbuf.read(4)))

    elif ftype == TType.SET or ftype == TType.LIST:
        v_type, sz = read_list_begin(inbuf)
        for i in range(sz):
            skip(inbuf, v_type)

    elif ftype == TType.MAP:
        k_type, v_type, sz = read_map_begin(inbuf)
        for i in range(sz):
            skip(inbuf, k_type)
            skip(inbuf, v_type)

    elif ftype == TType.STRUCT:
        # Skip field by field until the STOP sentinel.
        while True:
            f_type, fid = read_field_begin(inbuf)
            if f_type == TType.STOP:
                break
            skip(inbuf, f_type)
class TBinaryProtocol(object):
    """Binary implementation of the Thrift protocol driver."""

    def __init__(self, trans,
                 strict_read=True, strict_write=True,
                 decode_response=True):
        # trans: the underlying transport (must provide read/write).
        self.trans = trans
        self.strict_read = strict_read
        self.strict_write = strict_write
        # When True, STRING payloads are utf-8 decoded when possible.
        self.decode_response = decode_response

    def skip(self, ttype):
        """Consume and discard one value of type *ttype* from the transport."""
        skip(self.trans, ttype)

    def read_message_begin(self):
        """Read a message header; returns (api_name, message_type, seqid)."""
        api, ttype, seqid = read_message_begin(
            self.trans, strict=self.strict_read)
        return api, ttype, seqid

    def read_message_end(self):
        # The binary protocol has no message trailer.
        pass

    def write_message_begin(self, name, ttype, seqid):
        """Write a message header for method *name*."""
        write_message_begin(self.trans, name, ttype, seqid,
                            strict=self.strict_write)

    def write_message_end(self):
        # The binary protocol has no message trailer.
        pass

    def read_struct(self, obj):
        """Deserialize from the transport into *obj* in place."""
        return read_struct(self.trans, obj, self.decode_response)

    def write_struct(self, obj):
        """Serialize the thrift struct *obj* to the transport."""
        write_val(self.trans, TType.STRUCT, obj)
class TBinaryProtocolFactory(object):
    """Factory producing TBinaryProtocol instances with fixed options."""

    def __init__(self, strict_read=True, strict_write=True,
                 decode_response=True):
        self.strict_read = strict_read
        self.strict_write = strict_write
        self.decode_response = decode_response

    def get_protocol(self, trans):
        """Build a TBinaryProtocol bound to *trans* with this factory's options."""
        return TBinaryProtocol(
            trans, self.strict_read, self.strict_write, self.decode_response)
| apache-2.0 |
lxn2/mxnet | python/mxnet/rnn/io.py | 9 | 6562 | # coding: utf-8
# pylint: disable=too-many-arguments, too-many-locals
"""Definition of various recurrent neural network cells."""
from __future__ import print_function
import bisect
import random
import numpy as np
from ..io import DataIter, DataBatch, DataDesc
from .. import ndarray
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n', start_label=0):
    """Encode sentences as integer indices, optionally building the
    string-token -> index mapping along the way.

    Unknown tokens are added to the vocabulary only when it is being
    built here (i.e. when ``vocab`` is not supplied).

    Parameters
    ----------
    sentences : list of list of str
        Sentences to encode; each sentence is a list of string tokens.
    vocab : None or dict of str -> int
        Optional pre-built vocabulary.
    invalid_label : int, default -1
        Index reserved for the invalid token, like <end-of-sentence>.
    invalid_key : str, default '\\n'
        Key for the invalid token ('\\n' meaning end of sentence).
    start_label : int
        Lowest index handed out to new tokens.

    Returns
    -------
    result : list of list of int
        The encoded sentences.
    vocab : dict of str -> int
        The resulting vocabulary.
    """
    next_label = start_label
    if vocab is None:
        vocab = {invalid_key: invalid_label}
        new_vocab = True
    else:
        new_vocab = False

    encoded = []
    for sentence in sentences:
        ids = []
        for token in sentence:
            if token not in vocab:
                assert new_vocab, "Unknown token %s" % token
                if next_label == invalid_label:
                    # Never hand out the reserved invalid label.
                    next_label += 1
                vocab[token] = next_label
                next_label += 1
            ids.append(vocab[token])
        encoded.append(ids)
    return encoded, vocab
class BucketSentenceIter(DataIter):
    """Simple bucketing iterator for language model.

    The label at each sequence step is the following token
    in the sequence.

    Parameters
    ----------
    sentences : list of list of int
        Encoded sentences.
    batch_size : int
        Batch size of the data.
    invalid_label : int, optional
        Key for invalid label, e.g. <end-of-sentence>. The default is -1.
    dtype : str, optional
        Data type of the encoding. The default data type is 'float32'.
    buckets : list of int, optional
        Size of the data buckets. Automatically generated if None.
    data_name : str, optional
        Name of the data. The default name is 'data'.
    label_name : str, optional
        Name of the label. The default name is 'softmax_label'.
    layout : str, optional
        Format of data and label. 'NT' means (batch_size, length)
        and 'TN' means (length, batch_size).
    """
    def __init__(self, sentences, batch_size, buckets=None, invalid_label=-1,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 layout='NT'):
        super(BucketSentenceIter, self).__init__()
        if not buckets:
            # Auto-generate one bucket per sentence length that occurs at
            # least batch_size times, so every bucket can fill a full batch.
            buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences]))
                       if j >= batch_size]
        buckets.sort()

        # Pad each sentence with invalid_label up to its bucket length.
        # Sentences longer than the largest bucket are discarded.
        ndiscard = 0
        self.data = [[] for _ in buckets]
        for i, sent in enumerate(sentences):
            buck = bisect.bisect_left(buckets, len(sent))
            if buck == len(buckets):
                ndiscard += 1
                continue
            buff = np.full((buckets[buck],), invalid_label, dtype=dtype)
            buff[:len(sent)] = sent
            self.data[buck].append(buff)

        self.data = [np.asarray(i, dtype=dtype) for i in self.data]

        print("WARNING: discarded %d sentences longer than the largest bucket."%ndiscard)

        self.batch_size = batch_size
        self.buckets = buckets
        self.data_name = data_name
        self.label_name = label_name
        self.dtype = dtype
        self.invalid_label = invalid_label
        self.nddata = []
        self.ndlabel = []
        # Position of the batch axis: 0 for 'NT', 1 for 'TN', -1 otherwise.
        self.major_axis = layout.find('N')
        self.layout = layout
        self.default_bucket_key = max(buckets)

        if self.major_axis == 0:
            self.provide_data = [DataDesc(
                name=self.data_name, shape=(batch_size, self.default_bucket_key),
                layout=self.layout)]
            self.provide_label = [DataDesc(
                name=self.label_name, shape=(batch_size, self.default_bucket_key),
                layout=self.layout)]
        elif self.major_axis == 1:
            self.provide_data = [DataDesc(
                name=self.data_name, shape=(self.default_bucket_key, batch_size),
                layout=self.layout)]
            self.provide_label = [DataDesc(
                name=self.label_name, shape=(self.default_bucket_key, batch_size),
                layout=self.layout)]
        else:
            # Bug fix: the original message had no % argument applied, so the
            # offending layout string was never interpolated into the error.
            raise ValueError(
                "Invalid layout %s: Must be NT (batch major) or TN (time major)"
                % layout)

        # Precompute (bucket index, row offset) pairs for every full batch;
        # leftover rows that cannot fill a batch are skipped.
        self.idx = []
        for i, buck in enumerate(self.data):
            self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)])
        self.curr_idx = 0

        self.reset()

    def reset(self):
        """Resets the iterator to the beginning of the data."""
        self.curr_idx = 0
        # Shuffle both the order of batches and the rows within each bucket.
        random.shuffle(self.idx)
        for buck in self.data:
            np.random.shuffle(buck)

        self.nddata = []
        self.ndlabel = []
        for buck in self.data:
            # Labels are the inputs shifted left by one step; the final
            # position is padded with invalid_label.
            label = np.empty_like(buck)
            label[:, :-1] = buck[:, 1:]
            label[:, -1] = self.invalid_label
            self.nddata.append(ndarray.array(buck, dtype=self.dtype))
            self.ndlabel.append(ndarray.array(label, dtype=self.dtype))

    def next(self):
        """Returns the next batch of data.

        Raises
        ------
        StopIteration
            When all precomputed batches have been consumed.
        """
        if self.curr_idx == len(self.idx):
            raise StopIteration
        i, j = self.idx[self.curr_idx]
        self.curr_idx += 1

        if self.major_axis == 1:
            # Time-major layout: transpose (batch, time) -> (time, batch).
            data = self.nddata[i][j:j+self.batch_size].T
            label = self.ndlabel[i][j:j+self.batch_size].T
        else:
            data = self.nddata[i][j:j+self.batch_size]
            label = self.ndlabel[i][j:j+self.batch_size]

        return DataBatch([data], [label], pad=0,
                         bucket_key=self.buckets[i],
                         provide_data=[DataDesc(
                             name=self.data_name, shape=data.shape,
                             layout=self.layout)],
                         provide_label=[DataDesc(
                             name=self.label_name, shape=label.shape,
                             layout=self.layout)])
| apache-2.0 |
spacewalkproject/spacewalk | backend/server/test/test_registration.py | 14 | 1054 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from spacewalk.common.rhnConfig import initCFG
from spacewalk.server.rhnSQL import initDB
from spacewalk.server.xmlrpc import registration

# Configure the XML-RPC server component and connect to the dev database.
initCFG("server.xmlrpc")
initDB('rhnuser/rhnuser@webdev')

r = registration.Registration()
data = {
    'os_release': '8.0',
    'profile_name': 'test local',
    'architecture': 'i686',
    'token': '382c712e94b2505f6070f011e8ec1a7e',
}

# Register a new system and persist the returned system id.  A context
# manager guarantees the handle is flushed and closed; the original
# open(...).write(...) leaked the file object.
with open("/tmp/rereg-systemid", "w+") as fobj:
    fobj.write(r.new_system(data))
| gpl-2.0 |
jostep/tensorflow | tensorflow/python/ops/math_grad_test.py | 26 | 7137 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SquaredDifferenceOpTest(test.TestCase):
    """Gradient checks for math_ops.squared_difference with broadcasting."""

    def _testGrad(self, left_shape, right_shape):
        # The op broadcasts, so the output takes the higher-rank shape.
        if len(left_shape) > len(right_shape):
            output_shape = left_shape
        else:
            output_shape = right_shape
        l = np.random.randn(*left_shape)
        r = np.random.randn(*right_shape)

        with self.test_session(use_gpu=True):
            left_tensor = constant_op.constant(l, shape=left_shape)
            right_tensor = constant_op.constant(r, shape=right_shape)
            output = math_ops.squared_difference(left_tensor, right_tensor)
            # Compare numeric against symbolic gradients w.r.t. each input.
            left_err = gradient_checker.compute_gradient_error(
                left_tensor, left_shape, output, output_shape, x_init_value=l)
            right_err = gradient_checker.compute_gradient_error(
                right_tensor, right_shape, output, output_shape, x_init_value=r)
            self.assertLess(left_err, 1e-10)
            self.assertLess(right_err, 1e-10)

    def testGrad(self):
        # Exercise broadcasting in both directions (left and right lower rank).
        self._testGrad([1, 2, 3, 2], [3, 2])
        self._testGrad([2, 4], [3, 2, 4])
class AbsOpTest(test.TestCase):
    """Gradient checks for math_ops.abs over real and complex inputs."""

    def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
        """Returns samples from a normal distribution shifted `bias` away from 0."""
        value = np.random.randn(*shape) * sigma
        return value + np.sign(value) * bias

    def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
        # Fixed seed keeps the random draws (and thus the checked gradients)
        # reproducible across runs.
        np.random.seed(7)
        if dtype in (dtypes.complex64, dtypes.complex128):
            # Bias both the real and imaginary parts away from zero.
            value = math_ops.complex(
                self._biasedRandN(
                    shape, bias=bias, sigma=sigma),
                self._biasedRandN(
                    shape, bias=bias, sigma=sigma))
        else:
            value = ops.convert_to_tensor(
                self._biasedRandN(
                    shape, bias=bias), dtype=dtype)

        with self.test_session(use_gpu=True):
            output = math_ops.abs(value)
            error = gradient_checker.compute_gradient_error(
                value, shape, output, output.get_shape().as_list())
            self.assertLess(error, max_error)

    def testComplexAbs(self):
        # Bias random test values away from zero to avoid numeric instabilities.
        self._testGrad(
            [3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
        self._testGrad(
            [3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)

        # Ensure stability near the pole at zero.
        self._testGrad(
            [3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
        self._testGrad(
            [3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)
class MinOrMaxGradientTest(test.TestCase):
    """Gradient checks for reduce_min/reduce_max over tied inputs."""

    def _check_reduction_grad(self, reduce_fn):
        # Duplicate the single input so the reduction sees tied extrema.
        inputs = constant_op.constant([1.0], dtype=dtypes.float32)
        outputs = reduce_fn(array_ops.concat([inputs, inputs], 0))
        with self.test_session():
            error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
            self.assertLess(error, 1e-4)

    def testMinGradient(self):
        self._check_reduction_grad(math_ops.reduce_min)

    def testMaxGradient(self):
        self._check_reduction_grad(math_ops.reduce_max)
class ProdGradientTest(test.TestCase):
    """Gradient checks for reduce_prod, with and without an explicit axis."""

    def _check_prod_grad(self, axis=None):
        inputs = constant_op.constant([[1., 2.], [3., 4.]],
                                      dtype=dtypes.float32)
        if axis is None:
            outputs = math_ops.reduce_prod(inputs)
        else:
            outputs = math_ops.reduce_prod(inputs, axis)
        with self.test_session():
            error = gradient_checker.compute_gradient_error(
                inputs, inputs.get_shape().as_list(),
                outputs, outputs.get_shape().as_list())
            self.assertLess(error, 1e-4)

    def testProdGradient(self):
        self._check_prod_grad()

    def testProdGradientForNegativeAxis(self):
        self._check_prod_grad(axis=-1)
class SegmentMinOrMaxGradientTest(test.TestCase):
    """Gradient checks for segment_min/segment_max, with and without ties."""

    def _check_segment_grad(self, segment_fn):
        data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
        segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
        reduced = segment_fn(data, segment_ids)
        with self.test_session():
            error = gradient_checker.compute_gradient_error(data, [3], reduced,
                                                            [2])
            self.assertLess(error, 1e-4)

    def _check_segment_grad_with_ties(self, segment_fn):
        # Duplicate a single value so the segment reduction has a tie.
        inputs = constant_op.constant([1.0], dtype=dtypes.float32)
        data = array_ops.concat([inputs, inputs], 0)
        segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
        reduced = segment_fn(data, segment_ids)
        with self.test_session():
            error = gradient_checker.compute_gradient_error(inputs, [1], reduced,
                                                            [1])
            self.assertLess(error, 1e-4)

    def testSegmentMinGradient(self):
        self._check_segment_grad(math_ops.segment_min)

    def testSegmentMaxGradient(self):
        self._check_segment_grad(math_ops.segment_max)

    def testSegmentMinGradientWithTies(self):
        self._check_segment_grad_with_ties(math_ops.segment_min)

    def testSegmentMaxGradientWithTies(self):
        self._check_segment_grad_with_ties(math_ops.segment_max)
# Run all gradient tests when this file is executed directly.
if __name__ == "__main__":
    test.main()
| apache-2.0 |
GbalsaC/bitnamiP | common/djangoapps/course_modes/migrations/0007_add_description.py | 114 | 2270 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add the nullable CourseMode.description column and
    switch course_id from CharField to CourseKeyField."""

    def forwards(self, orm):
        # Adding field 'CourseMode.description'
        db.add_column('course_modes_coursemode', 'description',
                      self.gf('django.db.models.fields.TextField')(null=True, blank=True),
                      keep_default=False)

        # Changing field 'CourseMode.course_id'
        db.alter_column('course_modes_coursemode', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))

    def backwards(self, orm):
        # Deleting field 'CourseMode.description'
        db.delete_column('course_modes_coursemode', 'description')

        # Changing field 'CourseMode.course_id'
        db.alter_column('course_modes_coursemode', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))

    # Frozen ORM state South uses to reconstruct the model at this point in
    # migration history; not a live model definition.
    models = {
        'course_modes.coursemode': {
            'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['course_modes']
| agpl-3.0 |
resmo/ansible | test/units/modules/network/nxos/test_nxos_acl_interface.py | 68 | 2902 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
# Standard Ansible boilerplate giving all classes new-style semantics on
# Python 2.  The original line was corrupted ("__metacl_interfaceass__"),
# which merely created a junk module attribute and left the boilerplate
# ineffective.
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_acl_interface
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosAclInterfaceModule(TestNxosModule):
    """Unit tests for the nxos_acl_interface module."""

    module = nxos_acl_interface

    def setUp(self):
        """Patch out all device I/O before each test."""
        super(TestNxosAclInterfaceModule, self).setUp()

        self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_acl_interface.run_commands')
        self.run_commands = self.mock_run_commands.start()

        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_acl_interface.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        """Stop the patchers started in setUp."""
        super(TestNxosAclInterfaceModule, self).tearDown()
        self.mock_run_commands.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, device=''):
        """Wire run_commands up to replay canned CLI output from fixture files."""

        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()

            for item in commands:
                try:
                    command = item['command']
                except (KeyError, TypeError):
                    # Bug fix: subscripting raises KeyError (dict without a
                    # 'command' key) or TypeError (plain string command); the
                    # original caught ValueError, which neither raises, so
                    # string commands crashed instead of falling back.
                    command = item
                # Fixture file name: the command up to any '| filter', with
                # spaces replaced by underscores.
                filename = '%s.txt' % str(command).split(' | ')[0].replace(' ', '_')
                output.append(load_fixture('nxos_acl_interface', filename))
            return output

        self.run_commands.side_effect = load_from_file
        self.load_config.return_value = None

    def test_nxos_acl_interface(self):
        set_module_args(dict(name='ANSIBLE', interface='ethernet1/41', direction='egress'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['interface ethernet1/41', 'ip access-group ANSIBLE out'])

    def test_nxos_acl_interface_remove(self):
        set_module_args(dict(name='copp-system-p-acl-bgp', interface='ethernet1/41',
                             direction='egress', state='absent'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['interface ethernet1/41', 'no ip access-group copp-system-p-acl-bgp out'])
| gpl-3.0 |
chirilo/kuma | vendor/packages/pygments/formatters/other.py | 73 | 5162 | # -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
class NullFormatter(Formatter):
    """
    Output the text unchanged without any formatting.
    """
    name = 'Text only'
    aliases = ['text', 'null']
    filenames = ['*.txt']

    def format(self, tokensource, outfile):
        # Pass each token's text straight through.  The encoding check is
        # hoisted out of the loop since it cannot change mid-stream.
        enc = self.encoding
        if enc:
            for _ttype, value in tokensource:
                outfile.write(value.encode(enc))
        else:
            for _ttype, value in tokensource:
                outfile.write(value)
class RawTokenFormatter(Formatter):
    r"""
    Format tokens as a raw representation for storing token streams.

    The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
    be converted to a token stream with the `RawTokenLexer`, described in the
    :doc:`lexer list <lexers>`.

    Only two options are accepted:

    `compress`
        If set to ``'gz'`` or ``'bz2'``, compress the output with the given
        compression algorithm after encoding (default: ``''``).
    `error_color`
        If set to a color name, highlight error tokens using that color.  If
        set but with no value, defaults to ``'red'``.

        .. versionadded:: 0.11
    """
    name = 'Raw tokens'
    aliases = ['raw', 'tokens']
    filenames = ['*.raw']

    unicodeoutput = False

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # We ignore self.encoding if it is set, since it gets set for lexer
        # and formatter if given with -Oencoding on the command line.
        # The RawTokenFormatter outputs only ASCII. Override here.
        self.encoding = 'ascii'  # let pygments.format() do the right thing
        self.compress = get_choice_opt(options, 'compress',
                                       ['', 'none', 'gz', 'bz2'], '')
        self.error_color = options.get('error_color', None)
        if self.error_color is True:
            self.error_color = 'red'
        if self.error_color is not None:
            # Validate the color name eagerly so a bad option fails fast.
            try:
                colorize(self.error_color, '')
            except KeyError:
                raise ValueError("Invalid color %r specified" %
                                 self.error_color)

    def format(self, tokensource, outfile):
        # Probe the output file: this formatter requires a binary stream.
        try:
            outfile.write(b'')
        except TypeError:
            raise TypeError('The raw tokens formatter needs a binary '
                            'output file')
        # Select write/flush callables according to the compression option so
        # the token loop below stays identical in all three cases.
        if self.compress == 'gz':
            import gzip
            outfile = gzip.GzipFile('', 'wb', 9, outfile)
            def write(text):
                outfile.write(text.encode())
            flush = outfile.flush
        elif self.compress == 'bz2':
            import bz2
            compressor = bz2.BZ2Compressor(9)
            def write(text):
                outfile.write(compressor.compress(text.encode()))
            def flush():
                # Emit whatever the compressor is still buffering.
                outfile.write(compressor.flush())
                outfile.flush()
        else:
            def write(text):
                outfile.write(text.encode())
            flush = outfile.flush

        if self.error_color:
            for ttype, value in tokensource:
                line = "%s\t%r\n" % (ttype, value)
                if ttype is Token.Error:
                    write(colorize(self.error_color, line))
                else:
                    write(line)
        else:
            for ttype, value in tokensource:
                write("%s\t%r\n" % (ttype, value))
        flush()
TESTCASE_BEFORE = u'''\
def testNeedsName(self):
fragment = %r
tokens = [
'''
TESTCASE_AFTER = u'''\
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
'''
class TestcaseFormatter(Formatter):
    """
    Format tokens as appropriate for a new testcase.

    .. versionadded:: 2.0
    """
    name = 'Testcase'
    aliases = ['testcase']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # The generated test code is emitted as unicode or UTF-8 bytes only.
        if self.encoding is not None and self.encoding != 'utf-8':
            raise ValueError("Only None and utf-8 are allowed encodings.")

    def format(self, tokensource, outfile):
        # Collect the raw input (embedded as the test fixture) and one
        # "(tokentype, value)," line per token for the expected list.
        indentation = ' ' * 12
        rawbuf = []
        outbuf = []
        for ttype, value in tokensource:
            rawbuf.append(value)
            outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))

        before = TESTCASE_BEFORE % (u''.join(rawbuf),)
        during = u''.join(outbuf)
        after = TESTCASE_AFTER
        if self.encoding is None:
            outfile.write(before + during + after)
        else:
            outfile.write(before.encode('utf-8'))
            outfile.write(during.encode('utf-8'))
            outfile.write(after.encode('utf-8'))
        outfile.flush()
| mpl-2.0 |
ajduncan/granolacms | fckeditor/editor/filemanager/connectors/py/wsgi.py | 93 | 1629 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from WSGI capable server (recomended)
def App(environ, start_response):
    """WSGI entry point: dispatch to the connector or quick-upload handler.

    The same WSGI application serves both endpoints; the SCRIPT_NAME suffix
    selects which handler to instantiate.  Unknown paths get a small HTML
    notice; handler failures render a cgitb traceback page.
    """
    if environ['SCRIPT_NAME'].endswith("connector.py"):
        conn = FCKeditorConnector(environ)
    elif environ['SCRIPT_NAME'].endswith("upload.py"):
        conn = FCKeditorQuickUpload(environ)
    else:
        start_response("200 Ok", [('Content-Type', 'text/html')])
        yield "Unknown page requested: "
        yield environ['SCRIPT_NAME']
        return
    try:
        # Run the connector, then start the WSGI response with its headers.
        data = conn.doResponse()
        start_response("200 Ok", conn.headers)
        yield data
    except Exception:
        # Catch Exception rather than a bare except so SystemExit and
        # KeyboardInterrupt still propagate; render the traceback as HTML.
        start_response("500 Internal Server Error", [("Content-type", "text/html")])
        # Renamed from 'file', which shadowed the (Python 2) builtin.
        buf = StringIO()
        cgitb.Hook(file=buf).handle()
        yield buf.getvalue()
| gpl-2.0 |
randynobx/ansible | lib/ansible/modules/packaging/os/pkg5_publisher.py | 71 | 5936 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkg5_publisher
author: "Peter Oliver (@mavit)"
short_description: Manages Solaris 11 Image Packaging System publishers
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
- This modules will configure which publishers a client will download IPS
packages from.
options:
name:
description:
- The publisher's name.
required: true
aliases: [ publisher ]
state:
description:
- Whether to ensure that a publisher is present or absent.
required: false
default: present
choices: [ present, absent ]
sticky:
description:
- Packages installed from a sticky repository can only receive updates
from that repository.
required: false
default: null
choices: [ true, false ]
enabled:
description:
- Is the repository enabled or disabled?
required: false
default: null
choices: [ true, false ]
origin:
description:
- A path or URL to the repository.
- Multiple values may be provided.
required: false
default: null
mirror:
description:
- A path or URL to the repository mirror.
- Multiple values may be provided.
required: false
default: null
'''
EXAMPLES = '''
# Fetch packages for the solaris publisher direct from Oracle:
- pkg5_publisher:
name: solaris
sticky: true
origin: https://pkg.oracle.com/solaris/support/
# Configure a publisher for locally-produced packages:
- pkg5_publisher:
name: site
origin: 'https://pkg.example.com/site/'
'''
def main():
    """Module entry point: parse arguments and apply the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, aliases=['publisher']),
            state=dict(default='present', choices=['present', 'absent']),
            sticky=dict(type='bool'),
            enabled=dict(type='bool'),
            # search_after=dict(),
            # search_before=dict(),
            origin=dict(type='list'),
            mirror=dict(type='list'),
        )
    )

    # A single empty string means "no URIs supplied" for these list options.
    for option in ('origin', 'mirror'):
        if module.params[option] == ['']:
            module.params[option] = []

    if module.params['state'] == 'present':
        modify_publisher(module, module.params)
    else:
        unset_publisher(module, module.params['name'])
def modify_publisher(module, params):
    """Create the publisher, or rewrite it only when its config differs."""
    name = params['name']
    current = get_publishers(module)

    if name not in current:
        # Publisher does not exist yet: create it.
        return set_publisher(module, params)

    # Options left at None mean "keep whatever is configured".
    requested = (
        (opt, params[opt])
        for opt in ('origin', 'mirror', 'sticky', 'enabled')
        if params[opt] is not None
    )
    if any(value != current[name][opt] for opt, value in requested):
        return set_publisher(module, params)

    # Nothing to change.
    module.exit_json()
def set_publisher(module, params):
    """Run `pkg set-publisher` with flags derived from the module params."""
    name = params['name']
    args = []

    if params['origin'] is not None:
        # Replace the whole origin list rather than appending to it.
        args.append('--remove-origin=*')
        args.extend('--add-origin=' + uri for uri in params['origin'])

    if params['mirror'] is not None:
        args.append('--remove-mirror=*')
        args.extend('--add-mirror=' + uri for uri in params['mirror'])

    if params['sticky'] is not None:
        args.append('--sticky' if params['sticky'] else '--non-sticky')

    if params['enabled'] is not None:
        args.append('--enable' if params['enabled'] else '--disable')

    rc, out, err = module.run_command(
        ["pkg", "set-publisher"] + args + [name],
        check_rc=True
    )
    module.exit_json(rc=rc, results=[out], msg=err, changed=True)
def unset_publisher(module, publisher):
    """Remove *publisher* via `pkg unset-publisher`; no-op if already absent."""
    # Idiom fix: `publisher not in ...` instead of `not publisher in ...`.
    if publisher not in get_publishers(module):
        # Already absent: report "no change" and exit.
        module.exit_json()

    rc, out, err = module.run_command(
        ["pkg", "unset-publisher", publisher],
        check_rc=True
    )

    response = {
        'rc': rc,
        'results': [out],
        'msg': err,
        'changed': True,
    }
    module.exit_json(**response)
def get_publishers(module):
    """Parse `pkg publisher -Ftsv` output into a dict keyed by publisher name.

    Each value holds 'sticky', 'enabled', and lists of 'origin' and 'mirror'
    URIs (a publisher may span several TSV rows, one per URI).
    """
    rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)

    lines = out.splitlines()
    # First line is the header; lower-cased column names become dict keys.
    keys = lines.pop(0).lower().split("\t")

    publishers = {}
    for line in lines:
        values = dict(zip(keys, map(unstringify, line.split("\t"))))
        name = values['publisher']

        if name not in publishers:
            publishers[name] = {
                'sticky': values['sticky'],
                'enabled': values['enabled'],
                'origin': [],
                'mirror': [],
            }
        if values['type'] is not None:
            # 'type' is 'origin' or 'mirror'; file the URI accordingly.
            publishers[name][values['type']].append(values['uri'])

    return publishers
def unstringify(val):
    """Map pkg(1) TSV field text back to Python values.

    "-" and "" become None, "true"/"false" become booleans, and anything
    else is returned unchanged.
    """
    if val in ("-", ""):
        return None
    if val == "true":
        return True
    if val == "false":
        return False
    return val
from ansible.module_utils.basic import *
# Standard Ansible entry point: run the module when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
memtoko/django | django/contrib/gis/gdal/prototypes/raster.py | 24 | 3671 | """
This module houses the ctypes function prototypes for GDAL DataSource (raster)
related data structures.
"""
from ctypes import POINTER, c_char_p, c_double, c_int, c_void_p
from functools import partial
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, int_output, void_output,
voidptr_output,
)
# For more detail about c function names and definitions see
# http://gdal.org/gdal_8h.html
# http://gdal.org/gdalwarper_8h.html

# Prepare partial functions that use cpl error codes
# (re-binding the generators with cpl=True makes every prototype below
# check the GDAL CPL error state after each call).
void_output = partial(void_output, cpl=True)
const_string_output = partial(const_string_output, cpl=True)
double_output = partial(double_output, cpl=True)

# Raster Driver Routines
register_all = void_output(lgdal.GDALAllRegister, [])
get_driver = voidptr_output(lgdal.GDALGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.GDALGetDriverByName, [c_char_p], errcheck=False)
get_driver_count = int_output(lgdal.GDALGetDriverCount, [])
get_driver_description = const_string_output(lgdal.GDALGetDescription, [c_void_p])

# Raster Data Source Routines
create_ds = voidptr_output(lgdal.GDALCreate, [c_void_p, c_char_p, c_int, c_int, c_int, c_int])
open_ds = voidptr_output(lgdal.GDALOpen, [c_char_p, c_int])
close_ds = void_output(lgdal.GDALClose, [c_void_p])
copy_ds = voidptr_output(lgdal.GDALCreateCopy, [c_void_p, c_char_p, c_void_p, c_int,
                                                POINTER(c_char_p), c_void_p, c_void_p])
add_band_ds = void_output(lgdal.GDALAddBand, [c_void_p, c_int])
get_ds_description = const_string_output(lgdal.GDALGetDescription, [])
get_ds_driver = voidptr_output(lgdal.GDALGetDatasetDriver, [c_void_p])
get_ds_xsize = int_output(lgdal.GDALGetRasterXSize, [c_void_p])
get_ds_ysize = int_output(lgdal.GDALGetRasterYSize, [c_void_p])
get_ds_raster_count = int_output(lgdal.GDALGetRasterCount, [c_void_p])
get_ds_raster_band = voidptr_output(lgdal.GDALGetRasterBand, [c_void_p, c_int])
get_ds_projection_ref = const_string_output(lgdal.GDALGetProjectionRef, [c_void_p])
set_ds_projection_ref = void_output(lgdal.GDALSetProjection, [c_void_p, c_char_p])
get_ds_geotransform = void_output(lgdal.GDALGetGeoTransform, [c_void_p, POINTER(c_double * 6)], errcheck=False)
set_ds_geotransform = void_output(lgdal.GDALSetGeoTransform, [c_void_p, POINTER(c_double * 6)])

# Raster Band Routines
band_io = void_output(lgdal.GDALRasterIO, [c_void_p, c_int, c_int, c_int, c_int, c_int,
                                           c_void_p, c_int, c_int, c_int, c_int, c_int])
get_band_xsize = int_output(lgdal.GDALGetRasterBandXSize, [c_void_p])
get_band_ysize = int_output(lgdal.GDALGetRasterBandYSize, [c_void_p])
get_band_index = int_output(lgdal.GDALGetBandNumber, [c_void_p])
get_band_description = const_string_output(lgdal.GDALGetDescription, [c_void_p])
get_band_ds = voidptr_output(lgdal.GDALGetBandDataset, [c_void_p])
get_band_datatype = int_output(lgdal.GDALGetRasterDataType, [c_void_p])
get_band_nodata_value = double_output(lgdal.GDALGetRasterNoDataValue, [c_void_p, POINTER(c_int)])
set_band_nodata_value = void_output(lgdal.GDALSetRasterNoDataValue, [c_void_p, c_double])
get_band_minimum = double_output(lgdal.GDALGetRasterMinimum, [c_void_p, POINTER(c_int)])
get_band_maximum = double_output(lgdal.GDALGetRasterMaximum, [c_void_p, POINTER(c_int)])

# Reprojection routine
reproject_image = void_output(lgdal.GDALReprojectImage, [c_void_p, c_char_p, c_void_p, c_char_p,
                                                         c_int, c_double, c_double,
                                                         c_void_p, c_void_p, c_void_p])
| bsd-3-clause |
hammerlab/cohorts | cohorts/plot.py | 1 | 10847 | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from scipy.stats import mannwhitneyu, fisher_exact
import seaborn as sb
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
from .model import bootstrap_auc
def vertical_percent(plot, percent=0.1):
    """Return *percent* of the plot's current y-axis span."""
    bottom, top = plot.get_ylim()
    return (top - bottom) * percent
def as_numeric(text):
    """Return *text* coerced to float, or None if it is not numeric."""
    try:
        return float(text)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
def hide_ticks(plot, min_tick_value=None, max_tick_value=None):
    """Hide tick labels whose value falls outside [min_tick_value, max_tick_value].

    A bound left as None is unbounded on that side.
    """
    for tick, tick_value in zip(plot.get_yticklabels(), plot.get_yticks()):
        tick_label = as_numeric(tick_value)
        # Bug fix: the original used `if tick_label:`, which treats a tick
        # at exactly 0.0 as "not numeric" (falsy), so a 0 tick could never
        # be hidden.  Compare against None explicitly instead.
        if tick_label is None:
            continue
        below = min_tick_value is not None and tick_label < min_tick_value
        above = max_tick_value is not None and tick_label > max_tick_value
        if below or above:
            tick.set_visible(False)
def hide_negative_y_ticks(plot):
    # Convenience wrapper: hide every y tick label below zero.
    hide_ticks(plot, min_tick_value=0)
def only_percentage_ticks(plot):
    """
    Only show ticks from 0.0 to 1.0.

    Useful for axes that display proportions or rates.
    """
    hide_ticks(plot, min_tick_value=0, max_tick_value=1.0)
def add_significance_indicator(plot, col_a=0, col_b=1, significant=False):
    """
    Add a p-value significance indicator.

    Draws a bracket between the categorical x positions ``col_a`` and
    ``col_b`` above the plot, annotated with "*" when ``significant`` is
    True and "ns" (not significant) otherwise.  The y-axis is expanded to
    make room for the bracket.
    """
    plot_bottom, plot_top = plot.get_ylim()
    # Give the plot a little room for the significance indicator
    line_height = vertical_percent(plot, 0.1)
    # Add some extra spacing below the indicator
    plot_top = plot_top + line_height
    # Add some extra spacing above the indicator
    plot.set_ylim(top=plot_top + line_height * 2)
    color = "black"
    line_top = plot_top + line_height
    # Bracket shape: up from each column to line_top, joined horizontally.
    plot.plot([col_a, col_a, col_b, col_b], [plot_top, line_top, line_top, plot_top], lw=1.5, color=color)
    indicator = "*" if significant else "ns"
    plot.text((col_a + col_b) * 0.5, line_top, indicator, ha="center", va="bottom", color=color)
def stripboxplot(x, y, data, ax=None, significant=None, **kwargs):
    """
    Overlay a stripplot on top of a boxplot.

    Parameters
    ----------
    x, y : str
        Column names for the categorical axis and the value axis.
    data : Pandas dataframe
        Dataframe to retrieve information from.
    ax : Axes, default None
        Axes to plot on.
    significant : bool, default None
        When not None, draw a significance indicator ("*" or "ns").
    **kwargs
        Forwarded to both seaborn calls; ``jitter`` and ``color`` apply to
        the stripplot only.
    """
    # Remove the stripplot-only ``jitter`` option *before* calling
    # sb.boxplot: previously it was popped afterwards, so passing
    # jitter=... crashed the boxplot call with an unexpected keyword.
    jitter = kwargs.pop("jitter", 0.05)
    ax = sb.boxplot(
        x=x,
        y=y,
        data=data,
        ax=ax,
        fliersize=0,
        **kwargs
    )
    plot = sb.stripplot(
        x=x,
        y=y,
        data=data,
        ax=ax,
        jitter=jitter,
        color=kwargs.pop("color", "0.3"),
        **kwargs
    )
    if data[y].min() >= 0:
        # All values are non-negative, so negative ticks are axis padding only.
        hide_negative_y_ticks(plot)
    if significant is not None:
        add_significance_indicator(plot=plot, significant=significant)
    return plot
def sided_str_from_alternative(alternative, condition):
    """Describe the sidedness of a statistical test as a readable string."""
    if alternative is None:
        raise ValueError("Must pick an alternative")
    if alternative == "two-sided":
        return alternative
    # One-sided: condition is hypothesized 'greater' or 'less' than no-condition.
    comparison = ">" if alternative == "greater" else "<"
    return "one-sided: {} {} not {}".format(condition, comparison, condition)
def get_condition_mask(df, condition, condition_value):
    """Return a boolean Series marking rows where ``condition`` holds.

    When ``condition_value`` is truthy, rows are matched by equality against
    it; otherwise the column itself is interpreted as boolean.
    """
    if condition_value:
        return df[condition] == condition_value
    # Cast explicitly to bool: if the column has dtype object (e.g. it once
    # held np.nan alongside True/False), ``~mask`` would behave incorrectly
    # without the cast.
    return df[condition].astype("bool")
class FishersExactResults(namedtuple("FishersExactResults", ["oddsratio", "p_value", "sided_str", "with_condition1_series", "without_condition1_series", "plot"])):
    """Outcome of a Fisher's exact test plus the associated data and plot."""

    def __str__(self):
        return "FishersExactResults(oddsratio={}, p_value={}, sided_str='{}')".format(
            self.oddsratio, self.p_value, self.sided_str)

    def __repr__(self):
        return str(self)
def fishers_exact_plot(data, condition1, condition2, ax=None,
                       condition1_value=None,
                       alternative="two-sided", **kwargs):
    """
    Perform a Fisher's exact test to compare to binary columns

    Parameters
    ----------
    data: Pandas dataframe
        Dataframe to retrieve information from
    condition1: str
        First binary column to compare (and used for test sidedness)
    condition2: str
        Second binary column to compare
    ax : Axes, default None
        Axes to plot on
    condition1_value:
        If `condition1` is not a binary column, split on =/!= to condition1_value
    alternative:
        Specify the sidedness of the test: "two-sided", "less"
        or "greater"

    Returns
    -------
    FishersExactResults
    """
    # Fail fast: only the two-sided test is supported.  Previously this was
    # checked only after the plot was drawn and the test had run, leaving a
    # stray figure behind on error.
    if alternative != "two-sided":
        raise ValueError("We need to better understand the one-sided Fisher's Exact test")
    sided_str = "two-sided"

    plot = sb.barplot(
        x=condition1,
        y=condition2,
        ax=ax,
        data=data,
        **kwargs
    )
    plot.set_ylabel("Percent %s" % condition2)
    condition1_mask = get_condition_mask(data, condition1, condition1_value)

    # NOTE(review): the contingency table is built from the raw column, not
    # from condition1_mask -- verify that is intended when condition1_value
    # is supplied.
    count_table = pd.crosstab(data[condition1], data[condition2])
    print(count_table)
    oddsratio, p_value = fisher_exact(count_table, alternative=alternative)
    add_significance_indicator(plot=plot, significant=p_value <= 0.05)
    only_percentage_ticks(plot)

    print("Fisher's Exact Test: OR: {}, p-value={} ({})".format(oddsratio, p_value, sided_str))
    return FishersExactResults(oddsratio=oddsratio,
                               p_value=p_value,
                               sided_str=sided_str,
                               with_condition1_series=data[condition1_mask][condition2],
                               without_condition1_series=data[~condition1_mask][condition2],
                               plot=plot)
class MannWhitneyResults(namedtuple("MannWhitneyResults", ["U", "p_value", "sided_str", "with_condition_series", "without_condition_series", "plot"])):
    """Outcome of a Mann-Whitney test plus the underlying data and plot."""

    def __str__(self):
        return "MannWhitneyResults(U={}, p_value={}, sided_str='{}')".format(
            self.U, self.p_value, self.sided_str)

    def __repr__(self):
        return str(self)
def mann_whitney_plot(data,
                      condition,
                      distribution,
                      ax=None,
                      condition_value=None,
                      alternative="two-sided",
                      skip_plot=False,
                      **kwargs):
    """
    Create a box plot comparing a condition and perform a
    Mann Whitney test to compare the distribution in condition A v B

    Parameters
    ----------
    data: Pandas dataframe
        Dataframe to retrieve information from
    condition: str
        Column to use as the splitting criteria
    distribution: str
        Column to use as the Y-axis or distribution in the test
    ax : Axes, default None
        Axes to plot on
    condition_value:
        If `condition` is not a binary column, split on =/!= to condition_value
    alternative:
        Specify the sidedness of the Mann-Whitney test: "two-sided", "less"
        or "greater"
    skip_plot:
        Calculate the test statistic and p-value, but don't plot.

    Returns
    -------
    MannWhitneyResults
        Test statistic, p-value, the two data series, and the plot
        (``None`` when ``skip_plot`` is True).
    """
    condition_mask = get_condition_mask(data, condition, condition_value)
    # Compare the distribution of rows matching the condition against the rest.
    U, p_value = mannwhitneyu(
        data[condition_mask][distribution],
        data[~condition_mask][distribution],
        alternative=alternative
    )

    plot = None
    if not skip_plot:
        plot = stripboxplot(
            x=condition,
            y=distribution,
            data=data,
            ax=ax,
            significant=p_value <= 0.05,
            **kwargs
        )

    sided_str = sided_str_from_alternative(alternative, condition)
    print("Mann-Whitney test: U={}, p-value={} ({})".format(U, p_value, sided_str))
    return MannWhitneyResults(U=U,
                              p_value=p_value,
                              sided_str=sided_str,
                              with_condition_series=data[condition_mask][distribution],
                              without_condition_series=data[~condition_mask][distribution],
                              plot=plot)
class CorrelationResults(namedtuple("CorrelationResults", ["coeff", "p_value", "stat_func", "series_x", "series_y", "plot"])):
    """Outcome of a correlation test plus the inputs and plot."""

    def __str__(self):
        return "CorrelationResults(coeff={}, p_value={}, stat_func={})".format(
            self.coeff, self.p_value, self.stat_func.__name__)

    def __repr__(self):
        return str(self)
def roc_curve_plot(data, value_column, outcome_column, bootstrap_samples=100, ax=None):
    """Create a ROC curve and compute the bootstrap AUC for the given variable and outcome

    Parameters
    ----------
    data : Pandas dataframe
        Dataframe to retrieve information from
    value_column : str
        Column to retrieve the values from
    outcome_column : str
        Column to use as the outcome variable
    bootstrap_samples : int, optional
        Number of bootstrap samples to use to compute the AUC
    ax : Axes, default None
        Axes to plot on

    Returns
    -------
    (mean_bootstrap_auc, roc_plot) : (float, matplotlib plot)
        Mean AUC for the given number of bootstrap samples and the plot
    """
    # Estimate the AUC distribution by bootstrap resampling.
    scores = bootstrap_auc(df=data,
                           col=value_column,
                           pred_col=outcome_column,
                           n_bootstrap=bootstrap_samples)
    mean_bootstrap_auc = scores.mean()
    print("{}, Bootstrap (samples = {}) AUC:{}, std={}".format(
        value_column, bootstrap_samples, mean_bootstrap_auc, scores.std()))

    # Plot a single ROC curve computed on the full (non-resampled) data.
    outcome = data[outcome_column].astype(int)
    values = data[value_column]
    fpr, tpr, thresholds = roc_curve(outcome, values)
    if ax is None:
        ax = plt.gca()
    roc_plot = ax.plot(fpr, tpr, lw=1, label=value_column)
    ax.set_xlim([-0.05, 1.05])
    ax.set_ylim([-0.05, 1.05])
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.legend(loc=2, borderaxespad=0.)
    ax.set_title('{} ROC Curve (n={})'.format(value_column, len(values)))
    return (mean_bootstrap_auc, roc_plot)
| apache-2.0 |
jricardo27/travelhelper | travelhelper/apps/lonelyplanet/views/country.py | 1 | 1047 | """
Lonely Planet Country Views
"""
from __future__ import absolute_import
from django.views.generic import DetailView, ListView
from core.views.base import BaseView
from lonelyplanet.models.country import LPCountry
class BaseLonelyPlanetCountryView(BaseView):
    """
    Base class for Lonely Planet views
    """
    # Model shared by every Lonely Planet country view.
    model = LPCountry
class CountriesView(BaseLonelyPlanetCountryView, ListView):
    """
    List all available countries in Lonely Planet
    """
    template_name = 'lonelyplanet/list_countries.html'
    # NOTE(review): presumably merged into the template context by BaseView
    # -- confirm against core.views.base.
    additional_context = {
        'title': 'Lonely Planet Countries',
    }

    def get_queryset(self):
        """
        Return the list of countries available in Lonely Planet,
        ordered alphabetically by name.
        """
        return LPCountry.objects.order_by('name')
class CountryDetailView(BaseLonelyPlanetCountryView, DetailView):
    """
    Detail view of a Country in Lonely Planet
    """
    template_name = 'lonelyplanet/country_detail.html'
    additional_context = {
        # NOTE(review): presumably a URL name used by the template to link
        # child places -- confirm against the template.
        'child_url_identifier': 'placedetail_lonelyplanet',
    }
| bsd-3-clause |
TuSimple/mxnet | example/neural-style/model_vgg19.py | 52 | 5975 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import find_mxnet
import mxnet as mx
import os, sys
from collections import namedtuple
# Bundle of a bound executor plus handles to its input array, the input's
# gradient array, the style/content outputs, and all argument arrays.
ConvExecutor = namedtuple('ConvExecutor', ['executor', 'data', 'data_grad', 'style', 'content', 'arg_dict'])
def get_symbol():
    """Build the truncated VGG-19 graph used for neural style transfer.

    Returns a (style, content) pair of grouped symbols: the first ReLU of
    each convolution stage for style, and relu4_2 for content.  Layer names
    and hyper-parameters match the pretrained vgg19.params weights.
    """
    data = mx.sym.Variable("data")

    def conv_relu(bottom, suffix, num_filter):
        # One 3x3 stride-1 convolution (pad 1) followed by a ReLU.
        conv = mx.symbol.Convolution(name='conv%s' % suffix, data=bottom,
                                     num_filter=num_filter, pad=(1, 1),
                                     kernel=(3, 3), stride=(1, 1),
                                     no_bias=False, workspace=1024)
        return mx.symbol.Activation(name='relu%s' % suffix, data=conv,
                                    act_type='relu')

    # VGG-19 stages 1-4: (filters, conv count) per stage, average-pooled
    # between stages.
    stages = [(64, 2), (128, 2), (256, 4), (512, 4)]
    relus = {}
    top = data
    for stage_idx, (num_filter, num_convs) in enumerate(stages, start=1):
        for conv_idx in range(1, num_convs + 1):
            suffix = '%d_%d' % (stage_idx, conv_idx)
            top = conv_relu(top, suffix, num_filter)
            relus[suffix] = top
        top = mx.symbol.Pooling(name='pool%d' % stage_idx, data=top,
                                pad=(0, 0), kernel=(2, 2), stride=(2, 2),
                                pool_type='avg')
    # Stage 5 only needs its first conv/relu pair.
    top = conv_relu(top, '5_1', 512)
    relus['5_1'] = top

    # style and content layers
    style = mx.sym.Group([relus['1_1'], relus['2_1'], relus['3_1'],
                          relus['4_1'], relus['5_1']])
    content = mx.sym.Group([relus['4_2']])
    return style, content
def get_executor(style, content, input_size, ctx):
    """Bind the style/content symbols into an executor on ``ctx``.

    Allocates argument arrays for an input of shape
    (1, 3, input_size[0], input_size[1]), loads the pretrained VGG-19
    weights from ./model/vgg19.params, and returns a ConvExecutor.
    """
    out = mx.sym.Group([style, content])
    # make executor
    arg_shapes, output_shapes, aux_shapes = out.infer_shape(data=(1, 3, input_size[0], input_size[1]))
    arg_names = out.list_arguments()
    arg_dict = dict(zip(arg_names, [mx.nd.zeros(shape, ctx=ctx) for shape in arg_shapes]))
    # Only the input image receives gradients; the network weights stay fixed.
    grad_dict = {"data": arg_dict["data"].copyto(ctx)}
    # init with pretrained weight
    pretrained = mx.nd.load("./model/vgg19.params")
    for name in arg_names:
        if name == "data":
            continue
        key = "arg:" + name
        if key in pretrained:
            pretrained[key].copyto(arg_dict[name])
        else:
            print("Skip argument %s" % name)
    executor = out.bind(ctx=ctx, args=arg_dict, args_grad=grad_dict, grad_req="write")
    # Grouped outputs are ordered style layers first, content layer last.
    return ConvExecutor(executor=executor,
                        data=arg_dict["data"],
                        data_grad=grad_dict["data"],
                        style=executor.outputs[:-1],
                        content=executor.outputs[-1],
                        arg_dict=arg_dict)
def get_model(input_size, ctx):
    """Convenience wrapper: build the VGG-19 symbols and bind an executor."""
    style, content = get_symbol()
    return get_executor(style, content, input_size, ctx)
| apache-2.0 |
ct-23/home-assistant | tests/components/test_alert.py | 10 | 7576 | """The tests for the Alert component."""
# pylint: disable=protected-access
from copy import deepcopy
import unittest
from homeassistant.setup import setup_component
from homeassistant.core import callback
import homeassistant.components.alert as alert
import homeassistant.components.notify as notify
from homeassistant.const import (CONF_ENTITY_ID, STATE_IDLE, CONF_NAME,
CONF_STATE, STATE_ON, STATE_OFF)
from tests.common import get_test_home_assistant
NAME = "alert_test"
DONE_MESSAGE = "alert_gone"
NOTIFIER = 'test'

# Alert configuration watching sensor.test for STATE_ON, repeating every
# 30 (minutes) and notifying the 'test' notifier without skipping the
# first notification.
TEST_CONFIG = \
    {alert.DOMAIN: {
        NAME: {
            CONF_NAME: NAME,
            alert.CONF_DONE_MESSAGE: DONE_MESSAGE,
            CONF_ENTITY_ID: "sensor.test",
            CONF_STATE: STATE_ON,
            alert.CONF_REPEAT: 30,
            alert.CONF_SKIP_FIRST: False,
            alert.CONF_NOTIFIERS: [NOTIFIER]}
        }}

# Positional constructor arguments for alert.Alert used by the no-ack
# tests below (the trailing False presumably disables acknowledgement --
# confirm against the Alert signature).
TEST_NOACK = [NAME, NAME, DONE_MESSAGE, "sensor.test",
              STATE_ON, [30], False, NOTIFIER, False]

ENTITY_ID = alert.ENTITY_ID_FORMAT.format(NAME)
# pylint: disable=invalid-name
class TestAlert(unittest.TestCase):
    """Test the alert module."""

    def setUp(self):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()

    def test_is_on(self):
        """Test is_on method."""
        self.hass.states.set(ENTITY_ID, STATE_ON)
        self.hass.block_till_done()
        self.assertTrue(alert.is_on(self.hass, ENTITY_ID))
        self.hass.states.set(ENTITY_ID, STATE_OFF)
        self.hass.block_till_done()
        self.assertFalse(alert.is_on(self.hass, ENTITY_ID))

    def test_setup(self):
        """Test setup method starts the alert in the idle state."""
        assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
        self.assertEqual(STATE_IDLE, self.hass.states.get(ENTITY_ID).state)

    def test_fire(self):
        """Test the alert firing when the watched entity turns on."""
        assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
        self.hass.states.set("sensor.test", STATE_ON)
        self.hass.block_till_done()
        self.assertEqual(STATE_ON, self.hass.states.get(ENTITY_ID).state)

    def test_silence(self):
        """Test silencing the alert."""
        assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
        self.hass.states.set("sensor.test", STATE_ON)
        self.hass.block_till_done()
        alert.turn_off(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        self.assertEqual(STATE_OFF, self.hass.states.get(ENTITY_ID).state)

        # alert should not be silenced on next fire
        self.hass.states.set("sensor.test", STATE_OFF)
        self.hass.block_till_done()
        self.assertEqual(STATE_IDLE, self.hass.states.get(ENTITY_ID).state)
        self.hass.states.set("sensor.test", STATE_ON)
        self.hass.block_till_done()
        self.assertEqual(STATE_ON, self.hass.states.get(ENTITY_ID).state)

    def test_reset(self):
        """Test resetting a silenced alert back to firing."""
        assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
        self.hass.states.set("sensor.test", STATE_ON)
        self.hass.block_till_done()
        alert.turn_off(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        self.assertEqual(STATE_OFF, self.hass.states.get(ENTITY_ID).state)
        alert.turn_on(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        self.assertEqual(STATE_ON, self.hass.states.get(ENTITY_ID).state)

    def test_toggle(self):
        """Test toggling alert between firing and silenced."""
        assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
        self.hass.states.set("sensor.test", STATE_ON)
        self.hass.block_till_done()
        self.assertEqual(STATE_ON, self.hass.states.get(ENTITY_ID).state)
        alert.toggle(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        self.assertEqual(STATE_OFF, self.hass.states.get(ENTITY_ID).state)
        alert.toggle(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        self.assertEqual(STATE_ON, self.hass.states.get(ENTITY_ID).state)

    def test_hidden(self):
        """Test entity hiding: hidden while idle, visible once fired."""
        assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
        hidden = self.hass.states.get(ENTITY_ID).attributes.get('hidden')
        self.assertTrue(hidden)

        self.hass.states.set("sensor.test", STATE_ON)
        self.hass.block_till_done()
        hidden = self.hass.states.get(ENTITY_ID).attributes.get('hidden')
        self.assertFalse(hidden)

        # Silencing keeps the entity visible.
        alert.turn_off(self.hass, ENTITY_ID)
        hidden = self.hass.states.get(ENTITY_ID).attributes.get('hidden')
        self.assertFalse(hidden)

    def test_notification_no_done_message(self):
        """Test that no resolution message is sent without a done message."""
        events = []
        config = deepcopy(TEST_CONFIG)
        del(config[alert.DOMAIN][NAME][alert.CONF_DONE_MESSAGE])

        @callback
        def record_event(event):
            """Add recorded event to set."""
            events.append(event)

        self.hass.services.register(
            notify.DOMAIN, NOTIFIER, record_event)

        assert setup_component(self.hass, alert.DOMAIN, config)
        self.assertEqual(0, len(events))
        self.hass.states.set("sensor.test", STATE_ON)
        self.hass.block_till_done()
        self.assertEqual(1, len(events))
        # No second notification when the alert resolves.
        self.hass.states.set("sensor.test", STATE_OFF)
        self.hass.block_till_done()
        self.assertEqual(1, len(events))

    def test_notification(self):
        """Test notifications on both firing and resolution."""
        events = []

        @callback
        def record_event(event):
            """Add recorded event to set."""
            events.append(event)

        self.hass.services.register(
            notify.DOMAIN, NOTIFIER, record_event)

        assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
        self.assertEqual(0, len(events))
        self.hass.states.set("sensor.test", STATE_ON)
        self.hass.block_till_done()
        self.assertEqual(1, len(events))
        self.hass.states.set("sensor.test", STATE_OFF)
        self.hass.block_till_done()
        self.assertEqual(2, len(events))

    def test_skipfirst(self):
        """Test skipping first notification."""
        config = deepcopy(TEST_CONFIG)
        config[alert.DOMAIN][NAME][alert.CONF_SKIP_FIRST] = True
        events = []

        @callback
        def record_event(event):
            """Add recorded event to set."""
            events.append(event)

        self.hass.services.register(
            notify.DOMAIN, NOTIFIER, record_event)

        assert setup_component(self.hass, alert.DOMAIN, config)
        self.assertEqual(0, len(events))
        self.hass.states.set("sensor.test", STATE_ON)
        self.hass.block_till_done()
        # First trigger produces no notification when skip_first is set.
        self.assertEqual(0, len(events))

    def test_noack(self):
        """Test no ack feature."""
        entity = alert.Alert(self.hass, *TEST_NOACK)
        self.hass.add_job(entity.begin_alerting)
        self.hass.block_till_done()
        self.assertEqual(True, entity.hidden)

    def test_done_message_state_tracker_reset_on_cancel(self):
        """Test that the done message is reset when cancelled."""
        entity = alert.Alert(self.hass, *TEST_NOACK)
        # Stub the cancel callback so end_alerting can run in isolation.
        entity._cancel = lambda *args: None
        assert entity._send_done_message is False
        entity._send_done_message = True
        self.hass.add_job(entity.end_alerting)
        self.hass.block_till_done()
        assert entity._send_done_message is False
| apache-2.0 |
shacker/django | django/db/backends/oracle/operations.py | 6 | 24223 | import datetime
import re
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import strip_quotes, truncate_name
from django.utils import timezone
from django.utils.encoding import force_bytes
from .base import Database
from .utils import BulkInsertMapper, InsertIdVar, Oracle_datetime
class DatabaseOperations(BaseDatabaseOperations):
# Oracle uses NUMBER(11) and NUMBER(19) for integer fields.
integer_field_ranges = {
'SmallIntegerField': (-99999999999, 99999999999),
'IntegerField': (-99999999999, 99999999999),
'BigIntegerField': (-9999999999999999999, 9999999999999999999),
'PositiveSmallIntegerField': (0, 99999999999),
'PositiveIntegerField': (0, 99999999999),
}
set_operators = dict(BaseDatabaseOperations.set_operators, difference='MINUS')
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
_sequence_reset_sql = """
DECLARE
table_value integer;
seq_value integer;
seq_name user_tab_identity_cols.sequence_name%%TYPE;
BEGIN
BEGIN
SELECT sequence_name INTO seq_name FROM user_tab_identity_cols
WHERE table_name = '%(table_name)s' AND
column_name = '%(column_name)s';
EXCEPTION WHEN NO_DATA_FOUND THEN
seq_name := '%(no_autofield_sequence_name)s';
END;
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = seq_name;
WHILE table_value > seq_value LOOP
EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL'
INTO seq_value;
END LOOP;
END;
/"""
# Oracle doesn't support string without precision; use the max string size.
cast_char_field_without_max_length = 'NVARCHAR2(2000)'
def cache_key_culling_sql(self):
return """
SELECT cache_key
FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
WHERE rank = %%s + 1
"""
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
elif lookup_type == 'week':
# IW = ISO week number
return "TO_CHAR(%s, 'IW')" % field_name
elif lookup_type == 'quarter':
return "TO_CHAR(%s, 'Q')" % field_name
else:
# https://docs.oracle.com/database/121/SQLRF/functions067.htm#SQLRF00639
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_interval_sql(self, timedelta):
"""
NUMTODSINTERVAL converts number to INTERVAL DAY TO SECOND literal.
"""
return "NUMTODSINTERVAL(%06f, 'SECOND')" % timedelta.total_seconds()
def date_trunc_sql(self, lookup_type, field_name):
# https://docs.oracle.com/database/121/SQLRF/functions271.htm#SQLRF52058
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'quarter':
return "TRUNC(%s, 'Q')" % field_name
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = re.compile(r'^[\w/:+-]+$')
def _convert_field_to_tz(self, field_name, tzname):
if not settings.USE_TZ:
return field_name
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE
# and cast it back to TIMESTAMP to strip the TIME ZONE details.
return "CAST((FROM_TZ(%s, '0:00') AT TIME ZONE '%s') AS TIMESTAMP)" % (field_name, tzname)
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return 'TRUNC(%s)' % field_name
def datetime_cast_time_sql(self, field_name, tzname):
# Since `TimeField` values are stored as TIMESTAMP where only the date
# part is ignored, convert the field to the specified timezone.
return self._convert_field_to_tz(field_name, tzname)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return self.date_extract_sql(lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
# https://docs.oracle.com/database/121/SQLRF/functions271.htm#SQLRF52058
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'quarter':
sql = "TRUNC(%s, 'Q')" % field_name
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql
def time_trunc_sql(self, lookup_type, field_name):
# The implementation is similar to `datetime_trunc_sql` as both
# `DateTimeField` and `TimeField` are stored as TIMESTAMP where
# the date part of the later is ignored.
if lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
elif lookup_type == 'second':
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type == 'BinaryField':
converters.append(self.convert_binaryfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
# Oracle stores empty strings as null. If the field accepts the empty
# string, undo this to adhere to the Django convention of using
# the empty string instead of null.
if expression.field.empty_strings_allowed:
converters.append(
self.convert_empty_bytes
if internal_type == 'BinaryField' else
self.convert_empty_string
)
return converters
def convert_textfield_value(self, value, expression, connection):
if isinstance(value, Database.LOB):
value = value.read()
return value
def convert_binaryfield_value(self, value, expression, connection):
if isinstance(value, Database.LOB):
value = force_bytes(value.read())
return value
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime.
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
if settings.USE_TZ:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection):
if isinstance(value, Database.Timestamp):
value = value.date()
return value
def convert_timefield_value(self, value, expression, connection):
if isinstance(value, Database.Timestamp):
value = value.time()
return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
@staticmethod
def convert_empty_string(value, expression, connection):
return '' if value is None else value
@staticmethod
def convert_empty_bytes(value, expression, connection):
return b'' if value is None else value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def fetch_returned_insert_id(self, cursor):
return int(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def no_limit_value(self):
return None
def limit_offset_sql(self, low_mark, high_mark):
fetch, offset = self._get_limit_offset_params(low_mark, high_mark)
return '%s%s' % (
(' OFFSET %d ROWS' % offset) if offset else '',
(' FETCH FIRST %d ROWS ONLY' % fetch) if fetch else '',
)
def last_executed_query(self, cursor, sql, params):
# https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
# Unlike Psycopg's `query` and MySQLdb`'s `_last_executed`, CxOracle's
# `statement` doesn't contain the query parameters. refs #20010.
return super().last_executed_query(cursor, statement, params)
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
cursor.execute('"%s".currval' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def pk_default_value(self):
return "NULL"
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return value.read()
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%', '%%')
return name.upper()
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def return_insert_id(self):
return "RETURNING %s INTO %%s", (InsertIdVar(),)
def savepoint_create_sql(self, sid):
return "SAVEPOINT " + self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
return "ROLLBACK TO SAVEPOINT " + self.quote_name(sid)
def _foreign_key_constraints(self, table_name, recursive=False):
with self.connection.cursor() as cursor:
if recursive:
cursor.execute("""
SELECT
user_tables.table_name, rcons.constraint_name
FROM
user_tables
JOIN
user_constraints cons
ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U'))
LEFT JOIN
user_constraints rcons
ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R')
START WITH user_tables.table_name = UPPER(%s)
CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name
GROUP BY
user_tables.table_name, rcons.constraint_name
HAVING user_tables.table_name != UPPER(%s)
ORDER BY MAX(level) DESC
""", (table_name, table_name))
else:
cursor.execute("""
SELECT
cons.table_name, cons.constraint_name
FROM
user_constraints cons
WHERE
cons.constraint_type = 'R'
AND cons.table_name = UPPER(%s)
""", (table_name,))
return cursor.fetchall()
    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """
        Return a list of SQL statements that empty *tables* and reset the
        given *sequences*.

        Each table's FK constraints are DISABLEd first and re-ENABLEd after
        the TRUNCATEs, so the tables can be truncated in any order. With
        allow_cascade=True, tables that (transitively) reference the flushed
        tables are truncated as well.
        """
        if tables:
            truncated_tables = {table.upper() for table in tables}
            constraints = set()
            # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE
            # foreign keys which Django doesn't define. Emulate the
            # PostgreSQL behavior which truncates all dependent tables by
            # manually retrieving all foreign key constraints and resolving
            # dependencies.
            for table in tables:
                for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade):
                    if allow_cascade:
                        truncated_tables.add(foreign_table)
                    constraints.add((foreign_table, constraint))
            # DISABLE all collected constraints, TRUNCATE every target table,
            # then re-ENABLE the constraints.
            sql = [
                "%s %s %s %s %s %s %s %s;" % (
                    style.SQL_KEYWORD('ALTER'),
                    style.SQL_KEYWORD('TABLE'),
                    style.SQL_FIELD(self.quote_name(table)),
                    style.SQL_KEYWORD('DISABLE'),
                    style.SQL_KEYWORD('CONSTRAINT'),
                    style.SQL_FIELD(self.quote_name(constraint)),
                    style.SQL_KEYWORD('KEEP'),
                    style.SQL_KEYWORD('INDEX'),
                ) for table, constraint in constraints
            ] + [
                "%s %s %s;" % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_KEYWORD('TABLE'),
                    style.SQL_FIELD(self.quote_name(table)),
                ) for table in truncated_tables
            ] + [
                "%s %s %s %s %s %s;" % (
                    style.SQL_KEYWORD('ALTER'),
                    style.SQL_KEYWORD('TABLE'),
                    style.SQL_FIELD(self.quote_name(table)),
                    style.SQL_KEYWORD('ENABLE'),
                    style.SQL_KEYWORD('CONSTRAINT'),
                    style.SQL_FIELD(self.quote_name(constraint)),
                ) for table, constraint in constraints
            ]
            # Since we've just deleted all the rows, running our sequence
            # ALTER code will reset the sequence to 0.
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table'])
table = self.quote_name(sequence_info['table'])
column = self.quote_name(sequence_info['column'] or 'id')
query = self._sequence_reset_sql % {
'no_autofield_sequence_name': no_autofield_sequence_name,
'table': table,
'column': column,
'table_name': strip_quotes(table),
'column_name': strip_quotes(column),
}
sql.append(query)
return sql
    def sequence_reset_sql(self, style, model_list):
        """
        Return statements resetting the sequences backing the AutoFields of
        the given models, including the implicit 'id' column of auto-created
        many-to-many through tables.
        """
        from django.db import models
        output = []
        query = self._sequence_reset_sql
        for model in model_list:
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table)
                    table = self.quote_name(model._meta.db_table)
                    column = self.quote_name(f.column)
                    output.append(query % {
                        'no_autofield_sequence_name': no_autofield_sequence_name,
                        'table': table,
                        'column': column,
                        'table_name': strip_quotes(table),
                        'column_name': strip_quotes(column),
                    })
                    # Only one AutoField is allowed per model, so don't
                    # continue to loop
                    break
            for f in model._meta.many_to_many:
                # Only auto-created M2M tables (no explicit through model)
                # own a sequence that needs resetting.
                if not f.remote_field.through:
                    no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table())
                    table = self.quote_name(f.m2m_db_table())
                    column = self.quote_name('id')
                    output.append(query % {
                        'no_autofield_sequence_name': no_autofield_sequence_name,
                        'table': table,
                        'column': column,
                        'table_name': strip_quotes(table),
                        'column_name': 'ID',
                    })
        return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
The default implementation transforms the date to text, but that is not
necessary for Oracle.
"""
return value
    def adapt_datetimefield_value(self, value):
        """
        Transform a datetime value to an object compatible with what is expected
        by the backend driver for datetime columns.

        If naive datetime is passed assumes that is in UTC. Normally Django
        models.DateTimeField makes sure that if USE_TZ is True passed datetime
        is timezone aware. Returns an Oracle_datetime (or None / the original
        expression object).
        """
        if value is None:
            return None
        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value
        # cx_Oracle doesn't support tz-aware datetimes, so convert aware
        # values to naive ones in the connection's timezone first.
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
        return Oracle_datetime.from_datetime(value)
    def adapt_timefield_value(self, value):
        """
        Transform a time value to an object the driver can bind for TIME
        columns.

        Oracle has no bare TIME type, so times are stored as datetimes on a
        fixed dummy date (1900-01-01).
        """
        if value is None:
            return None
        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value
        # String input is parsed as HH:MM:SS (no fractional seconds).
        if isinstance(value, str):
            return datetime.datetime.strptime(value, '%H:%M:%S')
        # Oracle doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("Oracle backend does not support timezone-aware times.")
        return Oracle_datetime(1900, 1, 1, value.hour, value.minute,
                               value.second, value.microsecond)
def combine_expression(self, connector, sub_expressions):
lhs, rhs = sub_expressions
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs}
elif connector == '<<':
return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
elif connector == '>>':
return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
elif connector == '^':
return 'POWER(%s)' % ','.join(sub_expressions)
return super().combine_expression(connector, sub_expressions)
def _get_no_autofield_sequence_name(self, table):
"""
Manually created sequence name to keep backward compatibility for
AutoFields that aren't Oracle identity columns.
"""
name_length = self.max_name_length() - 3
return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper()
    def _get_sequence_name(self, cursor, table, pk_name):
        """
        Return the sequence name backing table.pk_name: the identity-column
        sequence when one exists, else the legacy manually created name.
        """
        cursor.execute("""
            SELECT sequence_name
            FROM user_tab_identity_cols
            WHERE table_name = UPPER(%s)
            AND column_name = UPPER(%s)""", [table, pk_name])
        row = cursor.fetchone()
        return self._get_no_autofield_sequence_name(table) if row is None else row[0]
    def bulk_insert_sql(self, fields, placeholder_rows):
        """
        Return the VALUES-equivalent clause for a bulk INSERT as a
        SELECT ... FROM DUAL UNION ALL ... subquery (Oracle has no
        multi-row VALUES syntax).
        """
        query = []
        for row in placeholder_rows:
            select = []
            for i, placeholder in enumerate(row):
                # A model without any fields has fields=[None].
                if fields[i]:
                    # Wrap the placeholder with the type-specific cast
                    # (e.g. TO_TIMESTAMP) required by the field's type.
                    internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type()
                    placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder
                # Add columns aliases to the first select to avoid "ORA-00918:
                # column ambiguously defined" when two or more columns in the
                # first select have the same value.
                if not query:
                    placeholder = '%s col_%s' % (placeholder, i)
                select.append(placeholder)
            query.append('SELECT %s FROM DUAL' % ', '.join(select))
        # Bulk insert to tables with Oracle identity columns causes Oracle to
        # add sequence.nextval to it. Sequence.nextval cannot be used with the
        # UNION operator. To prevent incorrect SQL, move UNION to a subquery.
        return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query)
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == 'DateField':
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "NUMTODSINTERVAL(%s - %s, 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return super().subtract_temporals(internal_type, lhs, rhs)
def bulk_batch_size(self, fields, objs):
"""Oracle restricts the number of parameters in a query."""
if fields:
return self.connection.features.max_query_params // len(fields)
return len(objs)
| bsd-3-clause |
browniebroke/django-acme | setup.py | 1 | 1938 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Single source of truth for the package version; also used by the 'tag' helper.
version = '0.2.4'
# "python setup.py publish": build and upload an sdist and a wheel to PyPI.
if sys.argv[-1] == 'publish':
    try:
        import wheel
        print("Wheel version: ", wheel.__version__)
    except ImportError:
        print('Wheel library missing. Please run "pip install wheel"')
        sys.exit()
    # NOTE(review): "setup.py ... upload" is deprecated and uploads over a
    # legacy code path; modern releases should use twine -- confirm before
    # the next release.
    os.system('python setup.py sdist upload')
    os.system('python setup.py bdist_wheel upload')
    sys.exit()
# "python setup.py tag": create an annotated git tag for the current version
# and push all tags.
if sys.argv[-1] == 'tag':
    print("Tagging the version on git:")
    os.system("git tag -a %s -m 'version %s'" % (version, version))
    os.system("git push --tags")
    sys.exit()
# Long description shown on PyPI: README followed by the changelog (with the
# reST comment marker stripped).
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
    name='django-acme',
    version=version,
    description="""A re-usable Django app to quickly deploy a page for the ACME challenge""",
    long_description=readme + '\n\n' + history,
    author='Bruno Alla',
    author_email='alla.brunoo@gmail.com',
    url='https://github.com/browniebroke/django-acme',
    packages=[
        'acme_challenge',
    ],
    include_package_data=True,
    install_requires=[],
    license="MIT",
    zip_safe=False,
    keywords='django-acme',
    classifiers=[
        'Development Status :: 7 - Inactive',
        'Framework :: Django',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Framework :: Django :: 1.10',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
| mit |
bastibl/gr-ieee802-15-4 | python/qa_interleaver_ii.py | 4 | 3631 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Felix Wunsch, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT) <wunsch.felix@googlemail.com>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import ieee802_15_4_swig as ieee802_15_4
from css_phy import physical_layer as phy
class qa_interleaver_ii (gr_unittest.TestCase):
    """Unit tests for the ieee802_15_4.interleaver_ii GNU Radio block."""
    def setUp (self):
        # Fresh flowgraph per test case.
        self.tb = gr.top_block ()
    def tearDown (self):
        self.tb = None
    def test_001_t (self):
        # An empty interleaving sequence makes the forward block a pass-through.
        cfg = phy(slow_rate=False)
        data_in = range(541) # 541 is prime, so no accidental periodicity
        self.src = blocks.vector_source_i(data_in)
        self.intlv = ieee802_15_4.interleaver_ii(intlv_seq=(),forward=True)
        self.snk = blocks.vector_sink_i(1)
        self.tb.connect(self.src, self.intlv, self.snk)
        self.tb.run ()
        # check data
        data_out = self.snk.data()
        self.assertFloatTuplesAlmostEqual(data_in, data_out)
    def test_002_t (self):
        # An empty sequence makes the deinterleaver a pass-through as well.
        cfg = phy(slow_rate=False)
        data_in = range(541) # 541 is prime, so no accidental periodicity
        self.src = blocks.vector_source_i(data_in)
        self.intlv = ieee802_15_4.interleaver_ii(intlv_seq=(),forward=False)
        self.snk = blocks.vector_sink_i(1)
        self.tb.connect(self.src, self.intlv, self.snk)
        self.tb.run ()
        # check data
        data_out = self.snk.data()
        self.assertFloatTuplesAlmostEqual(data_in, data_out)
    def test_003_t (self):
        # Forward interleaving must permute each period according to intlv_seq.
        cfg = phy(slow_rate=True)
        data_in = range(3*len(cfg.intlv_seq)) # three full interleaver periods
        self.src = blocks.vector_source_i(data_in)
        self.intlv = ieee802_15_4.interleaver_ii(intlv_seq=cfg.intlv_seq,forward=True)
        self.snk = blocks.vector_sink_i(1)
        self.tb.connect(self.src, self.intlv, self.snk)
        self.tb.run ()
        # Compare against a reference permutation computed in pure Python.
        data_out = self.snk.data()
        ref = []
        for n in range(3):
            for i in range(len(cfg.intlv_seq)):
                ref.append(data_in[n*len(cfg.intlv_seq)+cfg.intlv_seq[i]])
        self.assertFloatTuplesAlmostEqual(ref, data_out)
    def test_004_t (self):
        # Interleave followed by deinterleave must restore the input.
        cfg = phy(slow_rate=True)
        data_in = range(3*len(cfg.intlv_seq)) # three full interleaver periods
        self.src = blocks.vector_source_i(data_in)
        self.intlv = ieee802_15_4.interleaver_ii(intlv_seq=cfg.intlv_seq,forward=True)
        self.deintlv = ieee802_15_4.interleaver_ii(intlv_seq=cfg.intlv_seq,forward=False)
        self.snk = blocks.vector_sink_i(1)
        self.tb.connect(self.src, self.intlv, self.deintlv, self.snk)
        self.tb.run ()
        # check data
        data_out = self.snk.data()
        self.assertFloatTuplesAlmostEqual(data_in, data_out)
if __name__ == '__main__':
    # Run the suite and emit an XML result file for GNU Radio's test harness.
    gr_unittest.run(qa_interleaver_ii, "qa_interleaver_ii.xml")
| gpl-3.0 |
smalyshev/pywikibot-core | scripts/touch.py | 3 | 3094 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This bot goes over multiple pages of a wiki, and edits them without changes.
This is for example used to get category links in templates
working.
This script understands various command-line arguments:
¶ms;
-purge Do not touch but purge the page
-botflag Force botflag in case of edits with changes.
"""
#
# (C) Pywikibot team, 2009-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import pagegenerators
from pywikibot.bot import MultipleSitesBot
docuReplacements = {'¶ms;': pagegenerators.parameterHelp}
class TouchBot(MultipleSitesBot):
    """
    Page touch bot.

    Performs a "null edit" on every generated page: saving the page without
    changes forces MediaWiki to re-render it and refresh its link tables.
    """
    def __init__(self, generator, **kwargs):
        """Initialize a TouchBot instance with the options and generator."""
        # Register the bot-specific 'botflag' option before the base class
        # parses **kwargs.
        self.availableOptions.update({
            'botflag': False,
        })
        super(TouchBot, self).__init__(generator=generator, **kwargs)
    def treat(self, page):
        """Touch the given page, logging (not raising) the known failures."""
        try:
            page.touch(botflag=self.getOption('botflag'))
        except pywikibot.NoPage:
            pywikibot.error(u"Page %s does not exist."
                            % page.title(asLink=True))
        except pywikibot.LockedPage:
            pywikibot.error(u"Page %s is locked."
                            % page.title(asLink=True))
        except pywikibot.PageNotSaved:
            pywikibot.error(u"Page %s not saved."
                            % page.title(asLink=True))
class PurgeBot(MultipleSitesBot):
    """Purge (server-side cache invalidate) each page from the generator."""
    def treat(self, page):
        """Purge the given page and report whether the purge succeeded."""
        purged = page.purge()
        pywikibot.output(u'Page %s%s purged'
                         % (page.title(asLink=True),
                            "" if purged else " not"))
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    gen = None
    options = {}
    # Process global args and prepare generator args parser
    local_args = pywikibot.handle_args(args)
    genFactory = pagegenerators.GeneratorFactory()
    bot_class = TouchBot
    for arg in local_args:
        if arg == '-purge':
            # Switch to cache purging instead of null edits.
            bot_class = PurgeBot
        elif arg == '-redir':
            pywikibot.output(u'-redirect option is deprecated, '
                             'do not use it anymore.')
        elif not genFactory.handleArg(arg) and arg.startswith("-"):
            # Any remaining "-foo" flag becomes a boolean bot option,
            # e.g. -botflag.
            options[arg[1:].lower()] = True
    gen = genFactory.getCombinedGenerator()
    if gen:
        # Preload page contents in batches to cut down on API round trips.
        preloadingGen = pagegenerators.PreloadingGenerator(gen)
        bot = bot_class(generator=preloadingGen, **options)
        pywikibot.Site().login()
        bot.run()
        return True
    else:
        pywikibot.bot.suggest_help(missing_generator=True)
        return False
if __name__ == "__main__":
main()
| mit |
jmr0/servo | tests/wpt/run.py | 40 | 2301 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import multiprocessing
import os
import sys
import mozlog
import grouping_formatter
here = os.path.split(__file__)[0]
servo_root = os.path.abspath(os.path.join(here, "..", ".."))
def wpt_path(*args):
    """Build a path relative to this web-platform-tests directory."""
    return os.path.join(*((here,) + args))
def servo_path(*args):
    """Build a path relative to the Servo repository root."""
    return os.path.join(*((servo_root,) + args))
# Imports
sys.path.append(wpt_path("harness"))
from wptrunner import wptrunner, wptcommandline
def run_tests(paths=None, **kwargs):
    """
    Run the web-platform-tests suite and return a process exit code
    (0 on success, 1 on failure).
    """
    if paths is None:
        paths = {}
    set_defaults(paths, kwargs)
    # Register Servo's grouped output formatter with mozlog.
    mozlog.commandline.log_formatters["servo"] = \
        (grouping_formatter.GroupingFormatter, "A grouping output formatter")
    # When a single test file is requested, fall back to mach-style verbose
    # logging; otherwise use the grouped "servo" formatter.
    use_mach_logging = False
    if len(kwargs["test_list"]) == 1:
        file_ext = os.path.splitext(kwargs["test_list"][0])[1].lower()
        if file_ext in [".htm", ".html", ".js", ".xhtml"]:
            use_mach_logging = True
    if use_mach_logging:
        wptrunner.setup_logging(kwargs, {"mach": sys.stdout})
    else:
        wptrunner.setup_logging(kwargs, {"servo": sys.stdout})
    success = wptrunner.run_tests(**kwargs)
    return 0 if success else 1
def set_defaults(paths, kwargs):
    """
    Fill in wptrunner defaults (product, config paths, Servo binary location,
    process count, Ahem stylesheet) for any argument not given explicitly,
    then validate the resulting kwargs.
    """
    if kwargs["product"] is None:
        kwargs["product"] = "servo"
    if kwargs["config"] is None and "config" in paths:
        kwargs["config"] = paths["config"]
    if kwargs["include_manifest"] is None and "include_manifest" in paths:
        kwargs["include_manifest"] = paths["include_manifest"]
    if kwargs["binary"] is None:
        # Default to the in-tree build output for the selected profile.
        bin_dir = "release" if kwargs["release"] else "debug"
        bin_name = "servo"
        if sys.platform == "win32":
            bin_name += ".exe"
        bin_path = servo_path("target", bin_dir, bin_name)
        kwargs["binary"] = bin_path
    if kwargs["processes"] is None:
        kwargs["processes"] = multiprocessing.cpu_count()
    # The Ahem font stylesheet is required for layout-sensitive tests.
    kwargs["user_stylesheets"].append(servo_path("resources", "ahem.css"))
    wptcommandline.check_args(kwargs)
def main(paths=None):
    """Parse command-line arguments and run the test suite."""
    parsed = vars(wptcommandline.create_parser().parse_args())
    return run_tests(paths, **parsed)
| mpl-2.0 |
Elandril/Sick-Beard | lib/hachoir_metadata/video.py | 90 | 15568 | from lib.hachoir_core.field import MissingField
from lib.hachoir_metadata.metadata import (registerExtractor,
Metadata, RootMetadata, MultipleMetadata)
from lib.hachoir_metadata.metadata_item import QUALITY_GOOD
from lib.hachoir_metadata.safe import fault_tolerant
from lib.hachoir_parser.video import MovFile, AsfFile, FlvFile
from lib.hachoir_parser.video.asf import Descriptor as ASF_Descriptor
from lib.hachoir_parser.container import MkvFile
from lib.hachoir_parser.container.mkv import dateToDatetime
from lib.hachoir_core.i18n import _
from lib.hachoir_core.tools import makeUnicode, makePrintable, timedelta2seconds
from datetime import timedelta
class MkvMetadata(MultipleMetadata):
    """Extract metadata from Matroska (MKV) containers parsed by MkvFile."""
    # Matroska SimpleTag names mapped onto generic metadata attribute names.
    tag_key = {
        "TITLE": "title",
        "URL": "url",
        "COPYRIGHT": "copyright",
        # TODO: use maybe another name?
        # Its value may be different than (...)/Info/DateUTC/date
        "DATE_RECORDED": "creation_date",
        # TODO: Extract subtitle metadata
        "SUBTITLE": "subtitle_author",
    }
    def extract(self, mkv):
        """Entry point: walk every Segment of the parsed file."""
        for segment in mkv.array("Segment"):
            self.processSegment(segment)
    def processSegment(self, segment):
        # Dispatch on the top-level element kind; stop at the first Cluster
        # (media data) unless full-quality extraction was requested.
        for field in segment:
            if field.name.startswith("Info["):
                self.processInfo(field)
            elif field.name.startswith("Tags["):
                for tag in field.array("Tag"):
                    self.processTag(tag)
            elif field.name.startswith("Tracks["):
                self.processTracks(field)
            elif field.name.startswith("Cluster["):
                if self.quality < QUALITY_GOOD:
                    return
    def processTracks(self, tracks):
        for entry in tracks.array("TrackEntry"):
            self.processTrack(entry)
    def processTrack(self, track):
        """Dispatch a TrackEntry to the handler matching its track type."""
        if "TrackType/enum" not in track:
            return
        if track["TrackType/enum"].display == "video":
            self.processVideo(track)
        elif track["TrackType/enum"].display == "audio":
            self.processAudio(track)
        elif track["TrackType/enum"].display == "subtitle":
            self.processSubtitle(track)
    def trackCommon(self, track, meta):
        # Title and language apply to all track kinds; 'mis'/'und' are the
        # ISO 639 codes for miscellaneous/undetermined and carry no info.
        if "Name/unicode" in track:
            meta.title = track["Name/unicode"].value
        if "Language/string" in track \
                and track["Language/string"].value not in ("mis", "und"):
            meta.language = track["Language/string"].value
    def processVideo(self, track):
        video = Metadata(self)
        self.trackCommon(track, video)
        try:
            video.compression = track["CodecID/string"].value
            if "Video" in track:
                video.width = track["Video/PixelWidth/unsigned"].value
                video.height = track["Video/PixelHeight/unsigned"].value
        except MissingField:
            pass
        self.addGroup("video[]", video, "Video stream")
    def getDouble(self, field, parent):
        """Return <parent>/float or <parent>/double under *field*, else None."""
        float_key = '%s/float' % parent
        if float_key in field:
            return field[float_key].value
        double_key = '%s/double' % parent
        if double_key in field:
            return field[double_key].value
        return None
    def processAudio(self, track):
        audio = Metadata(self)
        self.trackCommon(track, audio)
        if "Audio" in track:
            frequency = self.getDouble(track, "Audio/SamplingFrequency")
            if frequency is not None:
                audio.sample_rate = frequency
            if "Audio/Channels/unsigned" in track:
                audio.nb_channel = track["Audio/Channels/unsigned"].value
            if "Audio/BitDepth/unsigned" in track:
                audio.bits_per_sample = track["Audio/BitDepth/unsigned"].value
        if "CodecID/string" in track:
            audio.compression = track["CodecID/string"].value
        self.addGroup("audio[]", audio, "Audio stream")
    def processSubtitle(self, track):
        sub = Metadata(self)
        self.trackCommon(track, sub)
        try:
            sub.compression = track["CodecID/string"].value
        except MissingField:
            pass
        self.addGroup("subtitle[]", sub, "Subtitle")
    def processTag(self, tag):
        for field in tag.array("SimpleTag"):
            self.processSimpleTag(field)
    def processSimpleTag(self, tag):
        """Copy one SimpleTag to the attribute named in tag_key, if mapped."""
        if "TagName/unicode" not in tag \
                or "TagString/unicode" not in tag:
            return
        name = tag["TagName/unicode"].value
        if name not in self.tag_key:
            return
        key = self.tag_key[name]
        value = tag["TagString/unicode"].value
        setattr(self, key, value)
    def processInfo(self, info):
        """Extract duration, creation date, producer and title from Info."""
        if "TimecodeScale/unsigned" in info:
            # Duration is expressed in timecode-scale units (nanoseconds).
            duration = self.getDouble(info, "Duration")
            if duration is not None:
                try:
                    seconds = duration * info["TimecodeScale/unsigned"].value * 1e-9
                    self.duration = timedelta(seconds=seconds)
                except OverflowError:
                    # Catch OverflowError for timedelta (long int too large
                    # to be converted to an int)
                    pass
        if "DateUTC/date" in info:
            try:
                self.creation_date = dateToDatetime(info["DateUTC/date"].value)
            except OverflowError:
                pass
        if "WritingApp/unicode" in info:
            self.producer = info["WritingApp/unicode"].value
        if "MuxingApp/unicode" in info:
            self.producer = info["MuxingApp/unicode"].value
        if "Title/unicode" in info:
            self.title = info["Title/unicode"].value
class FlvMetadata(MultipleMetadata):
    """Extract metadata from Flash Video (FLV) containers parsed by FlvFile."""
    def extract(self, flv):
        """Entry point: read first A/V chunks and the onMetaData AMF entry."""
        if "video[0]" in flv:
            meta = Metadata(self)
            self.extractVideo(flv["video[0]"], meta)
            self.addGroup("video", meta, "Video stream")
        if "audio[0]" in flv:
            meta = Metadata(self)
            self.extractAudio(flv["audio[0]"], meta)
            self.addGroup("audio", meta, "Audio stream")
        # TODO: Compute duration
        # One technique: use last video/audio chunk and use its timestamp.
        # But this is very slow
        self.format_version = flv.description
        if "metadata/entry[1]" in flv:
            self.extractAMF(flv["metadata/entry[1]"])
        if self.has('duration'):
            self.bit_rate = flv.size / timedelta2seconds(self.get('duration'))
    @fault_tolerant
    def extractAudio(self, audio, meta):
        # For MP3, the embedded frame header gives a richer description
        # than the generic codec id.
        if audio["codec"].display == "MP3" and "music_data" in audio:
            meta.compression = audio["music_data"].description
        else:
            meta.compression = audio["codec"].display
        meta.sample_rate = audio.getSampleRate()
        if audio["is_16bit"].value:
            meta.bits_per_sample = 16
        else:
            meta.bits_per_sample = 8
        if audio["is_stereo"].value:
            meta.nb_channel = 2
        else:
            meta.nb_channel = 1
    @fault_tolerant
    def extractVideo(self, video, meta):
        meta.compression = video["codec"].display
    def extractAMF(self, amf):
        """Walk the onMetaData AMF array and copy known keys."""
        for entry in amf.array("item"):
            self.useAmfEntry(entry)
    @fault_tolerant
    def useAmfEntry(self, entry):
        key = entry["key"].value
        if key == "duration":
            self.duration = timedelta(seconds=entry["value"].value)
        elif key == "creator":
            self.producer = entry["value"].value
        elif key == "audiosamplerate":
            self.sample_rate = entry["value"].value
        elif key == "framerate":
            self.frame_rate = entry["value"].value
        elif key == "metadatacreator":
            self.producer = entry["value"].value
        elif key == "metadatadate":
            self.creation_date = entry.value
        elif key == "width":
            self.width = int(entry["value"].value)
        elif key == "height":
            self.height = int(entry["value"].value)
class MovMetadata(RootMetadata):
    """Extract metadata from QuickTime (MOV/MP4) containers parsed by MovFile."""
    def extract(self, mov):
        for atom in mov:
            if "movie" in atom:
                self.processMovie(atom["movie"])
    @fault_tolerant
    def processMovieHeader(self, hdr):
        """Read dates, duration, play speed and volume from the 'mvhd' atom."""
        self.creation_date = hdr["creation_date"].value
        self.last_modification = hdr["lastmod_date"].value
        # Duration is stored in time_scale units per second.
        self.duration = timedelta(seconds=float(hdr["duration"].value) / hdr["time_scale"].value)
        # NOTE(review): 'comment' is assigned twice; hachoir metadata
        # attributes appear to accumulate values rather than overwrite --
        # confirm against the Metadata base class.
        self.comment = _("Play speed: %.1f%%") % (hdr["play_speed"].value*100)
        self.comment = _("User volume: %.1f%%") % (float(hdr["volume"].value)*100//255)
    @fault_tolerant
    def processTrackHeader(self, hdr):
        """Read frame dimensions from a 'tkhd' atom (skip zero-sized tracks)."""
        width = int(hdr["frame_size_width"].value)
        height = int(hdr["frame_size_height"].value)
        if width and height:
            self.width = width
            self.height = height
    def processTrack(self, atom):
        for field in atom:
            if "track_hdr" in field:
                self.processTrackHeader(field["track_hdr"])
    def processMovie(self, atom):
        for field in atom:
            if "track" in field:
                self.processTrack(field["track"])
            if "movie_hdr" in field:
                self.processMovieHeader(field["movie_hdr"])
class AsfMetadata(MultipleMetadata):
    """Extract metadata from ASF containers (WMV/WMA) parsed by AsfFile."""
    # Extended-description keys mapped onto generic metadata attribute names.
    EXT_DESC_TO_ATTR = {
        "Encoder": "producer",
        "ToolName": "producer",
        "AlbumTitle": "album",
        "Track": "track_number",
        "TrackNumber": "track_total",
        "Year": "creation_date",
        "AlbumArtist": "author",
    }
    # Extended-description keys that carry no user-relevant information.
    SKIP_EXT_DESC = set((
        # Useless informations
        "WMFSDKNeeded", "WMFSDKVersion",
        "Buffer Average", "VBR Peak", "EncodingTime",
        "MediaPrimaryClassID", "UniqueFileIdentifier",
    ))
    def extract(self, asf):
        if "header/content" in asf:
            self.processHeader(asf["header/content"])
    def processHeader(self, header):
        """Read extended descriptors, file properties, codecs and streams."""
        compression = []
        is_vbr = None
        if "ext_desc/content" in header:
            # Extract all data from ext_desc
            data = {}
            for desc in header.array("ext_desc/content/descriptor"):
                self.useExtDescItem(desc, data)
            # Have ToolName and ToolVersion? If yes, group them to producer key
            if "ToolName" in data and "ToolVersion" in data:
                self.producer = "%s (version %s)" % (data["ToolName"], data["ToolVersion"])
                del data["ToolName"]
                del data["ToolVersion"]
            # "IsVBR" key
            if "IsVBR" in data:
                is_vbr = (data["IsVBR"] == 1)
                del data["IsVBR"]
            # Store data: mapped keys become attributes, everything else
            # becomes a "key=value" comment.
            for key, value in data.iteritems():
                if key in self.EXT_DESC_TO_ATTR:
                    key = self.EXT_DESC_TO_ATTR[key]
                else:
                    if isinstance(key, str):
                        key = makePrintable(key, "ISO-8859-1", to_unicode=True)
                    value = "%s=%s" % (key, value)
                    key = "comment"
                setattr(self, key, value)
        if "file_prop/content" in header:
            self.useFileProp(header["file_prop/content"], is_vbr)
        if "codec_list/content" in header:
            for codec in header.array("codec_list/content/codec"):
                if "name" in codec:
                    text = codec["name"].value
                    if "desc" in codec and codec["desc"].value:
                        text = "%s (%s)" % (text, codec["desc"].value)
                    compression.append(text)
        # One metadata group per audio/video stream property object.
        audio_index = 1
        video_index = 1
        for index, stream_prop in enumerate(header.array("stream_prop")):
            if "content/audio_header" in stream_prop:
                meta = Metadata(self)
                self.streamProperty(header, index, meta)
                self.streamAudioHeader(stream_prop["content/audio_header"], meta)
                if self.addGroup("audio[%u]" % audio_index, meta, "Audio stream #%u" % audio_index):
                    audio_index += 1
            elif "content/video_header" in stream_prop:
                meta = Metadata(self)
                self.streamProperty(header, index, meta)
                self.streamVideoHeader(stream_prop["content/video_header"], meta)
                if self.addGroup("video[%u]" % video_index, meta, "Video stream #%u" % video_index):
                    video_index += 1
        if "metadata/content" in header:
            info = header["metadata/content"]
            try:
                self.title = info["title"].value
                self.author = info["author"].value
                self.copyright = info["copyright"].value
            except MissingField:
                pass
    @fault_tolerant
    def streamAudioHeader(self, audio, meta):
        if not meta.has("compression"):
            meta.compression = audio["twocc"].display
        meta.nb_channel = audio["channels"].value
        meta.sample_rate = audio["sample_rate"].value
        meta.bits_per_sample = audio["bits_per_sample"].value
    @fault_tolerant
    def streamVideoHeader(self, video, meta):
        meta.width = video["width"].value
        meta.height = video["height"].value
        if "bmp_info" in video:
            bmp_info = video["bmp_info"]
            if not meta.has("compression"):
                meta.compression = bmp_info["codec"].display
            meta.bits_per_pixel = bmp_info["bpp"].value
    @fault_tolerant
    def useExtDescItem(self, desc, data):
        """Collect one extended descriptor into *data* (skip binary/noise)."""
        if desc["type"].value == ASF_Descriptor.TYPE_BYTE_ARRAY:
            # Skip binary data
            return
        key = desc["name"].value
        if "/" in key:
            # Replace "WM/ToolName" with "ToolName"
            key = key.split("/", 1)[1]
        if key in self.SKIP_EXT_DESC:
            # Skip some keys
            return
        value = desc["value"].value
        if not value:
            return
        value = makeUnicode(value)
        data[key] = value
    @fault_tolerant
    def useFileProp(self, prop, is_vbr):
        """Read creation date, duration and bit rate from file properties."""
        self.creation_date = prop["creation_date"].value
        self.duration = prop["play_duration"].value
        if prop["seekable"].value:
            self.comment = u"Is seekable"
        value = prop["max_bitrate"].value
        text = prop["max_bitrate"].display
        if is_vbr is True:
            text = "VBR (%s max)" % text
        elif is_vbr is False:
            text = "%s (CBR)" % text
        else:
            text = "%s (max)" % text
        self.bit_rate = (value, text)
    def streamProperty(self, header, index, meta):
        """Copy the average bit rate of stream *index* into *meta*."""
        key = "bit_rates/content/bit_rate[%u]/avg_bitrate" % index
        if key in header:
            meta.bit_rate = header[key].value
        # TODO: Use codec list
        # It doesn't work when the video uses /header/content/bitrate_mutex
        # since the codec list are shared between streams but... how is it
        # shared?
        # key = "codec_list/content/codec[%u]" % index
        # if key in header:
        #     codec = header[key]
        #     if "name" in codec:
        #         text = codec["name"].value
        #         if "desc" in codec and codec["desc"].value:
        #             meta.compression = "%s (%s)" % (text, codec["desc"].value)
        #         else:
        #             meta.compression = text
# Register each container parser with its metadata extractor so the generic
# hachoir_metadata dispatcher can pick the right class for a parsed file.
registerExtractor(MovFile, MovMetadata)
registerExtractor(AsfFile, AsfMetadata)
registerExtractor(FlvFile, FlvMetadata)
registerExtractor(MkvFile, MkvMetadata)
| gpl-3.0 |
hellwen/mytrade | mytrade/user/models.py | 1 | 1859 | # -*- coding: utf-8 -*-
import datetime as dt
from flask_login import UserMixin
from mytrade.extensions import bcrypt
from mytrade.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK,
)
class Role(SurrogatePK, Model):
    """A named role that can be granted to a user."""
    __tablename__ = 'roles'
    # Unique role name, e.g. 'admin'.
    name = Column(db.String(80), unique=True, nullable=False)
    # Optional owning user; backref exposes `user.roles` on User.
    user_id = ReferenceCol('users', nullable=True)
    user = relationship('User', backref='roles')
    def __init__(self, name, **kwargs):
        db.Model.__init__(self, name=name, **kwargs)
    def __repr__(self):
        return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
    """An application user account with bcrypt-hashed credentials."""
    __tablename__ = 'users'
    username = Column(db.String(80), unique=True, nullable=False)
    email = Column(db.String(80), unique=True, nullable=False)
    #: The hashed password
    password = Column(db.String(128), nullable=True)
    created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
    first_name = Column(db.String(30), nullable=True)
    last_name = Column(db.String(30), nullable=True)
    # 'active' gates login (see flask_login); 'is_admin' grants admin rights.
    active = Column(db.Boolean(), default=False)
    is_admin = Column(db.Boolean(), default=False)
    def __init__(self, username, email, password=None, **kwargs):
        """Create a user; the password is hashed immediately if provided."""
        db.Model.__init__(self, username=username, email=email, **kwargs)
        if password:
            self.set_password(password)
        else:
            self.password = None
    def set_password(self, password):
        """Store a bcrypt hash of *password* (never the plaintext)."""
        self.password = bcrypt.generate_password_hash(password)
    def check_password(self, value):
        """Return True if *value* matches the stored password hash."""
        return bcrypt.check_password_hash(self.password, value)
    @property
    def full_name(self):
        """First and last name joined by a space."""
        return "{0} {1}".format(self.first_name, self.last_name)
    def __repr__(self):
        return '<User({username!r})>'.format(username=self.username)
| bsd-3-clause |
jeffery9/mixprint_addons | account/project/wizard/account_analytic_balance_report.py | 56 | 2147 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_balance(osv.osv_memory):
    """Transient wizard collecting parameters for the analytic balance report."""
    _name = 'account.analytic.balance'
    _description = 'Account Analytic Balance'
    _columns = {
        'date1': fields.date('Start of period', required=True),
        'date2': fields.date('End of period', required=True),
        'empty_acc': fields.boolean('Empty Accounts ? ', help='Check if you want to display Accounts with 0 balance too.'),
    }
    # Default period: January 1st of the current year through today.
    _defaults = {
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d')
    }

    def check_report(self, cr, uid, ids, context=None):
        """Return the report action for the selected period.

        The analytic accounts to print come from ``active_ids`` in the
        context; the wizard's own field values are forwarded as the form.
        """
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        datas = {
            'ids': context.get('active_ids',[]),
            'model': 'account.analytic.account',
            'form': data
        }
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'account.analytic.account.balance',
            'datas': datas,
        }

account_analytic_balance()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cuboxi/android_external_chromium_org | third_party/tlslite/tlslite/messages.py | 26 | 23028 | """Classes representing TLS messages."""
from utils.compat import *
from utils.cryptomath import *
from errors import *
from utils.codec import *
from constants import *
from X509 import X509
from X509CertChain import X509CertChain
# The sha module is deprecated in Python 2.6
try:
import sha
except ImportError:
from hashlib import sha1 as sha
# The md5 module is deprecated in Python 2.6
try:
import md5
except ImportError:
from hashlib import md5
class RecordHeader3:
    """SSLv3/TLS record-layer header: content type, version, payload length."""
    def __init__(self):
        self.type = 0
        self.version = (0,0)
        self.length = 0
        # False distinguishes this from the SSLv2-style header (RecordHeader2).
        self.ssl2 = False

    def create(self, version, type, length):
        """Populate the header fields; returns self (builder style)."""
        self.type = type
        self.version = version
        self.length = length
        return self

    def write(self):
        """Serialize to the 5-byte wire format."""
        w = Writer(5)
        w.add(self.type, 1)
        w.add(self.version[0], 1)
        w.add(self.version[1], 1)
        w.add(self.length, 2)
        return w.bytes

    def parse(self, p):
        """Read the 5-byte header from parser *p*; returns self."""
        self.type = p.get(1)
        self.version = (p.get(1), p.get(1))
        self.length = p.get(2)
        self.ssl2 = False
        return self
class RecordHeader2:
    """SSLv2-compatible record header (only the short 2-byte form)."""
    def __init__(self):
        self.type = 0
        self.version = (0,0)
        self.length = 0
        self.ssl2 = True

    def parse(self, p):
        """Parse an SSLv2 header; raises SyntaxError unless byte 0 is 0x80.

        The 0x80 high bit marks a 2-byte header with a 1-byte length, which
        is the only variant handled here.
        """
        if p.get(1)!=128:
            raise SyntaxError()
        self.type = ContentType.handshake
        self.version = (2,0)
        #We don't support 2-byte-length-headers; could be a problem
        self.length = p.get(1)
        return self
class Msg:
    """Base class for TLS messages.

    Provides the two-pass serialization helpers: subclasses' write(trial)
    first runs in "trial" mode to measure the encoded size, then again with
    an exactly-sized Writer to produce the bytes.
    """
    def preWrite(self, trial):
        # Trial pass: unsized Writer, used purely to count bytes.
        # Real pass: size the Writer from a trial run of write().
        if trial:
            return Writer()
        return Writer(self.write(True))

    def postWrite(self, w, trial):
        # Trial pass reports the byte count; real pass yields the bytes.
        return w.index if trial else w.bytes
class Alert(Msg):
    """TLS alert message: a severity level plus a description code."""
    def __init__(self):
        self.contentType = ContentType.alert
        self.level = 0
        self.description = 0

    def create(self, description, level=AlertLevel.fatal):
        """Populate the alert; severity defaults to fatal. Returns self."""
        self.level = level
        self.description = description
        return self

    def parse(self, p):
        """Read the fixed 2-byte alert body from parser *p*."""
        p.setLengthCheck(2)
        self.level = p.get(1)
        self.description = p.get(1)
        p.stopLengthCheck()
        return self

    def write(self):
        """Serialize to the 2-byte wire format."""
        w = Writer(2)
        w.add(self.level, 1)
        w.add(self.description, 1)
        return w.bytes
class HandshakeMsg(Msg):
    """Base for handshake messages; prepends the 4-byte handshake header."""
    def preWrite(self, handshakeType, trial):
        """Start a Writer containing the handshake type and body length.

        In *trial* mode an unsized Writer (with a zero length placeholder)
        is returned, used only to measure the message.  Otherwise write(True)
        is called first to learn the total size so an exactly-sized Writer
        can be allocated and the real length written.
        """
        if trial:
            w = Writer()
            w.add(handshakeType, 1)
            w.add(0, 3)
        else:
            length = self.write(True)
            w = Writer(length)
            w.add(handshakeType, 1)
            # The 3-byte length field excludes the 4-byte header itself.
            w.add(length-4, 3)
        return w
class ClientHello(HandshakeMsg):
    """ClientHello handshake message (SSLv2-compatible and SSLv3/TLS forms)."""
    def __init__(self, ssl2=False):
        self.contentType = ContentType.handshake
        # When True, parse() expects the SSLv2-compatible hello layout.
        self.ssl2 = ssl2
        self.client_version = (0,0)
        self.random = createByteArrayZeros(32)
        self.session_id = createByteArraySequence([])
        self.cipher_suites = []         # a list of 16-bit values
        self.certificate_types = [CertificateType.x509]
        self.compression_methods = []   # a list of 8-bit values
        self.srp_username = None        # a string
        # Extension flags populated by parse().
        self.channel_id = False
        self.support_signed_cert_timestamps = False
        self.status_request = False

    def create(self, version, random, session_id, cipher_suites,
               certificate_types=None, srp_username=None):
        """Populate the hello; compression is always the null method [0].

        Returns self.
        """
        self.client_version = version
        self.random = random
        self.session_id = session_id
        self.cipher_suites = cipher_suites
        self.certificate_types = certificate_types
        self.compression_methods = [0]
        self.srp_username = srp_username
        return self

    def parse(self, p):
        """Parse from parser *p*, using the SSLv2 or SSLv3/TLS layout
        depending on self.ssl2.  Returns self.
        """
        if self.ssl2:
            self.client_version = (p.get(1), p.get(1))
            cipherSpecsLength = p.get(2)
            sessionIDLength = p.get(2)
            randomLength = p.get(2)
            # SSLv2 cipher specs are 3 bytes each.
            self.cipher_suites = p.getFixList(3, int(cipherSpecsLength/3))
            self.session_id = p.getFixBytes(sessionIDLength)
            self.random = p.getFixBytes(randomLength)
            if len(self.random) < 32:
                # Left-pad the challenge up to the 32 bytes TLS expects.
                zeroBytes = 32-len(self.random)
                self.random = createByteArrayZeros(zeroBytes) + self.random
            self.compression_methods = [0]#Fake this value
            #We're not doing a stopLengthCheck() for SSLv2, oh well..
        else:
            p.startLengthCheck(3)
            self.client_version = (p.get(1), p.get(1))
            self.random = p.getFixBytes(32)
            self.session_id = p.getVarBytes(1)
            self.cipher_suites = p.getVarList(2, 2)
            self.compression_methods = p.getVarList(1, 1)
            if not p.atLengthCheck():
                totalExtLength = p.get(2)
                soFar = 0
                while soFar != totalExtLength:
                    extType = p.get(2)
                    extLength = p.get(2)
                    if extType == 6:
                        # SRP extension: the username string.
                        self.srp_username = bytesToString(p.getVarBytes(1))
                    elif extType == 7:
                        # cert_type extension: acceptable certificate formats.
                        self.certificate_types = p.getVarList(1, 1)
                    elif extType == ExtensionType.channel_id:
                        self.channel_id = True
                    elif extType == ExtensionType.signed_cert_timestamps:
                        # Must be empty in a ClientHello.
                        if extLength:
                            raise SyntaxError()
                        self.support_signed_cert_timestamps = True
                    elif extType == ExtensionType.status_request:
                        # Extension contents are currently ignored.
                        # According to RFC 6066, this is not strictly forbidden
                        # (although it is suboptimal):
                        # Servers that receive a client hello containing the
                        # "status_request" extension MAY return a suitable
                        # certificate status response to the client along with
                        # their certificate.  If OCSP is requested, they
                        # SHOULD use the information contained in the extension
                        # when selecting an OCSP responder and SHOULD include
                        # request_extensions in the OCSP request.
                        p.getFixBytes(extLength)
                        self.status_request = True
                    else:
                        # Unknown extension: skip its body.
                        p.getFixBytes(extLength)
                    soFar += 4 + extLength
            p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes.

        Only the cert_type (7) and SRP (6) extensions are ever emitted.
        """
        w = HandshakeMsg.preWrite(self, HandshakeType.client_hello, trial)
        w.add(self.client_version[0], 1)
        w.add(self.client_version[1], 1)
        w.addFixSeq(self.random, 1)
        w.addVarSeq(self.session_id, 1, 1)
        w.addVarSeq(self.cipher_suites, 2, 2)
        w.addVarSeq(self.compression_methods, 1, 1)

        # Each extension costs a 4-byte header plus its body; the 2-byte
        # total-extensions length is only written when there are extensions.
        extLength = 0
        if self.certificate_types and self.certificate_types != \
                [CertificateType.x509]:
            extLength += 5 + len(self.certificate_types)
        if self.srp_username:
            extLength += 5 + len(self.srp_username)
        if extLength > 0:
            w.add(extLength, 2)

        if self.certificate_types and self.certificate_types != \
                [CertificateType.x509]:
            w.add(7, 2)  # cert_type extension
            w.add(len(self.certificate_types)+1, 2)
            w.addVarSeq(self.certificate_types, 1, 1)
        if self.srp_username:
            w.add(6, 2)  # SRP extension
            w.add(len(self.srp_username)+1, 2)
            w.addVarSeq(stringToBytes(self.srp_username), 1, 1)

        return HandshakeMsg.postWrite(self, w, trial)
class ServerHello(HandshakeMsg):
    """ServerHello handshake message with a small set of extensions."""
    def __init__(self):
        self.contentType = ContentType.handshake
        self.server_version = (0,0)
        self.random = createByteArrayZeros(32)
        self.session_id = createByteArraySequence([])
        self.cipher_suite = 0
        self.certificate_type = CertificateType.x509
        self.compression_method = 0
        # Extension payloads set by server-side code before write().
        self.channel_id = False
        self.signed_cert_timestamps = None
        self.status_request = False

    def create(self, version, random, session_id, cipher_suite,
               certificate_type):
        """Populate the hello; compression is always null. Returns self."""
        self.server_version = version
        self.random = random
        self.session_id = session_id
        self.cipher_suite = cipher_suite
        self.certificate_type = certificate_type
        self.compression_method = 0
        return self

    def parse(self, p):
        """Parse from parser *p*; only the cert_type (7) extension is decoded."""
        p.startLengthCheck(3)
        self.server_version = (p.get(1), p.get(1))
        self.random = p.getFixBytes(32)
        self.session_id = p.getVarBytes(1)
        self.cipher_suite = p.get(2)
        self.compression_method = p.get(1)
        if not p.atLengthCheck():
            totalExtLength = p.get(2)
            soFar = 0
            while soFar != totalExtLength:
                extType = p.get(2)
                extLength = p.get(2)
                if extType == 7:
                    self.certificate_type = p.get(1)
                else:
                    # Unknown extension: skip its body.
                    p.getFixBytes(extLength)
                soFar += 4 + extLength
        p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = HandshakeMsg.preWrite(self, HandshakeType.server_hello, trial)
        w.add(self.server_version[0], 1)
        w.add(self.server_version[1], 1)
        w.addFixSeq(self.random, 1)
        w.addVarSeq(self.session_id, 1, 1)
        w.add(self.cipher_suite, 2)
        w.add(self.compression_method, 1)

        # Pre-compute the total extensions length (4-byte header each).
        extLength = 0
        if self.certificate_type and self.certificate_type != \
                CertificateType.x509:
            extLength += 5
        if self.channel_id:
            extLength += 4
        if self.signed_cert_timestamps:
            extLength += 4 + len(self.signed_cert_timestamps)
        if self.status_request:
            extLength += 4
        if extLength != 0:
            w.add(extLength, 2)

        if self.certificate_type and self.certificate_type != \
                CertificateType.x509:
            w.add(7, 2)  # cert_type extension
            w.add(1, 2)
            w.add(self.certificate_type, 1)
        if self.channel_id:
            w.add(ExtensionType.channel_id, 2)
            w.add(0, 2)
        if self.signed_cert_timestamps:
            w.add(ExtensionType.signed_cert_timestamps, 2)
            w.addVarSeq(stringToBytes(self.signed_cert_timestamps), 1, 2)
        if self.status_request:
            w.add(ExtensionType.status_request, 2)
            w.add(0, 2)
        return HandshakeMsg.postWrite(self, w, trial)
class Certificate(HandshakeMsg):
    """Certificate handshake message (X.509 chain or cryptoID chain)."""
    def __init__(self, certificateType):
        self.certificateType = certificateType
        self.contentType = ContentType.handshake
        self.certChain = None

    def create(self, certChain):
        self.certChain = certChain
        return self

    def parse(self, p):
        """Parse a certificate chain of self.certificateType from *p*.

        Raises AssertionError for unknown certificate types and SyntaxError
        if a cryptoID chain arrives without cryptoIDlib installed.
        """
        p.startLengthCheck(3)
        if self.certificateType == CertificateType.x509:
            chainLength = p.get(3)
            index = 0
            certificate_list = []
            # Each entry is a 3-byte-length-prefixed DER certificate.
            while index != chainLength:
                certBytes = p.getVarBytes(3)
                x509 = X509()
                x509.parseBinary(certBytes)
                certificate_list.append(x509)
                index += len(certBytes)+3
            if certificate_list:
                self.certChain = X509CertChain(certificate_list)
        elif self.certificateType == CertificateType.cryptoID:
            s = bytesToString(p.getVarBytes(2))
            if s:
                try:
                    import cryptoIDlib.CertChain
                except ImportError:
                    raise SyntaxError(\
                        "cryptoID cert chain received, cryptoIDlib not present")
                self.certChain = cryptoIDlib.CertChain.CertChain().parse(s)
        else:
            raise AssertionError()

        p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = HandshakeMsg.preWrite(self, HandshakeType.certificate, trial)
        if self.certificateType == CertificateType.x509:
            chainLength = 0
            if self.certChain:
                certificate_list = self.certChain.x509List
            else:
                certificate_list = []
            #determine length
            for cert in certificate_list:
                bytes = cert.writeBytes()
                chainLength += len(bytes)+3
            #add bytes
            w.add(chainLength, 3)
            for cert in certificate_list:
                bytes = cert.writeBytes()
                w.addVarSeq(bytes, 1, 3)
        elif self.certificateType == CertificateType.cryptoID:
            if self.certChain:
                bytes = stringToBytes(self.certChain.write())
            else:
                bytes = createByteArraySequence([])
            w.addVarSeq(bytes, 1, 2)
        else:
            raise AssertionError()
        return HandshakeMsg.postWrite(self, w, trial)
class CertificateStatus(HandshakeMsg):
    """CertificateStatus message carrying an OCSP response (RFC 6066)."""
    def __init__(self):
        self.contentType = ContentType.handshake

    def create(self, ocsp_response):
        """Attach the DER-encoded OCSP response string; returns self."""
        self.ocsp_response = ocsp_response
        return self

    # Defined for the sake of completeness, even though we currently only
    # support sending the status message (server-side), not requesting
    # or receiving it (client-side).
    def parse(self, p):
        p.startLengthCheck(3)
        status_type = p.get(1)
        # Only one type is specified, so hardwire it.
        if status_type != CertificateStatusType.ocsp:
            raise SyntaxError()
        ocsp_response = p.getVarBytes(3)
        if not ocsp_response:
            # Can't be empty
            raise SyntaxError()
        self.ocsp_response = ocsp_response
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = HandshakeMsg.preWrite(self, HandshakeType.certificate_status,
                                  trial)
        w.add(CertificateStatusType.ocsp, 1)
        w.addVarSeq(stringToBytes(self.ocsp_response), 1, 3)
        return HandshakeMsg.postWrite(self, w, trial)
class CertificateRequest(HandshakeMsg):
    """CertificateRequest message: acceptable client-cert types and CA names."""
    def __init__(self):
        self.contentType = ContentType.handshake
        #Apple's Secure Transport library rejects empty certificate_types, so
        #default to rsa_sign.
        self.certificate_types = [ClientCertificateType.rsa_sign]
        self.certificate_authorities = []

    def create(self, certificate_types, certificate_authorities):
        """Populate type and CA lists; returns self."""
        self.certificate_types = certificate_types
        self.certificate_authorities = certificate_authorities
        return self

    def parse(self, p):
        """Parse from parser *p*; CA entries are raw DER distinguished names."""
        p.startLengthCheck(3)
        self.certificate_types = p.getVarList(1, 1)
        ca_list_length = p.get(2)
        index = 0
        self.certificate_authorities = []
        while index != ca_list_length:
            ca_bytes = p.getVarBytes(2)
            self.certificate_authorities.append(ca_bytes)
            index += len(ca_bytes)+2
        p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = HandshakeMsg.preWrite(self, HandshakeType.certificate_request,
                                  trial)
        w.addVarSeq(self.certificate_types, 1, 1)
        caLength = 0
        #determine length
        for ca_dn in self.certificate_authorities:
            caLength += len(ca_dn)+2
        w.add(caLength, 2)
        #add bytes
        for ca_dn in self.certificate_authorities:
            w.addVarSeq(ca_dn, 1, 2)
        return HandshakeMsg.postWrite(self, w, trial)
class ServerKeyExchange(HandshakeMsg):
    """ServerKeyExchange message carrying SRP parameters and, for SRP+RSA
    ciphersuites, a signature over them.
    """
    def __init__(self, cipherSuite):
        self.cipherSuite = cipherSuite
        self.contentType = ContentType.handshake
        # SRP group parameters (N, g), salt s, and server public value B.
        self.srp_N = 0
        self.srp_g = 0
        self.srp_s = createByteArraySequence([])
        self.srp_B = 0
        self.signature = createByteArraySequence([])

    def createSRP(self, srp_N, srp_g, srp_s, srp_B):
        """Populate the SRP parameters; returns self."""
        self.srp_N = srp_N
        self.srp_g = srp_g
        self.srp_s = srp_s
        self.srp_B = srp_B
        return self

    def parse(self, p):
        """Read SRP parameters (plus signature for SRP+RSA suites) from *p*."""
        p.startLengthCheck(3)
        self.srp_N = bytesToNumber(p.getVarBytes(2))
        self.srp_g = bytesToNumber(p.getVarBytes(2))
        self.srp_s = p.getVarBytes(1)
        self.srp_B = bytesToNumber(p.getVarBytes(2))
        if self.cipherSuite in CipherSuite.srpRsaSuites:
            self.signature = p.getVarBytes(2)
        p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = HandshakeMsg.preWrite(self, HandshakeType.server_key_exchange,
                                  trial)
        w.addVarSeq(numberToBytes(self.srp_N), 1, 2)
        w.addVarSeq(numberToBytes(self.srp_g), 1, 2)
        w.addVarSeq(self.srp_s, 1, 1)
        w.addVarSeq(numberToBytes(self.srp_B), 1, 2)
        if self.cipherSuite in CipherSuite.srpRsaSuites:
            w.addVarSeq(self.signature, 1, 2)
        return HandshakeMsg.postWrite(self, w, trial)

    def hash(self, clientRandom, serverRandom):
        """Return MD5(x)+SHA1(x) over randoms + the serialized parameters.

        This is the data that gets signed for SRP+RSA suites.  cipherSuite
        is temporarily cleared so write() omits the signature field from
        the hashed bytes, then restored.
        """
        oldCipherSuite = self.cipherSuite
        self.cipherSuite = None
        try:
            bytes = clientRandom + serverRandom + self.write()[4:]
            s = bytesToString(bytes)
            # BUG FIX: when the deprecated `md5`/`sha` modules are missing,
            # the module-level fallback imports bind hashlib *constructors*
            # to those names, so `md5.md5`/`sha.sha` would raise
            # AttributeError.  getattr() supports both forms.
            md5_new = getattr(md5, 'md5', md5)
            sha_new = getattr(sha, 'sha', sha)
            return stringToBytes(md5_new(s).digest() + sha_new(s).digest())
        finally:
            self.cipherSuite = oldCipherSuite
class ServerHelloDone(HandshakeMsg):
    """Empty handshake message ending the server's hello flight."""
    def __init__(self):
        self.contentType = ContentType.handshake

    def create(self):
        return self

    def parse(self, p):
        # No payload: just verify the declared length is fully consumed.
        p.startLengthCheck(3)
        p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = HandshakeMsg.preWrite(self, HandshakeType.server_hello_done, trial)
        return HandshakeMsg.postWrite(self, w, trial)
class ClientKeyExchange(HandshakeMsg):
    """ClientKeyExchange: SRP A value or RSA-encrypted premaster secret."""
    def __init__(self, cipherSuite, version=None):
        self.cipherSuite = cipherSuite
        # The protocol version matters for RSA suites: SSLv3 omits the
        # 2-byte length prefix on the encrypted premaster secret.
        self.version = version
        self.contentType = ContentType.handshake
        self.srp_A = 0
        self.encryptedPreMasterSecret = createByteArraySequence([])

    def createSRP(self, srp_A):
        self.srp_A = srp_A
        return self

    def createRSA(self, encryptedPreMasterSecret):
        self.encryptedPreMasterSecret = encryptedPreMasterSecret
        return self

    def parse(self, p):
        """Parse according to cipherSuite/version; raises AssertionError for
        unsupported combinations.
        """
        p.startLengthCheck(3)
        if self.cipherSuite in CipherSuite.srpSuites + \
                               CipherSuite.srpRsaSuites:
            self.srp_A = bytesToNumber(p.getVarBytes(2))
        elif self.cipherSuite in CipherSuite.rsaSuites:
            if self.version in ((3,1), (3,2)):
                self.encryptedPreMasterSecret = p.getVarBytes(2)
            elif self.version == (3,0):
                # SSLv3: no length prefix; take the rest of the message.
                self.encryptedPreMasterSecret = \
                    p.getFixBytes(len(p.bytes)-p.index)
            else:
                raise AssertionError()
        else:
            raise AssertionError()
        p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = HandshakeMsg.preWrite(self, HandshakeType.client_key_exchange,
                                  trial)
        if self.cipherSuite in CipherSuite.srpSuites + \
                               CipherSuite.srpRsaSuites:
            w.addVarSeq(numberToBytes(self.srp_A), 1, 2)
        elif self.cipherSuite in CipherSuite.rsaSuites:
            if self.version in ((3,1), (3,2)):
                w.addVarSeq(self.encryptedPreMasterSecret, 1, 2)
            elif self.version == (3,0):
                w.addFixSeq(self.encryptedPreMasterSecret, 1)
            else:
                raise AssertionError()
        else:
            raise AssertionError()
        return HandshakeMsg.postWrite(self, w, trial)
class CertificateVerify(HandshakeMsg):
    """CertificateVerify: the client's signature proving key possession."""
    def __init__(self):
        self.contentType = ContentType.handshake
        self.signature = createByteArraySequence([])

    def create(self, signature):
        self.signature = signature
        return self

    def parse(self, p):
        p.startLengthCheck(3)
        self.signature = p.getVarBytes(2)
        p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = HandshakeMsg.preWrite(self, HandshakeType.certificate_verify,
                                  trial)
        w.addVarSeq(self.signature, 1, 2)
        return HandshakeMsg.postWrite(self, w, trial)
class ChangeCipherSpec(Msg):
    """ChangeCipherSpec message: a single byte, always 1."""
    def __init__(self):
        self.contentType = ContentType.change_cipher_spec
        self.type = 1

    def create(self):
        self.type = 1
        return self

    def parse(self, p):
        p.setLengthCheck(1)
        self.type = p.get(1)
        p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = Msg.preWrite(self, trial)
        w.add(self.type,1)
        return Msg.postWrite(self, w, trial)
class Finished(HandshakeMsg):
    """Finished message; verify_data length depends on the protocol version."""
    def __init__(self, version):
        self.contentType = ContentType.handshake
        self.version = version
        self.verify_data = createByteArraySequence([])

    def create(self, verify_data):
        self.verify_data = verify_data
        return self

    def parse(self, p):
        """Parse; SSLv3 carries 36 bytes of verify data, TLS 1.0/1.1 carry 12."""
        p.startLengthCheck(3)
        if self.version == (3,0):
            self.verify_data = p.getFixBytes(36)
        elif self.version in ((3,1), (3,2)):
            self.verify_data = p.getFixBytes(12)
        else:
            raise AssertionError()
        p.stopLengthCheck()
        return self

    def write(self, trial=False):
        """Serialize; returns the length in *trial* mode, else the bytes."""
        w = HandshakeMsg.preWrite(self, HandshakeType.finished, trial)
        w.addFixSeq(self.verify_data, 1)
        return HandshakeMsg.postWrite(self, w, trial)
class EncryptedExtensions(HandshakeMsg):
    """Channel-ID EncryptedExtensions message (parse-only).

    NOTE(review): unlike the other handshake classes, __init__ does not set
    self.contentType — confirm no caller relies on it for this type.
    """
    def __init__(self):
        self.channel_id_key = None
        self.channel_id_proof = None

    def parse(self, p):
        p.startLengthCheck(3)
        soFar = 0
        while soFar != p.lengthCheck:
            extType = p.get(2)
            extLength = p.get(2)
            if extType == ExtensionType.channel_id:
                # Body must be exactly 4 * 32 bytes: a 64-byte key followed
                # by a 64-byte proof.
                if extLength != 32*4:
                    raise SyntaxError()
                self.channel_id_key = p.getFixBytes(64)
                self.channel_id_proof = p.getFixBytes(64)
            else:
                # Unknown extension: skip its body.
                p.getFixBytes(extLength)
            soFar += 4 + extLength
        p.stopLengthCheck()
        return self
class ApplicationData(Msg):
    """Opaque application payload carried by the record layer."""
    def __init__(self):
        self.contentType = ContentType.application_data
        self.bytes = createByteArraySequence([])

    def create(self, bytes):
        self.bytes = bytes
        return self

    def parse(self, p):
        # The whole record body is the payload; no framing to decode.
        self.bytes = p.bytes
        return self

    def write(self):
        return self.bytes
| bsd-3-clause |
DiptoDas8/Biponi | lib/python2.7/site-packages/django/contrib/admin/sites.py | 77 | 22052 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
# Admin-check failures collected at register() time when settings.DEBUG is
# enabled; surfaced later by Django's system-check framework.
system_check_errors = []


class AlreadyRegistered(Exception):
    """Raised when registering a model that is already registered."""
    pass


class NotRegistered(Exception):
    """Raised when acting on a model that was never registered."""
    pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')

# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')

# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')

# URL for the "View site" link at the top of each admin page.
site_url = '/'

# Form class for the login view; None selects AdminAuthenticationForm.
login_form = None
# Template overrides; None falls back to the stock admin templates.
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin'):
    """Create the site; *name* is used as the URL namespace for its views."""
    self._registry = {}  # model_class class -> admin_class instance
    self.name = name
    # Globally available actions; 'delete_selected' ships by default.
    self._actions = {'delete_selected': actions.delete_selected}
    self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
    """
    Registers the given model(s) with the given admin class.

    The model(s) should be Model classes, not instances.

    If an admin class isn't given, it will use ModelAdmin (the default
    admin options). If keyword arguments are given -- e.g., list_display --
    they'll be applied as options to the admin class.

    If a model is already registered, this will raise AlreadyRegistered.

    If a model is abstract, this will raise ImproperlyConfigured.
    """
    if not admin_class:
        admin_class = ModelAdmin

    # Accept a single model class or any iterable of them.
    if isinstance(model_or_iterable, ModelBase):
        model_or_iterable = [model_or_iterable]
    for model in model_or_iterable:
        if model._meta.abstract:
            raise ImproperlyConfigured('The model %s is abstract, so it '
                  'cannot be registered with admin.' % model.__name__)

        if model in self._registry:
            raise AlreadyRegistered('The model %s is already registered' % model.__name__)

        # Ignore the registration if the model has been
        # swapped out.
        if not model._meta.swapped:
            # If we got **options then dynamically construct a subclass of
            # admin_class with those **options.
            if options:
                # For reasons I don't quite understand, without a __module__
                # the created class appears to "live" in the wrong place,
                # which causes issues later on.
                options['__module__'] = __name__
                admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)

            if admin_class is not ModelAdmin and settings.DEBUG:
                system_check_errors.extend(admin_class.check(model))

            # Instantiate the admin class to save in the registry
            self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
    """
    Unregisters the given model(s).

    If a model isn't already registered, this will raise NotRegistered.
    """
    # Normalize a single model class into an iterable.
    if isinstance(model_or_iterable, ModelBase):
        model_or_iterable = [model_or_iterable]
    for model in model_or_iterable:
        try:
            del self._registry[model]
        except KeyError:
            raise NotRegistered('The model %s is not registered' % model.__name__)
def is_registered(self, model):
    """
    Check if a model class is registered with this `AdminSite`.

    Returns True when *model* has an entry in the registry.
    """
    return model in self._registry
def add_action(self, action, name=None):
    """
    Register an action to be available globally.

    When *name* is falsy, the action function's __name__ is used.
    """
    if not name:
        name = action.__name__
    self._actions[name] = action
    self._global_actions[name] = action
def disable_action(self, name):
    """
    Disable a globally-registered action. Raises KeyError for invalid names.
    """
    # pop() without a default raises KeyError just like `del` would.
    self._actions.pop(name)
def get_action(self, name):
    """
    Explicitly get a registered global action whether it's enabled or
    not. Raises KeyError for invalid names.
    """
    return self._global_actions[name]
@property
def actions(self):
    """
    Get all the enabled actions as an iterable of (name, func).
    """
    return six.iteritems(self._actions)
def has_permission(self, request):
    """
    Returns True if the given HttpRequest has permission to view
    *at least one* page in the admin site.
    """
    user = request.user
    return user.is_active and user.is_staff
def check_dependencies(self):
    """
    Check that all things needed to run the admin have been correctly installed.

    The default implementation checks that admin and contenttypes apps are
    installed, as well as the auth context processor.

    Raises ImproperlyConfigured on the first missing dependency.
    """
    if not apps.is_installed('django.contrib.admin'):
        raise ImproperlyConfigured(
            "Put 'django.contrib.admin' in your INSTALLED_APPS "
            "setting in order to use the admin application.")
    if not apps.is_installed('django.contrib.contenttypes'):
        raise ImproperlyConfigured(
            "Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
            "setting in order to use the admin application.")
    try:
        default_template_engine = Engine.get_default()
    except Exception:
        # Skip this non-critical check:
        # 1. if the user has a non-trivial TEMPLATES setting and Django
        #    can't find a default template engine
        # 2. if anything goes wrong while loading template engines, in
        #    order to avoid raising an exception from a confusing location
        # Catching ImproperlyConfigured suffices for 1. but 2. requires
        # catching all exceptions.
        pass
    else:
        if ('django.contrib.auth.context_processors.auth'
                not in default_template_engine.context_processors):
            raise ImproperlyConfigured(
                "Enable 'django.contrib.auth.context_processors.auth' "
                "in your TEMPLATES setting in order to use the admin "
                "application.")
def admin_view(self, view, cacheable=False):
    """
    Decorator to create an admin view attached to this ``AdminSite``. This
    wraps the view and provides permission checking by calling
    ``self.has_permission``.

    You'll want to use this from within ``AdminSite.get_urls()``:

        class MyAdminSite(AdminSite):

            def get_urls(self):
                from django.conf.urls import url

                urls = super(MyAdminSite, self).get_urls()
                urls += [
                    url(r'^my_view/$', self.admin_view(some_view))
                ]
                return urls

    By default, admin_views are marked non-cacheable using the
    ``never_cache`` decorator. If the view can be safely cached, set
    cacheable=True.
    """
    def inner(request, *args, **kwargs):
        if not self.has_permission(request):
            # Allow an unauthenticated user to hit the logout page without
            # bouncing through the login form.
            if request.path == reverse('admin:logout', current_app=self.name):
                index_path = reverse('admin:index', current_app=self.name)
                return HttpResponseRedirect(index_path)
            # Inner import to prevent django.contrib.admin (app) from
            # importing django.contrib.auth.models.User (unrelated model).
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(
                request.get_full_path(),
                reverse('admin:login', current_app=self.name)
            )
        return view(request, *args, **kwargs)
    if not cacheable:
        inner = never_cache(inner)
    # We add csrf_protect here so this function can be used as a utility
    # function for any view, without having to repeat 'csrf_protect'.
    if not getattr(view, 'csrf_exempt', False):
        inner = csrf_protect(inner)
    return update_wrapper(inner, view)
def get_urls(self):
    """Build the full urlpattern list: site-wide views, one sub-URLconf per
    registered model, and (if any models exist) the per-app index view.
    """
    from django.conf.urls import url, include
    # Since this module gets imported in the application's root package,
    # it cannot import models from other applications at the module level,
    # and django.contrib.contenttypes.views imports ContentType.
    from django.contrib.contenttypes import views as contenttype_views

    if settings.DEBUG:
        self.check_dependencies()

    def wrap(view, cacheable=False):
        # Bind each view through admin_view so it gets permission checks.
        def wrapper(*args, **kwargs):
            return self.admin_view(view, cacheable)(*args, **kwargs)
        return update_wrapper(wrapper, view)

    # Admin-site-wide views.
    urlpatterns = [
        url(r'^$', wrap(self.index), name='index'),
        url(r'^login/$', self.login, name='login'),
        url(r'^logout/$', wrap(self.logout), name='logout'),
        url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
        url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
            name='password_change_done'),
        url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
        url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
            name='view_on_site'),
    ]

    # Add in each model's views, and create a list of valid URLS for the
    # app_index
    valid_app_labels = []
    for model, model_admin in six.iteritems(self._registry):
        urlpatterns += [
            url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
        ]
        if model._meta.app_label not in valid_app_labels:
            valid_app_labels.append(model._meta.app_label)

    # If there were ModelAdmins registered, we should have a list of app
    # labels for which we need to allow access to the app_index view,
    if valid_app_labels:
        regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
        urlpatterns += [
            url(regex, wrap(self.app_index), name='app_list'),
        ]
    return urlpatterns
@property
def urls(self):
    """3-tuple for include(): (urlpatterns, app_namespace, instance_namespace)."""
    return self.get_urls(), 'admin', self.name
def each_context(self, request):
    """
    Returns a dictionary of variables to put in the template context for
    *every* page in the admin site.
    """
    return {
        'site_title': self.site_title,
        'site_header': self.site_header,
        'site_url': self.site_url,
        'has_permission': self.has_permission(request),
    }
def password_change(self, request, extra_context=None):
    """
    Handles the "change password" task -- both form display and validation.

    Delegates to django.contrib.auth.views.password_change with the
    admin form and a redirect back to the admin's "done" page.
    """
    from django.contrib.admin.forms import AdminPasswordChangeForm
    from django.contrib.auth.views import password_change
    url = reverse('admin:password_change_done', current_app=self.name)
    defaults = {
        'current_app': self.name,
        'password_change_form': AdminPasswordChangeForm,
        'post_change_redirect': url,
        'extra_context': dict(self.each_context(request), **(extra_context or {})),
    }
    if self.password_change_template is not None:
        defaults['template_name'] = self.password_change_template
    return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
    """
    Displays the "success" page after a password change.
    """
    from django.contrib.auth.views import password_change_done
    defaults = {
        'current_app': self.name,
        'extra_context': dict(self.each_context(request), **(extra_context or {})),
    }
    if self.password_change_done_template is not None:
        defaults['template_name'] = self.password_change_done_template
    return password_change_done(request, **defaults)
def i18n_javascript(self, request):
    """
    Displays the i18n JavaScript that the Django admin requires.

    This takes into account the USE_I18N setting. If it's set to False, the
    generated JavaScript will be leaner and faster.
    """
    if settings.USE_I18N:
        from django.views.i18n import javascript_catalog
    else:
        from django.views.i18n import null_javascript_catalog as javascript_catalog
    return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
    """
    Logs out the user for the given HttpRequest.

    This should *not* assume the user is already logged in.
    """
    from django.contrib.auth.views import logout
    defaults = {
        'current_app': self.name,
        'extra_context': dict(self.each_context(request), **(extra_context or {})),
    }
    if self.logout_template is not None:
        defaults['template_name'] = self.logout_template
    return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
    """
    Displays the login form for the given HttpRequest.
    """
    if request.method == 'GET' and self.has_permission(request):
        # Already logged-in, redirect to admin index
        index_path = reverse('admin:index', current_app=self.name)
        return HttpResponseRedirect(index_path)

    from django.contrib.auth.views import login
    # Since this module gets imported in the application's root package,
    # it cannot import models from other applications at the module level,
    # and django.contrib.admin.forms eventually imports User.
    from django.contrib.admin.forms import AdminAuthenticationForm
    context = dict(self.each_context(request),
        title=_('Log in'),
        app_path=request.get_full_path(),
    )
    # Preserve the "next" redirect target unless the request already set it.
    if (REDIRECT_FIELD_NAME not in request.GET and
            REDIRECT_FIELD_NAME not in request.POST):
        context[REDIRECT_FIELD_NAME] = request.get_full_path()
    context.update(extra_context or {})

    defaults = {
        'extra_context': context,
        'current_app': self.name,
        'authentication_form': self.login_form or AdminAuthenticationForm,
        'template_name': self.login_template or 'admin/login.html',
    }
    return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
    """
    Displays the main admin index page, which lists all of the installed
    apps that have been registered in this site.
    """
    app_dict = {}
    for model, model_admin in self._registry.items():
        app_label = model._meta.app_label
        has_module_perms = model_admin.has_module_permission(request)

        if has_module_perms:
            perms = model_admin.get_model_perms(request)

            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True in perms.values():
                info = (app_label, model._meta.model_name)
                model_dict = {
                    'name': capfirst(model._meta.verbose_name_plural),
                    'object_name': model._meta.object_name,
                    'perms': perms,
                }
                # Only link to views the user can actually reach; a
                # NoReverseMatch means the admin URL isn't wired up.
                if perms.get('change', False):
                    try:
                        model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                    except NoReverseMatch:
                        pass
                if perms.get('add', False):
                    try:
                        model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                    except NoReverseMatch:
                        pass
                if app_label in app_dict:
                    app_dict[app_label]['models'].append(model_dict)
                else:
                    app_dict[app_label] = {
                        'name': apps.get_app_config(app_label).verbose_name,
                        'app_label': app_label,
                        'app_url': reverse(
                            'admin:app_list',
                            kwargs={'app_label': app_label},
                            current_app=self.name,
                        ),
                        'has_module_perms': has_module_perms,
                        'models': [model_dict],
                    }

    # Sort the apps alphabetically.
    app_list = list(six.itervalues(app_dict))
    app_list.sort(key=lambda x: x['name'].lower())

    # Sort the models alphabetically within each app.
    for app in app_list:
        app['models'].sort(key=lambda x: x['name'])

    context = dict(
        self.each_context(request),
        title=self.index_title,
        app_list=app_list,
    )
    context.update(extra_context or {})

    request.current_app = self.name

    return TemplateResponse(request, self.index_template or
                            'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
    """
    Display the index page for a single app, listing the models in that
    app for which the user has any permission.

    Raises PermissionDenied when the user lacks module permission, and
    Http404 when no registered model in the app is visible to the user.
    """
    app_name = apps.get_app_config(app_label).verbose_name
    app_dict = {}
    for model, model_admin in self._registry.items():
        if app_label == model._meta.app_label:
            has_module_perms = model_admin.has_module_permission(request)
            if not has_module_perms:
                raise PermissionDenied

            perms = model_admin.get_model_perms(request)

            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True in perms.values():
                info = (app_label, model._meta.model_name)
                model_dict = {
                    'name': capfirst(model._meta.verbose_name_plural),
                    'object_name': model._meta.object_name,
                    'perms': perms,
                }
                # A missing URL pattern simply omits the link.
                if perms.get('change'):
                    try:
                        model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                    except NoReverseMatch:
                        pass
                if perms.get('add'):
                    try:
                        model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                    except NoReverseMatch:
                        pass
                if app_dict:
                    # BUG FIX: removed a stray trailing comma that turned
                    # this statement into a useless one-element tuple.
                    app_dict['models'].append(model_dict)
                else:
                    # First time around, now that we know there's
                    # something to display, add in the necessary meta
                    # information.
                    app_dict = {
                        'name': app_name,
                        'app_label': app_label,
                        'app_url': '',
                        'has_module_perms': has_module_perms,
                        'models': [model_dict],
                    }

    if not app_dict:
        raise Http404('The requested admin page does not exist.')

    # Sort the models alphabetically within each app.
    app_dict['models'].sort(key=lambda x: x['name'])
    context = dict(self.each_context(request),
        title=_('%(app)s administration') % {'app': app_name},
        app_list=[app_dict],
        app_label=app_label,
    )
    context.update(extra_context or {})
    request.current_app = self.name

    return TemplateResponse(request, self.app_index_template or [
        'admin/%s/app_index.html' % app_label,
        'admin/app_index.html'
    ], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
# Importing this module therefore always creates the default site instance.
site = AdminSite()
| mit |
Kast0rTr0y/ansible | lib/ansible/cli/doc.py | 14 | 13631 | # (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import traceback
import textwrap
from ansible.compat.six import iteritems, string_types
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader, action_loader
from ansible.cli import CLI
from ansible.utils import module_docs
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
    """Command line tool that displays Ansible module documentation.

    Supports listing available modules (-l), printing full man-style
    documentation, playbook snippets (-s), and docs for every module (-a).
    """

    def __init__(self, args):
        super(DocCLI, self).__init__(args)
        self.module_list = []  # names collected by find_modules()

    def parse(self):
        """Define the command line options and parse the arguments."""
        self.parser = CLI.base_parser(
            usage='usage: %prog [options] [module...]',
            epilog='Show Ansible module documentation',
            module_opts=True,
        )

        self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
                help='List available modules')
        self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
                help='Show playbook snippet for specified module(s)')
        self.parser.add_option("-a", "--all", action="store_true", default=False, dest='all_modules',
                help='Show documentation for all modules')

        super(DocCLI, self).parse()

        display.verbosity = self.options.verbosity

    def run(self):
        """Dispatch on the parsed options and page the rendered output.

        Returns 0 on success; raises AnsibleOptionsError / AnsibleError
        on bad options or unparseable documentation.
        """
        super(DocCLI, self).run()

        if self.options.module_path is not None:
            for i in self.options.module_path.split(os.pathsep):
                module_loader.add_directory(i)

        # list modules
        if self.options.list_dir:
            paths = module_loader._get_paths()
            for path in paths:
                self.find_modules(path)
            self.pager(self.get_module_list_text())
            return 0

        # process all modules
        if self.options.all_modules:
            paths = module_loader._get_paths()
            for path in paths:
                self.find_modules(path)
            self.args = sorted(set(self.module_list) - module_docs.BLACKLIST_MODULES)

        if len(self.args) == 0:
            raise AnsibleOptionsError("Incorrect options passed")

        # process command line module list
        text = ''
        for module in self.args:
            try:
                # if the module lives in a non-python file (eg, win_X.ps1),
                # require the corresponding python file for docs
                filename = module_loader.find_plugin(module, mod_type='.py', ignore_deprecated=True)
                if filename is None:
                    display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
                    continue

                if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
                    continue

                try:
                    doc, plainexamples, returndocs, metadata = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
                except Exception:
                    # Narrowed from a bare 'except:' so KeyboardInterrupt /
                    # SystemExit are no longer swallowed.
                    display.vvv(traceback.format_exc())
                    display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
                    continue

                if doc is not None:
                    # is there a corresponding action plugin?
                    doc['action'] = module in action_loader

                    # Sorted option names; the old explicit append loop did
                    # the same thing less directly.
                    doc['option_keys'] = sorted(doc['options'])
                    doc['filename'] = filename
                    doc['docuri'] = doc['module'].replace('_', '-')
                    doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
                    doc['plainexamples'] = plainexamples
                    doc['returndocs'] = returndocs
                    doc['metadata'] = metadata

                    if self.options.show_snippet:
                        text += self.get_snippet_text(doc)
                    else:
                        text += self.get_man_text(doc)
                else:
                    # this typically means we couldn't even parse the
                    # docstring, not just that the YAML is busted, probably
                    # a quoting issue.
                    raise AnsibleError("Parsing produced an empty object.")
            except Exception as e:
                display.vvv(traceback.format_exc())
                raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))

        if text:
            self.pager(text)
        return 0

    def find_modules(self, path):
        """Append every plausible module file under *path* to module_list."""
        for module in os.listdir(path):
            full_path = '/'.join([path, module])

            if module.startswith('.'):
                continue
            elif os.path.isdir(full_path):
                continue
            elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
                continue
            elif module.startswith('__'):
                continue
            elif module in C.IGNORE_FILES:
                continue
            elif module.startswith('_'):
                if os.path.islink(full_path):  # avoids aliases
                    continue

            module = os.path.splitext(module)[0]  # removes the extension
            module = module.lstrip('_')  # remove underscore from deprecated modules
            self.module_list.append(module)

    def get_module_list_text(self):
        """Render a two-column (name, short description) module listing."""
        columns = display.columns
        displace = max(len(x) for x in self.module_list)
        linelimit = columns - displace - 5
        text = []
        deprecated = []
        for module in sorted(set(self.module_list)):

            if module in module_docs.BLACKLIST_MODULES:
                continue

            # if the module lives in a non-python file (eg, win_X.ps1),
            # require the corresponding python file for docs
            filename = module_loader.find_plugin(module, mod_type='.py', ignore_deprecated=True)

            if filename is None:
                continue
            if filename.endswith(".ps1"):
                continue
            if os.path.isdir(filename):
                continue

            try:
                doc, plainexamples, returndocs, metadata = module_docs.get_docstring(filename)
                desc = self.tty_ify(doc.get('short_description', '?')).strip()
                if len(desc) > linelimit:
                    desc = desc[:linelimit] + '...'

                if module.startswith('_'):  # Handle deprecated
                    deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
                else:
                    text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
            except Exception:
                # Narrowed from a bare 'except:'.
                raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)

        if len(deprecated) > 0:
            text.append("\nDEPRECATED:")
            text.extend(deprecated)

        return "\n".join(text)

    @staticmethod
    def print_paths(finder):
        ''' Returns a string suitable for printing of the search path '''

        # Uses a list to get the order right
        ret = []
        for i in finder._get_paths():
            if i not in ret:
                ret.append(i)
        return os.pathsep.join(ret)

    def get_snippet_text(self, doc):
        """Render a ready-to-paste playbook task snippet for one module."""
        text = []
        desc = CLI.tty_ify(doc['short_description'])
        text.append("- name: %s" % (desc))
        text.append("  action: %s" % (doc['module']))
        pad = 31
        subdent = " " * pad
        limit = display.columns - pad

        for o in sorted(doc['options'].keys()):
            opt = doc['options'][o]
            desc = CLI.tty_ify(" ".join(opt['description']))

            required = opt.get('required', False)
            if not isinstance(required, bool):
                # BUG FIX: this used to 'raise' a plain string, which is a
                # TypeError in Python 3; raise a real exception instead.
                raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
            if required:
                s = o + "="
            else:
                s = o

            text.append("      %-20s   # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent)))
        text.append('')

        return "\n".join(text)

    def get_man_text(self, doc):
        """Render full man-page-style documentation for one module."""
        opt_indent = "        "
        text = []
        text.append("> %s    (%s)\n" % (doc['module'].upper(), doc['filename']))
        pad = display.columns * 0.20
        limit = max(display.columns - int(pad), 70)

        if isinstance(doc['description'], list):
            desc = " ".join(doc['description'])
        else:
            desc = doc['description']

        text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent="  ", subsequent_indent="  "))

        # FUTURE: move deprecation to metadata-only
        if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
            text.append("DEPRECATED: \n%s\n" % doc['deprecated'])

        metadata = doc['metadata']
        supported_by = metadata['supported_by']
        text.append("Supported by: %s\n" % supported_by)

        status = metadata['status']
        text.append("Status: %s\n" % ", ".join(status))

        if 'action' in doc and doc['action']:
            text.append("  * note: %s\n" % "This module has a corresponding action plugin.")

        if 'option_keys' in doc and len(doc['option_keys']) > 0:
            text.append("Options (= is mandatory):\n")

        for o in sorted(doc['option_keys']):
            opt = doc['options'][o]

            required = opt.get('required', False)
            if not isinstance(required, bool):
                # BUG FIX: see get_snippet_text -- raising a string is a
                # TypeError in Python 3.
                raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
            if required:
                opt_leadin = "="
            else:
                opt_leadin = "-"

            text.append("%s %s" % (opt_leadin, o))

            if isinstance(opt['description'], list):
                for entry in opt['description']:
                    text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
            else:
                text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))

            choices = ''
            if 'choices' in opt:
                choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")"
            default = ''
            if 'default' in opt or not required:
                default = "[Default: " + str(opt.get('default', '(null)')) + "]"
            text.append(textwrap.fill(CLI.tty_ify(choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))

        if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
            text.append("Notes:")
            for note in doc['notes']:
                text.append(textwrap.fill(CLI.tty_ify(note), limit - 6, initial_indent="  * ", subsequent_indent=opt_indent))

        if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
            req = ", ".join(doc['requirements'])
            text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit - 16, initial_indent="  ", subsequent_indent=opt_indent))

        if 'examples' in doc and len(doc['examples']) > 0:
            text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
            for ex in doc['examples']:
                text.append("%s\n" % (ex['code']))

        if 'plainexamples' in doc and doc['plainexamples'] is not None:
            text.append("EXAMPLES:")
            text.append(doc['plainexamples'])
        if 'returndocs' in doc and doc['returndocs'] is not None:
            text.append("RETURN VALUES:")
            text.append(doc['returndocs'])
        text.append('')

        maintainers = set()
        if 'author' in doc:
            if isinstance(doc['author'], string_types):
                maintainers.add(doc['author'])
            else:
                maintainers.update(doc['author'])

        if 'maintainers' in doc:
            # BUG FIX: this branch previously read doc['author'] again
            # (copy/paste error), so explicit maintainers were ignored.
            if isinstance(doc['maintainers'], string_types):
                maintainers.add(doc['maintainers'])
            else:
                maintainers.update(doc['maintainers'])

        text.append('MAINTAINERS: ' + ', '.join(maintainers))
        text.append('')

        return "\n".join(text)
| gpl-3.0 |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/PIL-1.1.7-py2.6-win32.egg/FpxImagePlugin.py | 40 | 6181 | #
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library.
# $Id$
#
# FlashPix support for PIL
#
# History:
# 97-01-25 fl Created (reads uncompressed RGB images only)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import Image, ImageFile
from OleFileIO import *
# we map from colour field tuples to (mode, rawmode) descriptors
# Keys are tuples of FlashPix colour-field identifiers, matched via
# MODES[tuple(colors)] in FpxImageFile._open_index.
MODES = {
    # opacity
    # BUG FIX: the key below was written as (0x00007ffe) -- a plain int,
    # not a one-element tuple -- so it could never match a tuple lookup.
    (0x00007ffe,): ("A", "L"),
    # monochrome
    (0x00010000,): ("L", "L"),
    (0x00018000, 0x00017ffe): ("RGBA", "LA"),
    # photo YCC
    (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
    (0x00028000, 0x00028001, 0x00028002, 0x00027ffe): ("RGBA", "YCCA;P"),
    # standard RGB (NIFRGB)
    (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
    (0x00038000, 0x00038001, 0x00038002, 0x00037ffe): ("RGBA", "RGBA"),
}
#
# --------------------------------------------------------------------
def _accept(prefix):
    # FlashPix files are OLE compound documents; MAGIC (star-imported from
    # OleFileIO) is the 8-byte OLE signature.
    return prefix[:8] == MAGIC
##
# Image plugin for the FlashPix images.
class FpxImageFile(ImageFile.ImageFile):
    """PIL image plugin for FlashPix (.fpx) files.

    FlashPix stores a tiled, multi-resolution image inside an OLE compound
    document.  Only reading is supported, and only the highest-resolution
    subimage is opened (work in progress; see module header).
    """

    format = "FPX"
    format_description = "FlashPix"

    def _open(self):
        """Validate the OLE container and open the first data object store."""
        #
        # read the OLE directory and see if this is a likely
        # to be a FlashPix file

        try:
            self.ole = OleFileIO(self.fp)
        except IOError:
            raise SyntaxError, "not an FPX file; invalid OLE file"

        # Root CLSID identifies a FlashPix document.
        if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
            raise SyntaxError, "not an FPX file; bad root CLSID"

        self._open_index(1)

    def _open_index(self, index = 1):
        """Read the Image Contents property set: size, mode, JPEG tables."""
        #
        # get the Image Contents Property Set

        prop = self.ole.getproperties([
            "Data Object Store %06d" % index,
            "\005Image Contents"
        ])

        # size (highest resolution)

        self.size = prop[0x1000002], prop[0x1000003]

        # Count resolution levels: each level halves the larger dimension
        # until it fits within a 64-pixel tile.
        size = max(self.size)
        i = 1
        while size > 64:
            size = size / 2
            i = i + 1
        self.maxid = i - 1

        # mode. instead of using a single field for this, flashpix
        # requires you to specify the mode for each channel in each
        # resolution subimage, and leaves it to the decoder to make
        # sure that they all match.  for now, we'll cheat and assume
        # that this is always the case.

        id = self.maxid << 16

        s = prop[0x2000002|id]

        colors = []
        for i in range(i32(s, 4)):
            # note: for now, we ignore the "uncalibrated" flag
            colors.append(i32(s, 8+i*4) & 0x7fffffff)

        self.mode, self.rawmode = MODES[tuple(colors)]

        # load JPEG tables, if any
        self.jpeg = {}
        for i in range(256):
            id = 0x3000001|(i << 16)
            if prop.has_key(id):
                self.jpeg[i] = prop[id]

        # print len(self.jpeg), "tables loaded"

        self._open_subimage(1, self.maxid)

    def _open_subimage(self, index = 1, subimage = 0):
        """Parse one subimage header and build the tile descriptor list."""
        #
        # setup tile descriptors for a given subimage

        stream = [
            "Data Object Store %06d" % index,
            "Resolution %04d" % subimage,
            "Subimage 0000 Header"
        ]

        fp = self.ole.openstream(stream)

        # skip prefix
        p = fp.read(28)

        # header stream
        s = fp.read(36)

        size = i32(s, 4), i32(s, 8)
        tilecount = i32(s, 12)
        tilesize = i32(s, 16), i32(s, 20)
        channels = i32(s, 24)
        offset = i32(s, 28)
        length = i32(s, 32)

        # print size, self.mode, self.rawmode

        if size != self.size:
            raise IOError, "subimage mismatch"

        # get tile descriptors
        fp.seek(28 + offset)
        s = fp.read(i32(s, 12) * length)

        # Walk the descriptors left-to-right, top-to-bottom, one tile each.
        x = y = 0
        xsize, ysize = size
        xtile, ytile = tilesize
        self.tile = []

        for i in range(0, len(s), length):
            compression = i32(s, i+8)

            if compression == 0:
                self.tile.append(("raw", (x,y,x+xtile,y+ytile),
                        i32(s, i) + 28, (self.rawmode)))

            elif compression == 1:

                # FIXME: the fill decoder is not implemented
                self.tile.append(("fill", (x,y,x+xtile,y+ytile),
                        i32(s, i) + 28, (self.rawmode, s[12:16])))

            elif compression == 2:

                internal_color_conversion = ord(s[14])
                jpeg_tables = ord(s[15])
                rawmode = self.rawmode

                if internal_color_conversion:
                    # The image is stored as usual (usually YCbCr).
                    if rawmode == "RGBA":
                        # For "RGBA", data is stored as YCbCrA based on
                        # negative RGB. The following trick works around
                        # this problem :
                        jpegmode, rawmode = "YCbCrK", "CMYK"
                    else:
                        jpegmode = None # let the decoder decide
                else:
                    # The image is stored as defined by rawmode
                    jpegmode = rawmode

                self.tile.append(("jpeg", (x,y,x+xtile,y+ytile),
                        i32(s, i) + 28, (rawmode, jpegmode)))

                # FIXME: jpeg tables are tile dependent; the prefix
                # data must be placed in the tile descriptor itself!

                if jpeg_tables:
                    self.tile_prefix = self.jpeg[jpeg_tables]

            else:
                raise IOError, "unknown/invalid compression"

            x = x + xtile
            if x >= xsize:
                x, y = 0, y + ytile
                if y >= ysize:
                    break # isn't really required

        self.stream = stream
        self.fp = None

    def load(self):
        """Open the subimage data stream lazily, then defer to the base loader."""
        # Note: self.fp is cleared in _open_subimage, so the stream is only
        # opened on the first load() call.
        if not self.fp:
            self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"])

        ImageFile.ImageFile.load(self)
#
# --------------------------------------------------------------------
# Register the plugin with PIL so Image.open() recognises FlashPix files
# (via the _accept magic check) and the .fpx extension maps to this format.
Image.register_open("FPX", FpxImageFile, _accept)
Image.register_extension("FPX", ".fpx")
| epl-1.0 |
openelisglobal/openelisglobal-sandbox | liquibase/HaitiLNSPMassive/scripts/testEntry.py | 6 | 4008 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Parallel column lists; row i of each list describes the same lab test.
test_names = []
current = []
test_sections = []
sample_types = []
descriptions = []
print_names = []
old_sort = []
uom = []
# Tests whose rows need manual attention after generation.
hand_edit = ['PaCO2','HCO3']

# Input files (one value per line, same row order across files) and the
# output file that receives the generated SQL.
current_tests_file = open('testNameCurrent.txt','r')
name_file = open('testNameDesired.txt','r')
test_section_file = open("testSections.txt",'r')
sample_type_file = open("sampleType.txt")
uom_file = open("newUOM.txt", 'r')
print_name_file = open("printName.txt")
results = open("testResults.txt", 'w')
def convert_to_existing_name(name):
    """Map a French/legacy section name to the name already in the database.

    Unknown names are returned unchanged.
    """
    translations = {
        'Hemato-Immunologie': 'Hematology',
        'Biochimie': 'Biochemistry',
        'Mycologie': 'Mycobacteriology',
        'Parasitologie': 'Parasitology',
        'Immuno-virologie': 'Immunology',
        'Immuno-Virologie': 'Immunology',
        'CDV': 'VCT',
        'Bacteriologie': 'Bacteria',
        'Serologie- Virologie': 'Virologie',
        'Biologie Moleculaire': 'Biologie Moleculaire',
    }
    return translations.get(name, name)
def esc_char(name):
    """Quote *name* as a SQL string literal.

    Values containing a single quote are dollar-quoted ($$...$$) so they
    need no escaping; everything else gets plain single quotes.
    """
    delimiter = "$$" if "'" in name else "'"
    return delimiter + name + delimiter
def use_uom(uom):
    """Return True when a unit of measure is present and not the literal 'n/a'."""
    return bool(uom) and uom != 'n/a'
# Load each input file into its column list, one stripped value per row.
for line in name_file:
    test_names.append(line.strip())

for line in print_name_file:
    print_names.append(line.strip())
print_name_file.close()

for line in current_tests_file:
    current.append( line.strip())
current_tests_file.close()

for line in test_section_file:
    test_sections.append(line.strip())

for line in sample_type_file:
    sample_types.append(line.strip())

name_file.close()
test_section_file.close()

for line in uom_file:
    uom.append(line.strip())
uom_file.close()

sql_head = "INSERT INTO test( id, uom_id, description, reporting_description, is_active, is_reportable, lastupdated, test_section_id, local_abbrev, sort_order, name )\n\t"

results.write("The following should go in massiveTests.sql Note\n")

# sort_order values are spaced by 10 to leave room for manual inserts.
sort_count = 10

# NOTE(review): indentation was lost in this copy of the file; the nesting
# below is the most plausible reconstruction (the new/existing branch sits
# inside the non-empty-name check) -- verify against the original script.
for row in range(0, len(test_names)):
    if len(test_names[row]) > 1:
        description = esc_char(test_names[row] + "(" + sample_types[row] + ")")
        if description not in descriptions:
            descriptions.append(description)
        if len(current[row]) < 2: #means its new
            # New test: emit a full INSERT statement.
            results.write( sql_head)
            results.write("VALUES ( nextval( 'test_seq' ) ," )
            if use_uom(uom[row]):
                results.write(" ( select id from clinlims.unit_of_measure where name='" + uom[row] + "') , ")
            else:
                results.write(" null , ")
            results.write( description + " , " + esc_char(print_names[row]) + " , 'Y' , 'N' , now() , ")
            results.write("(select id from clinlims.test_section where name = '" + convert_to_existing_name(test_sections[row]) + "' ) ,")
            results.write( esc_char(test_names[row][:20]) + " ," + str(sort_count) + " , " + esc_char(test_names[row]) + " );\n")
            sort_count += 10
        else:
            # Existing test: queue an UPDATE of its sort order / print name.
            if len(test_names[row]) > 1:
                description = esc_char(test_names[row] + "(" + sample_types[row] + ")")
                old_sort.append("update clinlims.test set sort_order=" + str(sort_count) + " , " )
                old_sort.append( " reporting_description = " + esc_char(print_names[row]) + " ")
                old_sort.append( " where description = " + description + ";\n")
                sort_count += 10

results.write("\n\nThe following should go in testOrder.sql\n\n")
for line in old_sort:
    results.write(line)
results.close()

print "Done look for results in testResults.txt"
michalbe/servo | src/components/script/dom/bindings/codegen/parser/tests/test_treatNonCallableAsNull.py | 106 | 1379 | import WebIDL
def WebIDLTest(parser, harness):
    """Exercise the [TreatNonCallableAsNull] extended attribute.

    Valid only on nullable callback-typed attributes; the second and third
    fragments must therefore fail to parse.

    NOTE(review): whitespace inside the IDL fragments was reconstructed;
    WebIDL parsing is whitespace-insensitive, but verify against upstream.
    """
    parser.parse("""
      callback Function = any(any... arguments);

      interface TestTreatNonCallableAsNull1 {
        [TreatNonCallableAsNull] attribute Function? onfoo;
        attribute Function? onbar;
      };
    """)

    results = parser.finish()

    iface = results[1]
    attr = iface.members[0]
    harness.check(attr.type.treatNonCallableAsNull(), True, "Got the expected value")
    attr = iface.members[1]
    harness.check(attr.type.treatNonCallableAsNull(), False, "Got the expected value")

    # Non-nullable attribute type: must be rejected.
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          callback Function = any(any... arguments);

          interface TestTreatNonCallableAsNull2 {
            [TreatNonCallableAsNull] attribute Function onfoo;
          };
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw, "Should have thrown.")

    # Attribute-only extended attribute applied to an interface: rejected.
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          callback Function = any(any... arguments);

          [TreatNonCallableAsNull]
          interface TestTreatNonCallableAsNull3 {
            attribute Function onfoo;
          };
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw, "Should have thrown.")
| mpl-2.0 |
sicipio/bazel | third_party/py/mock/docs/conf.py | 108 | 6310 | # -*- coding: utf-8 -*-
#
# Mock documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 17 18:12:00 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
"""Sphinx build configuration for the mock documentation."""

import sys, os
# Make the mock package (one directory up) importable for version lookup.
sys.path.insert(0, os.path.abspath('..'))

from mock import __version__

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))

# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest']

# Run before every doctest block: star-import mock and install a proxy
# module so that '__main__' attribute lookups resolve against the doctest
# globals (patching "__main__.something" then works in examples).
doctest_global_setup = """
import os
import sys
import mock
from mock import * # yeah, I know :-/
import unittest2
import __main__

if os.getcwd() not in sys.path:
    sys.path.append(os.getcwd())

# keep a reference to __main__
sys.modules['__main'] = __main__

class ProxyModule(object):
    def __init__(self):
        self.__dict__ = globals()

sys.modules['__main__'] = ProxyModule()
"""

# Run after every doctest block: restore the real __main__ module.
doctest_global_cleanup = """
sys.modules['__main__'] = sys.modules['__main']
"""

html_theme = 'nature'
html_theme_options = {}

# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.txt'

# The master toctree document.
master_doc = 'index'

# General substitutions.
project = u'Mock'
copyright = u'2007-2012, Michael Foord & the mock team'

# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = __version__[:3]
# The full version, including alpha/beta/rc tags.
release = __version__

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'


# Options for HTML output
# -----------------------

# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'adctheme.css'

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_use_modindex = False

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'Mockdoc'


# Options for LaTeX output
# ------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '12pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
    ('index', 'Mock.tex', u'Mock Documentation',
     u'Michael Foord', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
latex_use_modindex = False
popazerty/zde-e2 | lib/python/Components/SelectionList.py | 49 | 2073 | from MenuList import MenuList
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from enigma import eListboxPythonMultiContent, eListbox, gFont, RT_HALIGN_LEFT
from Tools.LoadPixmap import LoadPixmap
import skin
selectionpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/lock_on.png"))
def SelectionEntryComponent(description, value, index, selected):
    """Build one multi-content listbox entry.

    Element 0 carries the raw (description, value, index, selected) tuple
    for later retrieval; the remaining elements are render instructions.
    Geometry comes from the skin with hard-coded fallbacks.
    """
    dx, dy, dw, dh = skin.parameters.get("SelectionListDescr", (25, 3, 650, 30))
    res = [
        (description, value, index, selected),
        (eListboxPythonMultiContent.TYPE_TEXT, dx, dy, dw, dh, 0, RT_HALIGN_LEFT, description)
    ]
    if selected:
        # Selected rows additionally show the lock icon.
        ix, iy, iw, ih = skin.parameters.get("SelectionListLock", (0, 2, 25, 24))
        res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, ix, iy, iw, ih, selectionpng))
    return res
class SelectionList(MenuList):
    """A MenuList of selectable entries; selected rows show a lock icon.

    Each list item is a SelectionEntryComponent whose element 0 holds
    (description, value, index, selected).
    """

    def __init__(self, list = None, enableWrapAround = False):
        MenuList.__init__(self, list or [], enableWrapAround, content = eListboxPythonMultiContent)
        # Font and item height come from the skin, with defaults.
        font = skin.fonts.get("SelectionList", ("Regular", 20, 30))
        self.l.setFont(0, gFont(font[0], font[1]))
        self.l.setItemHeight(font[2])

    def addSelection(self, description, value, index, selected = True):
        # Append a new entry and refresh the listbox content.
        self.list.append(SelectionEntryComponent(description, value, index, selected))
        self.setList(self.list)

    def toggleSelection(self):
        # Flip the 'selected' flag of the currently highlighted entry.
        idx = self.getSelectedIndex()
        item = self.list[idx][0]
        self.list[idx] = SelectionEntryComponent(item[0], item[1], item[2], not item[3])
        self.setList(self.list)

    def getSelectionsList(self):
        # Return (description, value, index) for every selected entry.
        return [ (item[0][0], item[0][1], item[0][2]) for item in self.list if item[0][3] ]

    def toggleAllSelection(self):
        # Invert the 'selected' flag of every entry.
        for idx,item in enumerate(self.list):
            item = self.list[idx][0]
            self.list[idx] = SelectionEntryComponent(item[0], item[1], item[2], not item[3])
        self.setList(self.list)

    def sort(self, sortType=False, flag=False):
        # sorting by sortType:
        # 0 - description
        # 1 - value
        # 2 - index
        # 3 - selected
        self.list.sort(key=lambda x: x[0][sortType],reverse=flag)
        self.setList(self.list)
| gpl-2.0 |
qusp/orange3 | Orange/tests/test_table.py | 1 | 66916 | import os
import unittest
from itertools import chain
from math import isnan
import random
from Orange import data
from Orange.data import filter
from Orange.data import Unknown
import numpy as np
from unittest.mock import Mock, MagicMock, patch
class TableTestCase(unittest.TestCase):
def setUp(self):
data.table.dataset_dirs.append("Orange/tests")
def test_indexing_class(self):
d = data.Table("test1")
self.assertEqual([e.get_class() for e in d], ["t", "t", "f"])
cind = len(d.domain) - 1
self.assertEqual([e[cind] for e in d], ["t", "t", "f"])
self.assertEqual([e["d"] for e in d], ["t", "t", "f"])
cvar = d.domain.class_var
self.assertEqual([e[cvar] for e in d], ["t", "t", "f"])
def test_filename(self):
dir = data.table.get_sample_datasets_dir()
d = data.Table("iris")
self.assertEqual(d.__file__, os.path.join(dir, "iris.tab"))
d = data.Table("test2.tab")
self.assertTrue(d.__file__.endswith("test2.tab")) # platform dependent
def test_indexing(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
# regular, discrete
varc = d.domain["c"]
self.assertEqual(d[0, 1], "0")
self.assertEqual(d[0, varc], "0")
self.assertEqual(d[0, "c"], "0")
self.assertEqual(d[0][1], "0")
self.assertEqual(d[0][varc], "0")
self.assertEqual(d[0]["c"], "0")
self.assertEqual(d[np.int_(0), np.int_(1)], "0")
self.assertEqual(d[np.int_(0)][np.int_(1)], "0")
# regular, continuous
varb = d.domain["b"]
self.assertEqual(d[0, 0], 0)
self.assertEqual(d[0, varb], 0)
self.assertEqual(d[0, "b"], 0)
self.assertEqual(d[0][0], 0)
self.assertEqual(d[0][varb], 0)
self.assertEqual(d[0]["b"], 0)
self.assertEqual(d[np.int_(0), np.int_(0)], 0)
self.assertEqual(d[np.int_(0)][np.int_(0)], 0)
# negative
varb = d.domain["b"]
self.assertEqual(d[-2, 0], 3.333)
self.assertEqual(d[-2, varb], 3.333)
self.assertEqual(d[-2, "b"], 3.333)
self.assertEqual(d[-2][0], 3.333)
self.assertEqual(d[-2][varb], 3.333)
self.assertEqual(d[-2]["b"], 3.333)
self.assertEqual(d[np.int_(-2), np.int_(0)], 3.333)
self.assertEqual(d[np.int_(-2)][np.int_(0)], 3.333)
# meta, discrete
vara = d.domain["a"]
metaa = d.domain.index("a")
self.assertEqual(d[0, metaa], "A")
self.assertEqual(d[0, vara], "A")
self.assertEqual(d[0, "a"], "A")
self.assertEqual(d[0][metaa], "A")
self.assertEqual(d[0][vara], "A")
self.assertEqual(d[0]["a"], "A")
self.assertEqual(d[np.int_(0), np.int_(metaa)], "A")
self.assertEqual(d[np.int_(0)][np.int_(metaa)], "A")
# meta, string
vare = d.domain["e"]
metae = d.domain.index("e")
self.assertEqual(d[0, metae], "i")
self.assertEqual(d[0, vare], "i")
self.assertEqual(d[0, "e"], "i")
self.assertEqual(d[0][metae], "i")
self.assertEqual(d[0][vare], "i")
self.assertEqual(d[0]["e"], "i")
self.assertEqual(d[np.int_(0), np.int_(metae)], "i")
self.assertEqual(d[np.int_(0)][np.int_(metae)], "i")
def test_indexing_example(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
e = d[0]
# regular, discrete
varc = d.domain["c"]
self.assertEqual(e[1], "0")
self.assertEqual(e[varc], "0")
self.assertEqual(e["c"], "0")
self.assertEqual(e[np.int_(1)], "0")
# regular, continuous
varb = d.domain["b"]
self.assertEqual(e[0], 0)
self.assertEqual(e[varb], 0)
self.assertEqual(e["b"], 0)
self.assertEqual(e[np.int_(0)], 0)
# meta, discrete
vara = d.domain["a"]
metaa = d.domain.index("a")
self.assertEqual(e[metaa], "A")
self.assertEqual(e[vara], "A")
self.assertEqual(e["a"], "A")
self.assertEqual(e[np.int_(metaa)], "A")
# meta, string
vare = d.domain["e"]
metae = d.domain.index("e")
self.assertEqual(e[metae], "i")
self.assertEqual(e[vare], "i")
self.assertEqual(e["e"], "i")
self.assertEqual(e[np.int_(metae)], "i")
def test_indexing_assign_value(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
# meta
vara = d.domain["a"]
metaa = d.domain.index("a")
self.assertEqual(d[0, "a"], "A")
d[0, "a"] = "B"
self.assertEqual(d[0, "a"], "B")
d[0]["a"] = "A"
self.assertEqual(d[0, "a"], "A")
d[0, vara] = "B"
self.assertEqual(d[0, "a"], "B")
d[0][vara] = "A"
self.assertEqual(d[0, "a"], "A")
d[0, metaa] = "B"
self.assertEqual(d[0, "a"], "B")
d[0][metaa] = "A"
self.assertEqual(d[0, "a"], "A")
d[0, np.int_(metaa)] = "B"
self.assertEqual(d[0, "a"], "B")
d[0][np.int_(metaa)] = "A"
self.assertEqual(d[0, "a"], "A")
# regular
varb = d.domain["b"]
self.assertEqual(d[0, "b"], 0)
d[0, "b"] = 42
self.assertEqual(d[0, "b"], 42)
d[0]["b"] = 0
self.assertEqual(d[0, "b"], 0)
d[0, varb] = 42
self.assertEqual(d[0, "b"], 42)
d[0][varb] = 0
self.assertEqual(d[0, "b"], 0)
d[0, 0] = 42
self.assertEqual(d[0, "b"], 42)
d[0][0] = 0
self.assertEqual(d[0, "b"], 0)
d[0, np.int_(0)] = 42
self.assertEqual(d[0, "b"], 42)
d[0][np.int_(0)] = 0
self.assertEqual(d[0, "b"], 0)
def test_indexing_del_example(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
initlen = len(d)
# remove first
d[4, "e"] = "4ex"
self.assertEqual(d[4, "e"], "4ex")
del d[0]
self.assertEqual(len(d), initlen - 1)
self.assertEqual(d[3, "e"], "4ex")
# remove middle
del d[2]
self.assertEqual(len(d), initlen - 2)
self.assertEqual(d[2, "e"], "4ex")
# remove middle
del d[4]
self.assertEqual(len(d), initlen - 3)
self.assertEqual(d[2, "e"], "4ex")
# remove last
d[-1, "e"] = "was last"
del d[-1]
self.assertEqual(len(d), initlen - 4)
self.assertEqual(d[2, "e"], "4ex")
self.assertNotEqual(d[-1, "e"], "was last")
# remove one before last
d[-1, "e"] = "was last"
del d[-2]
self.assertEqual(len(d), initlen - 5)
self.assertEqual(d[2, "e"], "4ex")
self.assertEqual(d[-1, "e"], "was last")
d[np.int_(2), "e"] = "2ex"
del d[np.int_(2)]
self.assertEqual(len(d), initlen - 6)
self.assertNotEqual(d[2, "e"], "2ex")
with self.assertRaises(IndexError):
del d[100]
self.assertEqual(len(d), initlen - 6)
with self.assertRaises(IndexError):
del d[-100]
self.assertEqual(len(d), initlen - 6)
def test_indexing_assign_example(self):
def almost_equal_list(s, t):
for e, f in zip(s, t):
self.assertAlmostEqual(e, f)
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
vara = d.domain["a"]
metaa = d.domain.index(vara)
self.assertFalse(isnan(d[0, "a"]))
d[0] = ["3.14", "1", "f"]
almost_equal_list(d[0].values(), [3.14, "1", "f"])
self.assertTrue(isnan(d[0, "a"]))
d[0] = [3.15, 1, "t"]
almost_equal_list(d[0].values(), [3.15, "0", "t"])
d[np.int_(0)] = [3.15, 2, "f"]
almost_equal_list(d[0].values(), [3.15, 2, "f"])
with self.assertRaises(ValueError):
d[0] = ["3.14", "1"]
with self.assertRaises(ValueError):
d[np.int_(0)] = ["3.14", "1"]
ex = data.Instance(d.domain, ["3.16", "1", "f"])
d[0] = ex
almost_equal_list(d[0].values(), [3.16, "1", "f"])
ex = data.Instance(d.domain, ["3.16", 2, "t"])
d[np.int_(0)] = ex
almost_equal_list(d[0].values(), [3.16, 2, "t"])
ex = data.Instance(d.domain, ["3.16", "1", "f"])
ex["e"] = "mmmapp"
d[0] = ex
almost_equal_list(d[0].values(), [3.16, "1", "f"])
self.assertEqual(d[0, "e"], "mmmapp")
def test_slice(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
x = d[:3]
self.assertEqual(len(x), 3)
self.assertEqual([e[0] for e in x], [0, 1.1, 2.22])
x = d[2:5]
self.assertEqual(len(x), 3)
self.assertEqual([e[0] for e in x], [2.22, 2.23, 2.24])
x = d[4:1:-1]
self.assertEqual(len(x), 3)
self.assertEqual([e[0] for e in x], [2.24, 2.23, 2.22])
x = d[-3:]
self.assertEqual(len(x), 3)
self.assertEqual([e[0] for e in x], [2.26, 3.333, Unknown])
def test_assign_slice_value(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
d[2:5, 0] = 42
self.assertEqual([e[0] for e in d],
[0, 1.1, 42, 42, 42, 2.25, 2.26, 3.333, Unknown])
d[:3, "b"] = 43
self.assertEqual([e[0] for e in d],
[43, 43, 43, 42, 42, 2.25, 2.26, 3.333, None])
d[-2:, d.domain[0]] = 44
self.assertEqual([e[0] for e in d],
[43, 43, 43, 42, 42, 2.25, 2.26, 44, 44])
d[2:5, "a"] = "A"
self.assertEqual([e["a"] for e in d], list("ABAAACCDE"))
def test_del_slice_example(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
vals = [e[0] for e in d]
del d[2:2]
self.assertEqual([e[0] for e in d], vals)
del d[2:5]
del vals[2:5]
self.assertEqual([e[0] for e in d], vals)
del d[:]
self.assertEqual(len(d), 0)
def test_set_slice_example(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
d[5, 0] = 42
d[:3] = d[5]
self.assertEqual(d[1, 0], 42)
d[5:2:-1] = [3, None, None]
self.assertEqual([e[0] for e in d],
[42, 42, 42, 3, 3, 3, 2.26, 3.333, None])
self.assertTrue(isnan(d[3, 2]))
d[2:5] = 42
self.assertTrue(np.all(d.X[2:5] == 42))
self.assertEqual(d.Y[2], 0)
def test_multiple_indices(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
with self.assertRaises(IndexError):
x = d[2, 5, 1]
with self.assertRaises(IndexError):
x = d[(2, 5, 1)]
x = d[[2, 5, 1]]
self.assertEqual([e[0] for e in x], [2.22, 2.25, 1.1])
def test_assign_multiple_indices_value(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
d[1:4, "b"] = 42
self.assertEqual([e[0] for e in d],
[0, 42, 42, 42, 2.24, 2.25, 2.26, 3.333, None])
d[range(5, 2, -1), "b"] = None
self.assertEqual([e[d.domain[0]] for e in d],
[0, 42, 42, None, "?", "", 2.26, 3.333, None])
def test_del_multiple_indices_example(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
vals = [e[0] for e in d]
del d[[1, 5, 2]]
del vals[5]
del vals[2]
del vals[1]
self.assertEqual([e[0] for e in d], vals)
del d[range(1, 3)]
del vals[1:3]
self.assertEqual([e[0] for e in d], vals)
def test_set_multiple_indices_example(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
d = data.Table("test2")
vals = [e[0] for e in d]
d[[1, 2, 5]] = [42, None, None]
vals[1] = vals[2] = vals[5] = 42
self.assertEqual([e[0] for e in d], vals)
def test_views(self):
d = data.Table("zoo")
crc = d.checksum(True)
x = d[:20]
self.assertEqual(crc, d.checksum(True))
del x[13]
self.assertEqual(crc, d.checksum(True))
del x[4:9]
self.assertEqual(crc, d.checksum(True))
def test_bool(self):
d = data.Table("iris")
self.assertTrue(d)
del d[:]
self.assertFalse(d)
d = data.Table("test3")
self.assertFalse(d)
d = data.Table("iris")
self.assertTrue(d)
d.clear()
self.assertFalse(d)
def test_checksum(self):
d = data.Table("zoo")
d[42, 3] = 0
crc1 = d.checksum(False)
d[42, 3] = 1
crc2 = d.checksum(False)
self.assertNotEqual(crc1, crc2)
d[42, 3] = 0
crc3 = d.checksum(False)
self.assertEqual(crc1, crc3)
_ = d[42, "name"]
d[42, "name"] = "non-animal"
crc4 = d.checksum(False)
self.assertEqual(crc1, crc4)
crc4 = d.checksum(True)
crc5 = d.checksum(1)
crc6 = d.checksum(False)
self.assertNotEqual(crc1, crc4)
self.assertNotEqual(crc1, crc5)
self.assertEqual(crc1, crc6)
def test_total_weight(self):
d = data.Table("zoo")
self.assertEqual(d.total_weight(), len(d))
d.set_weights(0)
d[0].weight = 0.1
d[10].weight = 0.2
d[-1].weight = 0.3
self.assertAlmostEqual(d.total_weight(), 0.6)
del d[10]
self.assertAlmostEqual(d.total_weight(), 0.4)
d.clear()
self.assertAlmostEqual(d.total_weight(), 0)
def test_has_missing(self):
d = data.Table("zoo")
self.assertFalse(d.has_missing())
self.assertFalse(d.has_missing_class())
d[10, 3] = "?"
self.assertTrue(d.has_missing())
self.assertFalse(d.has_missing_class())
d[10].set_class("?")
self.assertTrue(d.has_missing())
self.assertTrue(d.has_missing_class())
d = data.Table("test3")
self.assertFalse(d.has_missing())
self.assertFalse(d.has_missing_class())
def test_shuffle(self):
d = data.Table("zoo")
crc = d.checksum()
names = set(str(x["name"]) for x in d)
d.shuffle()
self.assertNotEqual(crc, d.checksum())
self.assertSetEqual(names, set(str(x["name"]) for x in d))
crc2 = d.checksum()
x = d[2:10]
crcx = x.checksum()
d.shuffle()
self.assertNotEqual(crc2, d.checksum())
self.assertEqual(crcx, x.checksum())
crc2 = d.checksum()
x.shuffle()
self.assertNotEqual(crcx, x.checksum())
self.assertEqual(crc2, d.checksum())
@staticmethod
def not_less_ex(ex1, ex2):
for v1, v2 in zip(ex1, ex2):
if v1 != v2:
return v1 < v2
return True
@staticmethod
def sorted(d):
for i in range(1, len(d)):
if not TableTestCase.not_less_ex(d[i - 1], d[i]):
return False
return True
@staticmethod
def not_less_ex_ord(ex1, ex2, ord):
for a in ord:
if ex1[a] != ex2[a]:
return ex1[a] < ex2[a]
return True
@staticmethod
def sorted_ord(d, ord):
for i in range(1, len(d)):
if not TableTestCase.not_less_ex_ord(d[i - 1], d[i], ord):
return False
return True
def test_append(self):
d = data.Table("test3")
d.append([None] * 3)
self.assertEqual(1, len(d))
self.assertTrue(all(isnan(i) for i in d[0]))
d.append([42, "0", None])
self.assertEqual(2, len(d))
self.assertEqual(d[1], [42, "0", None])
def test_append2(self):
d = data.Table("iris")
d.shuffle()
l1 = len(d)
d.append([1, 2, 3, 4, 0])
self.assertEqual(len(d), l1 + 1)
self.assertEqual(d[-1], [1, 2, 3, 4, 0])
x = data.Instance(d[10])
d.append(x)
self.assertEqual(d[-1], d[10])
x = d[:50]
with self.assertRaises(ValueError):
x.append(d[50])
x.ensure_copy()
x.append(d[50])
self.assertEqual(x[50], d[50])
def test_extend(self):
d = data.Table("iris")
d.shuffle()
x = d[:5]
x.ensure_copy()
d.extend(x)
for i in range(5):
self.assertTrue(d[i] == d[-5 + i])
x = d[:5]
with self.assertRaises(ValueError):
d.extend(x)
def test_convert_through_append(self):
d = data.Table("iris")
dom2 = data.Domain([d.domain[0], d.domain[2], d.domain[4]])
d2 = data.Table(dom2)
dom3 = data.Domain([d.domain[1], d.domain[2]], None)
d3 = data.Table(dom3)
for e in d[:5]:
d2.append(e)
d3.append(e)
for e, e2, e3 in zip(d, d2, d3):
self.assertEqual(e[0], e2[0])
self.assertEqual(e[1], e3[0])
def test_pickle(self):
import pickle
d = data.Table("zoo")
s = pickle.dumps(d)
d2 = pickle.loads(s)
self.assertEqual(d[0], d2[0])
self.assertEqual(d.checksum(include_metas=False),
d2.checksum(include_metas=False))
d = data.Table("iris")
s = pickle.dumps(d)
d2 = pickle.loads(s)
self.assertEqual(d[0], d2[0])
self.assertEqual(d.checksum(include_metas=False),
d2.checksum(include_metas=False))
def test_translate_through_slice(self):
d = data.Table("iris")
dom = data.Domain(["petal length", "sepal length", "iris"],
source=d.domain)
d_ref = d[:10, dom]
self.assertEqual(d_ref.domain.class_var, d.domain.class_var)
self.assertEqual(d_ref[0, "petal length"], d[0, "petal length"])
self.assertEqual(d_ref[0, "sepal length"], d[0, "sepal length"])
self.assertEqual(d_ref.X.shape, (10, 2))
self.assertEqual(d_ref.Y.shape, (10,))
def test_saveTab(self):
d = data.Table("iris")[:3]
d.save("test-save.tab")
try:
d2 = data.Table("test-save.tab")
for e1, e2 in zip(d, d2):
self.assertEqual(e1, e2)
finally:
os.remove("test-save.tab")
dom = data.Domain([data.ContinuousVariable("a")])
d = data.Table(dom)
d += [[i] for i in range(3)]
d.save("test-save.tab")
try:
d2 = data.Table("test-save.tab")
self.assertEqual(len(d.domain.attributes), 1)
self.assertEqual(d.domain.class_var, None)
for i in range(3):
self.assertEqual(d2[i], [i])
finally:
os.remove("test-save.tab")
dom = data.Domain([data.ContinuousVariable("a")], None)
d = data.Table(dom)
d += [[i] for i in range(3)]
d.save("test-save.tab")
try:
d2 = data.Table("test-save.tab")
self.assertEqual(len(d.domain.attributes), 1)
for i in range(3):
self.assertEqual(d2[i], [i])
finally:
os.remove("test-save.tab")
d = data.Table("zoo")
d.save("test-zoo.tab")
dd = data.Table("test-zoo")
try:
self.assertTupleEqual(d.domain.metas, dd.domain.metas, msg="Meta attributes don't match.")
self.assertTupleEqual(d.domain.variables, dd.domain.variables, msg="Attributes don't match.")
for i in range(10):
for j in d.domain.variables:
self.assertEqual(d[i][j], dd[i][j])
finally:
os.remove("test-zoo.tab")
def test_save_pickle(self):
table = data.Table("iris")
try:
table.save("iris.pickle")
table2 = data.Table.from_file("iris.pickle")
np.testing.assert_almost_equal(table.X, table2.X)
np.testing.assert_almost_equal(table.Y, table2.Y)
self.assertIs(table.domain[0], table2.domain[0])
finally:
os.remove("iris.pickle")
def test_from_numpy(self):
import random
a = np.arange(20, dtype="d").reshape((4, 5))
a[:, -1] = [0, 0, 0, 1]
dom = data.Domain([data.ContinuousVariable(x) for x in "abcd"],
data.DiscreteVariable("e", values=["no", "yes"]))
table = data.Table(dom, a)
for i in range(4):
self.assertEqual(table[i].get_class(), "no" if i < 3 else "yes")
for j in range(5):
self.assertEqual(a[i, j], table[i, j])
table[i, j] = random.random()
self.assertEqual(a[i, j], table[i, j])
with self.assertRaises(IndexError):
table[0, -5] = 5
def test_filter_is_defined(self):
d = data.Table("iris")
d[1, 4] = Unknown
self.assertTrue(isnan(d[1, 4]))
d[140, 0] = Unknown
e = filter.IsDefined()(d)
self.assertEqual(len(e), len(d) - 2)
self.assertEqual(e[0], d[0])
self.assertEqual(e[1], d[2])
self.assertEqual(e[147], d[149])
self.assertTrue(d.has_missing())
self.assertFalse(e.has_missing())
def test_filter_has_class(self):
d = data.Table("iris")
d[1, 4] = Unknown
self.assertTrue(isnan(d[1, 4]))
d[140, 0] = Unknown
e = filter.HasClass()(d)
self.assertEqual(len(e), len(d) - 1)
self.assertEqual(e[0], d[0])
self.assertEqual(e[1], d[2])
self.assertEqual(e[148], d[149])
self.assertTrue(d.has_missing())
self.assertTrue(e.has_missing())
self.assertFalse(e.has_missing_class())
def test_filter_random(self):
d = data.Table("iris")
e = filter.Random(50)(d)
self.assertEqual(len(e), 50)
e = filter.Random(50, negate=True)(d)
self.assertEqual(len(e), 100)
for i in range(5):
e = filter.Random(0.2)(d)
self.assertEqual(len(e), 30)
bc = np.bincount(np.array(e.Y[:], dtype=int))
if min(bc) > 7:
break
else:
self.fail("Filter returns too uneven distributions")
def test_filter_same_value(self):
d = data.Table("zoo")
mind = d.domain["type"].to_val("mammal")
lind = d.domain["legs"].to_val("4")
gind = d.domain["name"].to_val("girl")
for pos, val, r in (("type", "mammal", mind),
(len(d.domain.attributes), mind, mind),
("legs", lind, lind),
("name", "girl", gind)):
e = filter.SameValue(pos, val)(d)
f = filter.SameValue(pos, val, negate=True)(d)
self.assertEqual(len(e) + len(f), len(d))
self.assertTrue(all(ex[pos] == r for ex in e))
self.assertTrue(all(ex[pos] != r for ex in f))
def test_filter_value_continuous(self):
d = data.Table("iris")
col = d.X[:, 2]
v = d.columns
f = filter.FilterContinuous(v.petal_length,
filter.FilterContinuous.Between,
min=4.5, max=5.1)
x = filter.Values([f])(d)
self.assertTrue(np.all(4.5 <= x.X[:, 2]))
self.assertTrue(np.all(x.X[:, 2] <= 5.1))
self.assertEqual(sum((col >= 4.5) * (col <= 5.1)), len(x))
f.ref = 5.1
f.oper = filter.FilterContinuous.Equal
x = filter.Values([f])(d)
self.assertTrue(np.all(x.X[:, 2] == 5.1))
self.assertEqual(sum(col == 5.1), len(x))
f.oper = filter.FilterContinuous.NotEqual
x = filter.Values([f])(d)
self.assertTrue(np.all(x.X[:, 2] != 5.1))
self.assertEqual(sum(col != 5.1), len(x))
f.oper = filter.FilterContinuous.Less
x = filter.Values([f])(d)
self.assertTrue(np.all(x.X[:, 2] < 5.1))
self.assertEqual(sum(col < 5.1), len(x))
f.oper = filter.FilterContinuous.LessEqual
x = filter.Values([f])(d)
self.assertTrue(np.all(x.X[:, 2] <= 5.1))
self.assertEqual(sum(col <= 5.1), len(x))
f.oper = filter.FilterContinuous.Greater
x = filter.Values([f])(d)
self.assertTrue(np.all(x.X[:, 2] > 5.1))
self.assertEqual(sum(col > 5.1), len(x))
f.oper = filter.FilterContinuous.GreaterEqual
x = filter.Values([f])(d)
self.assertTrue(np.all(x.X[:, 2] >= 5.1))
self.assertEqual(sum(col >= 5.1), len(x))
f.oper = filter.FilterContinuous.Outside
f.ref, f.max = 4.5, 5.1
x = filter.Values([f])(d)
for e in x:
self.assertTrue(e[2] < 4.5 or e[2] > 5.1)
self.assertEqual(sum((col < 4.5) + (col > 5.1)), len(x))
def test_filter_value_continuous_args(self):
d = data.Table("iris")
col = d.X[:, 2]
v = d.columns
f = filter.FilterContinuous(v.petal_length,
filter.FilterContinuous.Equal, ref=5.1)
x = filter.Values([f])(d)
self.assertTrue(np.all(x.X[:, 2] == 5.1))
self.assertEqual(sum(col == 5.1), len(x))
f = filter.FilterContinuous(2,
filter.FilterContinuous.Equal, ref=5.1)
self.assertTrue(np.all(x.X[:, 2] == 5.1))
self.assertEqual(sum(col == 5.1), len(x))
f = filter.FilterContinuous("petal length",
filter.FilterContinuous.Equal, ref=5.1)
self.assertTrue(np.all(x.X[:, 2] == 5.1))
self.assertEqual(sum(col == 5.1), len(x))
f = filter.FilterContinuous("sepal length",
filter.FilterContinuous.Equal, ref=5.1)
f.column = 2
self.assertTrue(np.all(x.X[:, 2] == 5.1))
self.assertEqual(sum(col == 5.1), len(x))
f = filter.FilterContinuous("sepal length",
filter.FilterContinuous.Equal, ref=5.1)
f.column = v.petal_length
self.assertTrue(np.all(x.X[:, 2] == 5.1))
self.assertEqual(sum(col == 5.1), len(x))
f = filter.FilterContinuous(v.petal_length,
filter.FilterContinuous.Equal, ref=18)
f.ref = 5.1
x = filter.Values([f])(d)
self.assertTrue(np.all(x.X[:, 2] == 5.1))
self.assertEqual(sum(col == 5.1), len(x))
f = filter.FilterContinuous(v.petal_length,
filter.FilterContinuous.Equal, ref=18)
f.ref = 5.1
x = filter.Values([f])(d)
self.assertTrue(np.all(x.X[:, 2] == 5.1))
self.assertEqual(sum(col == 5.1), len(x))
def test_valueFilter_discrete(self):
d = data.Table("zoo")
f = filter.FilterDiscrete(d.domain.class_var, values=[2, 3, 4])
for e in filter.Values([f])(d):
self.assertTrue(e.get_class() in [2, 3, 4])
f.values = ["mammal"]
for e in filter.Values([f])(d):
self.assertTrue(e.get_class() == "mammal")
f = filter.FilterDiscrete(d.domain.class_var, values=[2, "mammal"])
for e in filter.Values([f])(d):
self.assertTrue(e.get_class() in [2, "mammal"])
f = filter.FilterDiscrete(d.domain.class_var, values=[2, "martian"])
self.assertRaises(ValueError, d._filter_values, f)
f = filter.FilterDiscrete(d.domain.class_var, values=[2, data.Table])
self.assertRaises(TypeError, d._filter_values, f)
def test_valueFilter_string_case_sens(self):
d = data.Table("zoo")
col = d[:, "name"].metas[:, 0]
f = filter.FilterString("name",
filter.FilterString.Equal, "girl")
x = filter.Values([f])(d)
self.assertEqual(len(x), 1)
self.assertEqual(x[0, "name"], "girl")
self.assertTrue(np.all(x.metas == "girl"))
f.oper = f.NotEqual
x = filter.Values([f])(d)
self.assertEqual(len(x), len(d) - 1)
self.assertTrue(np.all(x[:, "name"] != "girl"))
f.oper = f.Less
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col < "girl"))
self.assertTrue(np.all(x.metas < "girl"))
f.oper = f.LessEqual
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col <= "girl"))
self.assertTrue(np.all(x.metas <= "girl"))
f.oper = f.Greater
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col > "girl"))
self.assertTrue(np.all(x.metas > "girl"))
f.oper = f.GreaterEqual
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col >= "girl"))
self.assertTrue(np.all(x.metas >= "girl"))
f.oper = f.Between
f.max = "lion"
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(("girl" <= col) * (col <= "lion")))
self.assertTrue(np.all(x.metas >= "girl"))
self.assertTrue(np.all(x.metas <= "lion"))
f.oper = f.Outside
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col < "girl") + sum(col > "lion"))
self.assertTrue(np.all((x.metas < "girl") + (x.metas > "lion")))
f.oper = f.Contains
f.ref = "ea"
x = filter.Values([f])(d)
for e in x:
self.assertTrue("ea" in e["name"])
self.assertEqual(len(x), len([e for e in col if "ea" in e]))
f.oper = f.StartsWith
f.ref = "sea"
x = filter.Values([f])(d)
for e in x:
self.assertTrue(str(e["name"]).startswith("sea"))
self.assertEqual(len(x), len([e for e in col if e.startswith("sea")]))
f.oper = f.EndsWith
f.ref = "ion"
x = filter.Values([f])(d)
for e in x:
self.assertTrue(str(e["name"]).endswith("ion"))
self.assertEqual(len(x), len([e for e in col if e.endswith("ion")]))
def test_valueFilter_string_case_insens(self):
d = data.Table("zoo")
d[d[:, "name"].metas[:, 0] == "girl", "name"] = "GIrl"
col = d[:, "name"].metas[:, 0]
f = filter.FilterString("name",
filter.FilterString.Equal, "giRL")
f.case_sensitive = False
x = filter.Values([f])(d)
self.assertEqual(len(x), 1)
self.assertEqual(x[0, "name"], "GIrl")
self.assertTrue(np.all(x.metas == "GIrl"))
f.oper = f.NotEqual
x = filter.Values([f])(d)
self.assertEqual(len(x), len(d) - 1)
self.assertTrue(np.all(x[:, "name"] != "GIrl"))
f.oper = f.Less
f.ref = "CHiCKEN"
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col < "chicken") - 1) # girl!
self.assertTrue(np.all(x.metas < "chicken"))
f.oper = f.LessEqual
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col <= "chicken") - 1)
self.assertTrue(np.all(x.metas <= "chicken"))
f.oper = f.Greater
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col > "chicken") + 1)
for e in x:
self.assertGreater(str(e["name"]).lower(), "chicken")
f.oper = f.GreaterEqual
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col >= "chicken") + 1)
for e in x:
self.assertGreaterEqual(str(e["name"]).lower(), "chicken")
f.oper = f.Between
f.max = "liOn"
x = filter.Values([f])(d)
self.assertEqual(len(x), sum((col >= "chicken") * (col <= "lion")) + 1)
for e in x:
self.assertTrue("chicken" <= str(e["name"]).lower() <= "lion")
f.oper = f.Outside
x = filter.Values([f])(d)
self.assertEqual(len(x), sum(col < "chicken") + sum(col > "lion") - 1)
self.assertTrue(np.all((x.metas < "chicken") + (x.metas > "lion")))
f.oper = f.Contains
f.ref = "iR"
x = filter.Values([f])(d)
for e in x:
self.assertTrue("ir" in str(e["name"]).lower())
self.assertEqual(len(x), len([e for e in col if "ir" in e]) + 1)
f.oper = f.StartsWith
f.ref = "GI"
x = filter.Values([f])(d)
for e in x:
self.assertTrue(str(e["name"]).lower().startswith("gi"))
self.assertEqual(len(x),
len([e for e in col if e.lower().startswith("gi")]))
f.oper = f.EndsWith
f.ref = "ion"
x = filter.Values([f])(d)
for e in x:
self.assertTrue(str(e["name"]).endswith("ion"))
self.assertEqual(len(x), len([e for e in col if e.endswith("ion")]))
#TODO Test conjunctions and disjunctions of conditions
def column_sizes(table):
    """Return (#attributes, #class_vars, #metas) of the table's domain."""
    domain = table.domain
    return tuple(len(part)
                 for part in (domain.attributes, domain.class_vars, domain.metas))
class TableTests(unittest.TestCase):
    """Shared fixtures for the Table-construction test suites below."""

    attributes = ["Feature %i" % i for i in range(10)]
    class_vars = ["Class %i" % i for i in range(1)]
    metas = ["Meta %i" % i for i in range(5)]
    nrows = 10
    row_indices = (1, 5, 7, 9)

    data = np.random.random((nrows, len(attributes)))
    class_data = np.random.random((nrows, len(class_vars)))
    meta_data = np.random.random((nrows, len(metas)))
    weight_data = np.random.random((nrows, 1))

    def setUp(self):
        # Fresh random fixtures for every test; the class-level arrays above
        # merely act as defaults and are overwritten here.
        self.data = np.random.random((self.nrows, len(self.attributes)))
        self.class_data = np.random.random((self.nrows, len(self.class_vars)))
        if len(self.class_vars) == 1:
            self.class_data = self.class_data.flatten()
        self.meta_data = np.random.randint(
            0, 5, (self.nrows, len(self.metas))).astype(object)
        self.weight_data = np.random.random((self.nrows, 1))

    def mock_domain(self, with_classes=False, with_metas=False):
        """Build a MagicMock standing in for a data.Domain."""
        class_vars = self.class_vars if with_classes else []
        metas = self.metas if with_metas else []
        return MagicMock(data.Domain,
                         attributes=self.attributes,
                         class_vars=class_vars,
                         metas=metas,
                         variables=self.attributes + class_vars)

    def create_domain(self, attributes=(), classes=(), metas=()):
        """Build a real Domain; plain strings become variables:
        continuous for attributes/classes, 5-valued discrete for metas."""
        def as_continuous(v):
            return data.ContinuousVariable(name=v) if isinstance(v, str) else v

        def as_discrete(v):
            if isinstance(v, str):
                return data.DiscreteVariable(name=v, values=map(str, range(5)))
            return v

        return data.Domain([as_continuous(a) for a in attributes],
                           [as_continuous(c) for c in classes],
                           [as_discrete(m) for m in metas])
class CreateEmptyTable(TableTests):
    """data.Table() with no arguments yields an empty, file-less table."""

    def test_calling_new_with_no_parameters_constructs_a_new_instance(self):
        self.assertIsInstance(data.Table(), data.Table)

    def test_table_has_file(self):
        self.assertIsNone(data.Table().__file__)
class CreateTableWithFilename(TableTests):
    """Tests for data.Table.from_file and filename-based construction."""

    filename = "data.tab"

    @patch("os.path.exists", Mock(return_value=True))
    @patch("Orange.data.io.TabDelimFormat")
    def test_read_data_calls_reader(self, reader_mock):
        table_mock = Mock(data.Table)
        reader_instance = reader_mock.return_value = \
            Mock(read_file=Mock(return_value=table_mock))
        table = data.Table.from_file(self.filename)
        reader_instance.read_file.assert_called_with(self.filename, data.Table)
        self.assertEqual(table, table_mock)

    # BUG FIX: this method was also named test_read_data_calls_reader,
    # silently shadowing the test above so that it never ran.
    @patch("os.path.exists", Mock(return_value=True))
    def test_read_data_calls_registered_reader(self):
        table_mock = Mock(data.Table)
        reader_instance = Mock(read_file=Mock(return_value=table_mock))
        with patch.dict(data.io.FileFormats.readers,
                        {'.xlsx': lambda: reader_instance}):
            table = data.Table.from_file("test.xlsx")
        reader_instance.read_file.assert_called_with("test.xlsx", data.Table)
        self.assertEqual(table, table_mock)

    @patch("os.path.exists", Mock(return_value=False))
    def test_raises_error_if_file_does_not_exist(self):
        with self.assertRaises(IOError):
            data.Table.from_file(self.filename)

    @patch("os.path.exists", Mock(return_value=True))
    def test_raises_error_if_file_has_unknown_extension(self):
        with self.assertRaises(IOError):
            data.Table.from_file("file.invalid_extension")

    @patch("Orange.data.table.Table.from_file")
    def test_calling_new_with_string_argument_calls_read_data(self, read_data):
        data.Table(self.filename)
        read_data.assert_called_with(self.filename)

    @patch("Orange.data.table.Table.from_file")
    def test_calling_new_with_keyword_argument_filename_calls_read_data(
            self, read_data):
        data.Table(filename=self.filename)
        read_data.assert_called_with(self.filename)
class CreateTableWithUrl(TableTests):
    """data.Table accepts a URL and downloads the dataset."""

    def test_load_from_url(self):
        # Integration test: downloads iris.tab over HTTPS and compares it to
        # the bundled copy. Requires network access — fails offline.
        d1 = data.Table('iris')
        d2 = data.Table('https://raw.githubusercontent.com/biolab/orange3/master/Orange/datasets/iris.tab')
        np.testing.assert_array_equal(d1.X, d2.X)
        np.testing.assert_array_equal(d1.Y, d2.Y)
class CreateTableWithDomain(TableTests):
    """Tests for data.Table.from_domain: shape and zero-fill of X/Y/metas/W."""

    def test_creates_an_empty_table_with_given_domain(self):
        domain = self.mock_domain()
        table = data.Table.from_domain(domain)
        self.assertEqual(table.domain, domain)

    def test_creates_zero_filled_rows_in_X_if_domain_contains_attributes(self):
        domain = self.mock_domain()
        table = data.Table.from_domain(domain, self.nrows)
        self.assertEqual(table.X.shape, (self.nrows, len(domain.attributes)))
        self.assertFalse(table.X.any())

    def test_creates_zero_filled_rows_in_Y_if_domain_contains_class_vars(self):
        domain = self.mock_domain(with_classes=True)
        table = data.Table.from_domain(domain, self.nrows)
        # A single class variable is stored as a 1-D column; multiple class
        # variables keep Y two-dimensional.
        if len(domain.class_vars) != 1:
            self.assertEqual(table.Y.shape,
                             (self.nrows, len(domain.class_vars)))
        else:
            self.assertEqual(table.Y.shape, (self.nrows,))
        self.assertFalse(table.Y.any())

    def test_creates_zero_filled_rows_in_metas_if_domain_contains_metas(self):
        domain = self.mock_domain(with_metas=True)
        table = data.Table.from_domain(domain, self.nrows)
        self.assertEqual(table.metas.shape, (self.nrows, len(domain.metas)))
        self.assertFalse(table.metas.any())

    def test_creates_weights_if_weights_are_true(self):
        domain = self.mock_domain()
        table = data.Table.from_domain(domain, self.nrows, True)
        self.assertEqual(table.W.shape, (self.nrows, ))

    def test_does_not_create_weights_if_weights_are_false(self):
        domain = self.mock_domain()
        table = data.Table.from_domain(domain, self.nrows, False)
        # An empty (nrows, 0) weight array means "unweighted".
        self.assertEqual(table.W.shape, (self.nrows, 0))

    @patch("Orange.data.table.Table.from_domain")
    def test_calling_new_with_domain_calls_new_from_domain(
            self, new_from_domain):
        domain = self.mock_domain()
        data.Table(domain)
        new_from_domain.assert_called_with(domain)
class CreateTableWithData(TableTests):
    """Constructing Table instances from numpy arrays, lists and tuples,
    with and without an explicit Domain.

    NOTE(review): relies on helpers (mock_domain, create_domain and the
    self.data/class_data/meta_data fixtures) defined on TableTests
    elsewhere in this file.
    """

    def test_creates_a_table_with_given_X(self):
        # A domain is inferred when only raw data is passed.
        # from numpy
        table = data.Table(np.array(self.data))
        self.assertIsInstance(table.domain, data.Domain)
        np.testing.assert_almost_equal(table.X, self.data)

        # from list
        table = data.Table(list(self.data))
        self.assertIsInstance(table.domain, data.Domain)
        np.testing.assert_almost_equal(table.X, self.data)

        # from tuple
        table = data.Table(tuple(self.data))
        self.assertIsInstance(table.domain, data.Domain)
        np.testing.assert_almost_equal(table.X, self.data)

    def test_creates_a_table_from_domain_and_list(self):
        # "?" marks unknown values; they must come out as NaN.
        domain = data.Domain([data.DiscreteVariable(name="a", values="mf"),
                              data.ContinuousVariable(name="b")],
                             data.DiscreteVariable(name="y", values="abc"))
        table = data.Table(domain, [[0, 1, 2],
                                    [1, 2, "?"],
                                    ["m", 3, "a"],
                                    ["?", "?", "c"]])
        self.assertIs(table.domain, domain)
        np.testing.assert_almost_equal(
            table.X, np.array([[0, 1], [1, 2], [0, 3], [np.nan, np.nan]]))
        np.testing.assert_almost_equal(table.Y, np.array([2, np.nan, 0, 2]))

    def test_creates_a_table_from_domain_and_list_and_weights(self):
        domain = data.Domain([data.DiscreteVariable(name="a", values="mf"),
                              data.ContinuousVariable(name="b")],
                             data.DiscreteVariable(name="y", values="abc"))
        table = data.Table(domain, [[0, 1, 2],
                                    [1, 2, "?"],
                                    ["m", 3, "a"],
                                    ["?", "?", "c"]], [1, 2, 3, 4])
        self.assertIs(table.domain, domain)
        np.testing.assert_almost_equal(
            table.X, np.array([[0, 1], [1, 2], [0, 3], [np.nan, np.nan]]))
        np.testing.assert_almost_equal(table.Y, np.array([2, np.nan, 0, 2]))
        np.testing.assert_almost_equal(table.W, np.array([1, 2, 3, 4]))

    def test_creates_a_table_with_domain_and_given_X(self):
        domain = self.mock_domain()
        table = data.Table(domain, self.data)
        self.assertIsInstance(table.domain, data.Domain)
        self.assertEqual(table.domain, domain)
        np.testing.assert_almost_equal(table.X, self.data)

    def test_creates_a_table_with_given_X_and_Y(self):
        table = data.Table(self.data, self.class_data)
        self.assertIsInstance(table.domain, data.Domain)
        np.testing.assert_almost_equal(table.X, self.data)
        np.testing.assert_almost_equal(table.Y, self.class_data)

    def test_creates_a_table_with_given_X_Y_and_metas(self):
        table = data.Table(self.data, self.class_data, self.meta_data)
        self.assertIsInstance(table.domain, data.Domain)
        np.testing.assert_almost_equal(table.X, self.data)
        np.testing.assert_almost_equal(table.Y, self.class_data)
        np.testing.assert_almost_equal(table.metas, self.meta_data)

    def test_creates_a_discrete_class_if_Y_has_few_distinct_values(self):
        # A binary-valued Y column should be auto-detected as a
        # DiscreteVariable with synthesized value names.
        Y = np.array([float(np.random.randint(0, 2)) for i in self.data])
        table = data.Table(self.data, Y, self.meta_data)
        np.testing.assert_almost_equal(table.Y, Y)
        self.assertIsInstance(table.domain.class_vars[0],
                              data.DiscreteVariable)
        self.assertEqual(table.domain.class_vars[0].values, ["v1", "v2"])

    def test_creates_a_table_with_given_domain(self):
        domain = self.mock_domain()
        table = data.Table.from_numpy(domain, self.data)
        self.assertEqual(table.domain, domain)

    def test_sets_Y_if_given(self):
        domain = self.mock_domain(with_classes=True)
        table = data.Table.from_numpy(domain, self.data, self.class_data)
        np.testing.assert_almost_equal(table.Y, self.class_data)

    def test_sets_metas_if_given(self):
        domain = self.mock_domain(with_metas=True)
        table = data.Table.from_numpy(domain, self.data, metas=self.meta_data)
        np.testing.assert_almost_equal(table.metas, self.meta_data)

    def test_sets_weights_if_given(self):
        domain = self.mock_domain()
        table = data.Table.from_numpy(domain, self.data, W=self.weight_data)
        np.testing.assert_almost_equal(table.W, self.weight_data)

    def test_splits_X_and_Y_if_given_in_same_array(self):
        # When X and Y arrive as one stacked array, from_numpy must
        # split them according to the domain.
        joined_data = np.column_stack((self.data, self.class_data))
        domain = self.mock_domain(with_classes=True)
        table = data.Table.from_numpy(domain, joined_data)
        np.testing.assert_almost_equal(table.X, self.data)
        np.testing.assert_almost_equal(table.Y, self.class_data)

    def test_initializes_Y_metas_and_W_if_not_given(self):
        domain = self.mock_domain()
        table = data.Table.from_numpy(domain, self.data)
        self.assertEqual(table.Y.shape, (self.nrows, len(domain.class_vars)))
        self.assertEqual(table.metas.shape, (self.nrows, len(domain.metas)))
        self.assertEqual(table.W.shape, (self.nrows, 0))

    def test_raises_error_if_columns_in_domain_and_data_do_not_match(self):
        domain = self.mock_domain(with_classes=True, with_metas=True)
        ones = np.zeros((self.nrows, 1))  # NOTE(review): misnamed; holds zeros
        with self.assertRaises(ValueError):
            data_ = np.hstack((self.data, ones))
            data.Table.from_numpy(domain, data_, self.class_data,
                                  self.meta_data)

        with self.assertRaises(ValueError):
            classes_ = np.hstack((self.class_data, ones))
            data.Table.from_numpy(domain, self.data, classes_,
                                  self.meta_data)

        with self.assertRaises(ValueError):
            metas_ = np.hstack((self.meta_data, ones))
            data.Table.from_numpy(domain, self.data, self.class_data,
                                  metas_)

    def test_raises_error_if_lengths_of_data_do_not_match(self):
        # Row counts of X, Y and metas must agree.
        domain = self.mock_domain(with_classes=True, with_metas=True)

        with self.assertRaises(ValueError):
            data_ = np.vstack((self.data, np.zeros((1, len(self.attributes)))))
            data.Table(domain, data_, self.class_data, self.meta_data)

        with self.assertRaises(ValueError):
            class_data_ = np.vstack((self.class_data,
                                     np.zeros((1, len(self.class_vars)))))
            data.Table(domain, self.data, class_data_, self.meta_data)

        with self.assertRaises(ValueError):
            meta_data_ = np.vstack((self.meta_data,
                                    np.zeros((1, len(self.metas)))))
            data.Table(domain, self.data, self.class_data, meta_data_)

    @patch("Orange.data.table.Table.from_numpy")
    def test_calling_new_with_domain_and_numpy_arrays_calls_new_from_numpy(
            self, new_from_numpy):
        # Table(domain, arrays...) should delegate to Table.from_numpy
        # for every arity (X; X,Y; X,Y,metas; X,Y,metas,W).
        domain = self.mock_domain()
        data.Table(domain, self.data)
        new_from_numpy.assert_called_with(domain, self.data)

        domain = self.mock_domain(with_classes=True)
        data.Table(domain, self.data, self.class_data)
        new_from_numpy.assert_called_with(domain, self.data, self.class_data)

        domain = self.mock_domain(with_classes=True, with_metas=True)
        data.Table(domain, self.data, self.class_data, self.meta_data)
        new_from_numpy.assert_called_with(
            domain, self.data, self.class_data, self.meta_data)

        data.Table(domain, self.data, self.class_data,
                   self.meta_data, self.weight_data)
        new_from_numpy.assert_called_with(domain, self.data, self.class_data,
                                          self.meta_data, self.weight_data)

    def test_from_numpy_reconstructable(self):
        # A table rebuilt from its own X/Y/metas/W must match the original.
        def assert_equal(T1, T2):
            np.testing.assert_array_equal(T1.X, T2.X)
            np.testing.assert_array_equal(T1.Y, T2.Y)
            np.testing.assert_array_equal(T1.metas, T2.metas)
            np.testing.assert_array_equal(T1.W, T2.W)

        nullcol = np.empty((self.nrows, 0))
        domain = self.create_domain(self.attributes)
        table = data.Table(domain, self.data)

        table_1 = data.Table.from_numpy(
            domain, table.X, table.Y, table.metas, table.W)
        assert_equal(table, table_1)

        domain = self.create_domain(classes=self.class_vars)
        table = data.Table(domain, nullcol, self.class_data)

        table_1 = data.Table.from_numpy(
            domain, table.X, table.Y, table.metas, table.W)
        assert_equal(table, table_1)

        domain = self.create_domain(metas=self.metas)
        table = data.Table(domain, nullcol, nullcol, self.meta_data)

        table_1 = data.Table.from_numpy(
            domain, table.X, table.Y, table.metas, table.W)
        assert_equal(table, table_1)
class CreateTableWithDomainAndTable(TableTests):
    """Tests for Table.from_table: copying, row filtering, and mapping
    the columns of an existing table onto a new domain."""

    # Slices exercising the edge cases of row selection.
    interesting_slices = [
        slice(0, 0),  # [0:0] - empty slice
        slice(1),  # [:1] - only first element
        slice(1, None),  # [1:] - all but first
        slice(-1, None),  # [-1:] - only last element
        slice(-1),  # [:-1] - all but last
        slice(None),  # [:] - all elements
        slice(None, None, 2),  # [::2] - even elements
        slice(None, None, -1),  # [::-1]- all elements reversed
    ]

    row_indices = [1, 5, 6, 7]

    def setUp(self):
        self.domain = self.create_domain(
            self.attributes, self.class_vars, self.metas)
        self.table = data.Table(
            self.domain, self.data, self.class_data, self.meta_data)

    def test_creates_table_with_given_domain(self):
        new_table = data.Table.from_table(self.table.domain, self.table)

        self.assertIsInstance(new_table, data.Table)
        self.assertIsNot(self.table, new_table)
        self.assertEqual(new_table.domain, self.domain)

    def test_can_copy_table(self):
        new_table = data.Table.from_table(self.domain, self.table)
        self.assert_table_with_filter_matches(new_table, self.table)

    def test_can_filter_rows_with_list(self):
        for indices in ([0], [1, 5, 6, 7]):
            new_table = data.Table.from_table(
                self.domain, self.table, row_indices=indices)
            self.assert_table_with_filter_matches(
                new_table, self.table, rows=indices)

    def test_can_filter_row_with_slice(self):
        for slice_ in self.interesting_slices:
            new_table = data.Table.from_table(
                self.domain, self.table, row_indices=slice_)
            self.assert_table_with_filter_matches(
                new_table, self.table, rows=slice_)

    def test_can_use_attributes_as_new_columns(self):
        a, c, m = column_sizes(self.table)
        order = [random.randrange(a) for _ in self.domain.attributes]
        new_attributes = [self.domain.attributes[i] for i in order]
        new_domain = self.create_domain(
            new_attributes, new_attributes, new_attributes)
        new_table = data.Table.from_table(new_domain, self.table)

        self.assert_table_with_filter_matches(
            new_table, self.table, xcols=order, ycols=order, mcols=order)

    def test_can_use_class_vars_as_new_columns(self):
        a, c, m = column_sizes(self.table)
        order = [random.randrange(a, a + c) for _ in self.domain.class_vars]
        new_classes = [self.domain.class_vars[i - a] for i in order]
        new_domain = self.create_domain(new_classes, new_classes, new_classes)
        new_table = data.Table.from_table(new_domain, self.table)

        self.assert_table_with_filter_matches(
            new_table, self.table, xcols=order, ycols=order, mcols=order)

    def test_can_use_metas_as_new_columns(self):
        # Meta columns are addressed with negative indices.
        a, c, m = column_sizes(self.table)
        order = [random.randrange(-m + 1, 0) for _ in self.domain.metas]
        new_metas = [self.domain.metas[::-1][i] for i in order]
        new_domain = self.create_domain(new_metas, new_metas, new_metas)
        new_table = data.Table.from_table(new_domain, self.table)

        self.assert_table_with_filter_matches(
            new_table, self.table, xcols=order, ycols=order, mcols=order)

    def test_can_use_combination_of_all_as_new_columns(self):
        a, c, m = column_sizes(self.table)
        order = ([random.randrange(a) for _ in self.domain.attributes] +
                 [random.randrange(a, a + c) for _ in self.domain.class_vars] +
                 [random.randrange(-m + 1, 0) for _ in self.domain.metas])
        random.shuffle(order)
        vars = list(self.domain.variables) + list(self.domain.metas[::-1])
        vars = [vars[i] for i in order]

        new_domain = self.create_domain(vars, vars, vars)
        new_table = data.Table.from_table(new_domain, self.table)
        self.assert_table_with_filter_matches(
            new_table, self.table, xcols=order, ycols=order, mcols=order)

    def test_creates_table_with_given_domain_and_row_filter(self):
        a, c, m = column_sizes(self.table)
        order = ([random.randrange(a) for _ in self.domain.attributes] +
                 [random.randrange(a, a + c) for _ in self.domain.class_vars] +
                 [random.randrange(-m + 1, 0) for _ in self.domain.metas])
        random.shuffle(order)
        vars = list(self.domain.variables) + list(self.domain.metas[::-1])
        vars = [vars[i] for i in order]

        new_domain = self.create_domain(vars, vars, vars)
        new_table = data.Table.from_table(new_domain, self.table, [0])
        self.assert_table_with_filter_matches(
            new_table, self.table[:1], xcols=order, ycols=order, mcols=order)

        new_table = data.Table.from_table(new_domain, self.table, [2, 1, 0])
        self.assert_table_with_filter_matches(
            new_table, self.table[2::-1], xcols=order, ycols=order, mcols=order)

        new_table = data.Table.from_table(new_domain, self.table, [])
        self.assert_table_with_filter_matches(
            new_table, self.table[:0], xcols=order, ycols=order, mcols=order)

    def assert_table_with_filter_matches(
            self, new_table, old_table,
            rows=..., xcols=..., ycols=..., mcols=...):
        # Ellipsis defaults mean "all rows" / "all columns of that part".
        a, c, m = column_sizes(old_table)
        xcols = slice(a) if xcols is Ellipsis else xcols
        ycols = slice(a, a + c) if ycols is Ellipsis else ycols
        mcols = slice(None, -m - 1, -1) if mcols is Ellipsis else mcols

        # Indexing used by convert_domain uses positive indices for variables
        # and classes (classes come after attributes) and negative indices for
        # meta features. This is equivalent to ordinary indexing in a magic
        # table below.
        magic = np.hstack((old_table.X, old_table.Y[:, None],
                           old_table.metas[:, ::-1]))

        np.testing.assert_almost_equal(new_table.X, magic[rows, xcols])
        Y = magic[rows, ycols]
        if Y.shape[1] == 1:
            Y = Y.flatten()
        np.testing.assert_almost_equal(new_table.Y, Y)
        np.testing.assert_almost_equal(new_table.metas, magic[rows, mcols])
        np.testing.assert_almost_equal(new_table.W, old_table.W[rows])
def isspecial(s):
    """True for column selectors handled specially: slices and Ellipsis."""
    return s is Ellipsis or isinstance(s, slice)
def split_columns(indices, t):
    """Split a mixed column selector into (attribute, class, meta) selectors.

    `indices` may be Ellipsis (everything), a slice (applied to attributes
    only), a single index/variable/name, or a list/tuple of those.  Class
    indices are shifted down by the attribute count `a`; negative (meta)
    indices are mapped to non-negative positions.  A group with no matches
    becomes the empty slice(0, 0).
    """
    a, c, m = column_sizes(t)
    if indices is ...:
        return slice(a), slice(c), slice(m)
    elif isinstance(indices, slice):
        # A plain slice selects attributes only.
        return indices, slice(0, 0), slice(0, 0)
    elif not isinstance(indices, list) and not isinstance(indices, tuple):
        indices = [indices]
    return (
        [t.domain.index(x)
         for x in indices if 0 <= t.domain.index(x) < a] or slice(0, 0),
        [t.domain.index(x) - a
         for x in indices if t.domain.index(x) >= a] or slice(0, 0),
        [-t.domain.index(x) - 1
         for x in indices if t.domain.index(x) < 0] or slice(0, 0))
def getname(variable):
    """Key function: return the ``name`` attribute of a variable."""
    return getattr(variable, "name")
class TableIndexingTests(TableTests):
    """Selecting single values, rows and sub-tables with [] indexing."""

    def setUp(self):
        super().setUp()
        d = self.domain = \
            self.create_domain(self.attributes, self.class_vars, self.metas)
        t = self.table = \
            data.Table(self.domain, self.data, self.class_data, self.meta_data)
        # Metas are reversed so that negative domain indices line up with
        # ordinary indexing into this combined reference array.
        self.magic_table = \
            np.column_stack((self.table.X, self.table.Y,
                             self.table.metas[:, ::-1]))

        self.rows = [0, -1]
        self.multiple_rows = [slice(0, 0), ..., slice(1, -1, -1)]
        a, c, m = column_sizes(t)
        columns = [0, a - 1, a, a + c - 1, -1, -m]

        # Columns can be addressed by index, by variable, or by name.
        self.columns = chain(columns,
                             map(lambda x: d[x], columns),
                             map(lambda x: d[x].name, columns))
        self.multiple_columns = chain(
            self.multiple_rows,
            [d.attributes, d.class_vars, d.metas, [0, a, -1]],
            [self.attributes, self.class_vars, self.metas],
            [self.attributes + self.class_vars + self.metas])

    # TODO: indexing with [[0,1], [0,1]] produces weird results
    # TODO: what should be the results of table[1, :]
    def test_can_select_a_single_value(self):
        for r in self.rows:
            for c in self.columns:
                value = self.table[r, c]
                self.assertAlmostEqual(
                    value, self.magic_table[r, self.domain.index(c)])

                value = self.table[r][c]
                self.assertAlmostEqual(
                    value, self.magic_table[r, self.domain.index(c)])

    def test_can_select_a_single_row(self):
        for r in self.rows:
            row = self.table[r]
            new_row = np.hstack(
                (self.data[r, :],
                 self.class_data[r, None]))
            np.testing.assert_almost_equal(
                np.array(list(row)), new_row)

    def test_can_select_a_subset_of_rows_and_columns(self):
        # Single row + multiple columns.
        for r in self.rows:
            for c in self.multiple_columns:
                table = self.table[r, c]

                attr, cls, metas = split_columns(c, self.table)
                X = self.table.X[[r], attr]
                if X.ndim == 1:
                    X = X.reshape(-1, len(table.domain.attributes))
                np.testing.assert_almost_equal(table.X, X)
                Y = self.table.Y[:, None][[r], cls]
                if len(Y.shape) == 1 or Y.shape[1] == 1:
                    Y = Y.flatten()
                np.testing.assert_almost_equal(table.Y, Y)
                metas_ = self.table.metas[[r], metas]
                if metas_.ndim == 1:
                    metas_ = metas_.reshape(-1, len(table.domain.metas))
                np.testing.assert_almost_equal(table.metas, metas_)

        # Multiple rows + single or multiple columns.
        for r in self.multiple_rows:
            for c in chain(self.columns, self.multiple_rows):
                table = self.table[r, c]

                attr, cls, metas = split_columns(c, self.table)
                np.testing.assert_almost_equal(table.X, self.table.X[r, attr])
                Y = self.table.Y[:, None][r, cls]
                if len(Y.shape) > 1 and Y.shape[1] == 1:
                    Y = Y.flatten()
                np.testing.assert_almost_equal(table.Y, Y)
                np.testing.assert_almost_equal(table.metas,
                                               self.table.metas[r, metas])
class TableElementAssignmentTest(TableTests):
    """Assigning values, rows, lists and arrays through [] indexing."""

    def setUp(self):
        super().setUp()
        self.domain = \
            self.create_domain(self.attributes, self.class_vars, self.metas)
        self.table = \
            data.Table(self.domain, self.data, self.class_data, self.meta_data)

    def test_can_assign_values(self):
        self.table[0, 0] = 42.
        self.assertAlmostEqual(self.table.X[0, 0], 42.)

    def test_can_assign_values_to_classes(self):
        # Column index `a` (first after the attributes) is the class column.
        a, c, m = column_sizes(self.table)
        self.table[0, a] = 42.
        self.assertAlmostEqual(self.table.Y[0], 42.)

    def test_can_assign_values_to_metas(self):
        # Negative column index -1 addresses the first meta column.
        self.table[0, -1] = 42.
        self.assertAlmostEqual(self.table.metas[0, 0], 42.)

    def test_can_assign_rows_to_rows(self):
        self.table[0] = self.table[1]
        np.testing.assert_almost_equal(
            self.table.X[0], self.table.X[1])
        np.testing.assert_almost_equal(
            self.table.Y[0], self.table.Y[1])
        np.testing.assert_almost_equal(
            self.table.metas[0], self.table.metas[1])

    def test_can_assign_lists(self):
        a, c, m = column_sizes(self.table)
        new_example = [float(i)
                       for i in range(len(self.attributes + self.class_vars))]
        self.table[0] = new_example
        np.testing.assert_almost_equal(
            self.table.X[0], np.array(new_example[:a]))
        np.testing.assert_almost_equal(
            self.table.Y[0], np.array(new_example[a:]))

    def test_can_assign_np_array(self):
        a, c, m = column_sizes(self.table)
        new_example = \
            np.array([float(i)
                      for i in range(len(self.attributes + self.class_vars))])
        self.table[0] = new_example
        np.testing.assert_almost_equal(self.table.X[0], new_example[:a])
        np.testing.assert_almost_equal(self.table.Y[0], new_example[a:])
class InterfaceTest(unittest.TestCase):
    """Basic tests each implementation of Table should pass."""

    features = (
        data.ContinuousVariable(name="Continuous Feature 1"),
        data.ContinuousVariable(name="Continuous Feature 2"),
        data.DiscreteVariable(name="Discrete Feature 1", values=[0,1]),
        data.DiscreteVariable(name="Discrete Feature 2", values=["value1", "value2"]),
    )

    class_vars = (
        data.ContinuousVariable(name="Continuous Class"),
        data.DiscreteVariable(name="Discrete Class")
    )

    feature_data = (
        (1, 0, 0, 0),
        (0, 1, 0, 0),
        (0, 0, 1, 0),
        (0, 0, 0, 1),
    )

    class_data = (
        (1, 0),
        (0, 1),
        (1, 0),
        (0, 1)
    )

    # Full expected rows: feature values followed by class values.
    # NOTE(review): this class attribute shadows the module-level `data`
    # name only inside the class body; methods still see the module.
    data = tuple(a + c for a, c in zip(feature_data, class_data))

    nrows = 4

    def setUp(self):
        self.domain = data.Domain(attributes=self.features, class_vars=self.class_vars)
        self.table = data.Table.from_numpy(
            self.domain,
            np.array(self.feature_data),
            np.array(self.class_data),
        )

    def test_len(self):
        self.assertEqual(len(self.table), self.nrows)

    def test_row_len(self):
        for i in range(self.nrows):
            self.assertEqual(len(self.table[i]), len(self.data[i]))

    def test_iteration(self):
        for row, expected_data in zip(self.table, self.data):
            self.assertEqual(tuple(row), expected_data)

    def test_row_indexing(self):
        for i in range(self.nrows):
            self.assertEqual(tuple(self.table[i]), self.data[i])

    def test_row_slicing(self):
        t = self.table[1:]
        self.assertEqual(len(t), self.nrows - 1)

    def test_value_indexing(self):
        for i in range(self.nrows):
            for j in range(len(self.table[i])):
                self.assertEqual(self.table[i, j], self.data[i][j])

    def test_row_assignment(self):
        new_value = 2.
        for i in range(self.nrows):
            new_row = [new_value] * len(self.data[i])
            self.table[i] = np.array(new_row)
            self.assertEqual(list(self.table[i]), new_row)

    def test_value_assignment(self):
        new_value = 0.
        for i in range(self.nrows):
            for j in range(len(self.table[i])):
                self.table[i, j] = new_value
                self.assertEqual(self.table[i, j], new_value)

    def test_append_rows(self):
        new_value = 2
        new_row = [new_value] * len(self.data[0])
        self.table.append(new_row)
        self.assertEqual(list(self.table[-1]), new_row)

    def test_insert_rows(self):
        new_value = 2
        new_row = [new_value] * len(self.data[0])
        self.table.insert(0, new_row)
        self.assertEqual(list(self.table[0]), new_row)
        for row, expected in zip(self.table[1:], self.data):
            self.assertEqual(tuple(row), expected)

    def test_delete_rows(self):
        for i in range(self.nrows):
            del self.table[0]
            for j in range(len(self.table)):
                # After i+1 deletions, remaining row j is original row i+j+1.
                self.assertEqual(tuple(self.table[j]), self.data[i+j+1])

    def test_clear(self):
        self.table.clear()
        self.assertEqual(len(self.table), 0)
        for i in self.table:
            self.fail("Table should not contain any rows.")
class TestRowInstance(unittest.TestCase):
    """RowInstance views must write through to the underlying table."""

    def test_assignment(self):
        table = data.Table("zoo")
        inst = table[2]
        self.assertIsInstance(inst, data.RowInstance)

        # Attribute assignment via the row is visible in the table.
        inst[1] = 0
        self.assertEqual(table[2, 1], 0)
        inst[1] = 1
        self.assertEqual(table[2, 1], 1)

        # Class assignment: the class column sits right after the attributes.
        inst.set_class("mammal")
        self.assertEqual(table[2, len(table.domain.attributes)], "mammal")
        inst.set_class("fish")
        self.assertEqual(table[2, len(table.domain.attributes)], "fish")

        # Meta assignment via negative index.
        inst[-1] = "Foo"
        self.assertEqual(table[2, -1], "Foo")

    def test_iteration_with_assignment(self):
        table = data.Table("iris")
        for i, row in enumerate(table):
            row[0] = i
        np.testing.assert_array_equal(table.X[:, 0], np.arange(len(table)))
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
| bsd-2-clause |
worthwhile/cmsplugin-remote-form | cmsplugin_remote_form/migrations/0001_initial.py | 1 | 4950 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-28 13:52
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
    """Initial schema for cmsplugin_remote_form.

    Auto-generated by Django (1.9.12); the operations must stay in sync
    with the migration state recorded in deployed databases, so do not
    hand-edit them.
    """

    initial = True

    dependencies = [
        ('cms', '0016_auto_20160608_1535'),
    ]

    operations = [
        # Stored submissions of a RemoteForm (free-form JSON payload).
        migrations.CreateModel(
            name='ContactRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_of_entry', models.DateTimeField(auto_now_add=True)),
                ('date_processed', models.DateTimeField(blank=True, help_text='Date the Record was processed.', null=True)),
                ('data', jsonfield.fields.JSONField(blank=True, default={}, null=True)),
            ],
            options={
                'ordering': ['date_of_entry', 'contact_form'],
                'verbose_name': 'Contact Record',
                'verbose_name_plural': 'Contact Records',
            },
        ),
        # A single user-defined field belonging to a RemoteForm.
        migrations.CreateModel(
            name='ExtraField',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=100, verbose_name='Label')),
                ('fieldType', models.CharField(choices=[(b'CharField', b'CharField'), (b'BooleanField', b'BooleanField'), (b'EmailField', b'EmailField'), (b'DecimalField', b'DecimalField'), (b'FloatField', b'FloatField'), (b'IntegerField', b'IntegerField'), (b'FileField', b'FileField'), (b'ImageField', b'ImageField'), (b'IPAddressField', b'IPAddressField'), (b'MathCaptcha', b'Math Captcha'), (b'auto_Textarea', 'CharField as Textarea'), (b'auto_hidden_input', 'CharField as HiddenInput'), (b'auto_referral_page', 'Referral page as HiddenInput'), (b'auto_GET_parameter', 'GET parameter as HiddenInput'), (b'CharFieldWithValidator', b'CharFieldWithValidator'), (b'ReCaptcha', b'reCAPTCHA')], max_length=100)),
                ('initial', models.CharField(blank=True, max_length=250, null=True, verbose_name='Inital Value')),
                ('required', models.BooleanField(default=True, verbose_name='Mandatory field')),
                ('widget', models.CharField(blank=True, help_text='Will be ignored in the current version.', max_length=250, null=True, verbose_name='Widget')),
                ('inline_ordering_position', models.IntegerField(blank=True, null=True)),
            ],
            options={
                'ordering': ('inline_ordering_position',),
            },
        ),
        # The CMS plugin model itself (one-to-one with cms.CMSPlugin).
        migrations.CreateModel(
            name='RemoteForm',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cmsplugin_remote_form_remoteform', serialize=False, to='cms.CMSPlugin')),
                ('post_url', models.CharField(default=b'#remoteURL', max_length=200, null=True, verbose_name='Remote URL')),
                ('submit_button_text', models.CharField(blank=True, max_length=30, verbose_name='Text for the Submit button.')),
                ('thanks', models.TextField(verbose_name='Message displayed after submitting the contact form.')),
                ('thanks_in_modal', models.BooleanField(default=True, verbose_name='Show Thanks In Modal')),
                ('collect_records', models.BooleanField(default=True, help_text='If active, all records for this Form will be stored in the Database.', verbose_name='Collect Records')),
                ('template', models.CharField(choices=[(b'cmsplugin_remote_form/default.html', b'default.html'), (b'cmsplugin_remote_form/inline.html', b'inline.html')], default=b'cmsplugin_remote_form/default.html', max_length=255)),
                ('fields_in_row', models.BooleanField(default=False, verbose_name='Put Fields in a .row')),
                ('field_class', models.CharField(blank=True, max_length=50, verbose_name='CSS class to put on the field.')),
                ('label_class', models.CharField(blank=True, max_length=50, verbose_name='CSS class to put on the label.')),
            ],
            options={
                'verbose_name': 'Remote Form',
                'verbose_name_plural': 'Remote Forms',
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.AddField(
            model_name='extrafield',
            name='form',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cmsplugin_remote_form.RemoteForm', verbose_name='Contact Form'),
        ),
        migrations.AddField(
            model_name='contactrecord',
            name='contact_form',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='cmsplugin_remote_form.RemoteForm', verbose_name='Contact Form'),
        ),
    ]
| bsd-3-clause |
CoBiG2/RAD_Tools | struct_to_distruct.py | 1 | 1435 | #!/usr/bin/python3
# Copyright 2015 Francisco Pina Martins <f.pinamartins@gmail.com>
# This file is part of struct_to_distruct.
# struct_to_distruct is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# struct_to_distruct is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with struct_to_distruct. If not, see <http://www.gnu.org/licenses/>.
#Creates indfile and popfile from structure output.
#Useage: python struct_to_distruct.py "structure_output_file" "directory_to_create_popfile_and_indfile"
from sys import argv
import re

# Population-assignment lines in STRUCTURE output look like "  12:  ...".
# Raw string avoids the invalid "\d" escape in a plain string literal.
POP_LINE = re.compile(r"\d+:")

# Open input and both outputs with context managers so they are closed
# even if an error occurs mid-parse.  Output paths keep the original
# behavior of plain concatenation: argv[2] must end with a separator.
with open(argv[1], 'r') as infile, \
        open(argv[2] + 'popfile', 'w') as popfile, \
        open(argv[2] + 'indfile', 'w') as indfile:
    indzone = 0  # 1 while inside the "Inferred clusters" section
    for lines in infile:
        if POP_LINE.match(lines.strip()):
            # Per-population summary line -> popfile.
            popfile.write(lines)
        elif lines.strip().endswith("Inferred clusters"):
            # Header marking the start of the per-individual section;
            # the header itself is not written.
            indzone = 1
            continue
        if indzone == 1:
            indfile.write(lines)
        # A blank line terminates the per-individual section.
        if indzone == 1 and lines.startswith("\n"):
            indzone = 0
| gpl-3.0 |
aaronsw/watchdog | vendor/webpy/web/contrib/template.py | 100 | 3449 | """
Interface to various templating engines.
"""
import os.path
__all__ = [
"render_cheetah", "render_genshi", "render_mako",
"cache",
]
class render_cheetah:
    """Rendering interface to Cheetah Templates.

    Templates are looked up as ``<path>/<name>.html``.

    Example:

        render = render_cheetah('templates')
        render.hello(name="cheetah")
    """
    def __init__(self, path):
        # Import eagerly so a missing Cheetah install fails at construction
        # time rather than on first template lookup.
        from Cheetah.Template import Template
        self.path = path

    def __getattr__(self, name):
        from Cheetah.Template import Template
        path = os.path.join(self.path, name + ".html")

        def template(**kw):
            # searchList makes the keyword args available inside the template.
            t = Template(file=path, searchList=[kw])
            return t.respond()

        return template
class render_genshi:
    """Rendering interface to Genshi templates.

    Example:

        For xml/html templates:
        render = render_genshi(['templates/'])
        render.hello(name='genshi')

        For text templates:
        render = render_genshi(['templates/'], type='text')
        render.hello(name='genshi')
    """

    def __init__(self, *a, **kwargs):
        from genshi.template import TemplateLoader

        # 'type' is popped so the remaining kwargs pass through to the loader.
        self._type = kwargs.pop('type', None)
        self._loader = TemplateLoader(*a, **kwargs)

    def __getattr__(self, name):
        # Assuming all templates are html
        path = name + ".html"

        if self._type == "text":
            from genshi.template import TextTemplate
            cls = TextTemplate
            type = "text"  # NOTE: shadows the builtin `type` in this scope
        else:
            cls = None
            type = None

        t = self._loader.load(path, cls=cls)

        def template(**kw):
            stream = t.generate(**kw)
            if type:
                return stream.render(type)
            else:
                return stream.render()

        return template
class render_jinja:
    """Rendering interface to Jinja2 Templates.

    Example:

        render = render_jinja('templates')
        render.hello(name='jinja2')
    """
    def __init__(self, *a, **kwargs):
        # 'extensions' and 'globals' are consumed here; everything else is
        # forwarded to FileSystemLoader.
        extensions = kwargs.pop('extensions', [])
        globals = kwargs.pop('globals', {})

        from jinja2 import Environment, FileSystemLoader
        self._lookup = Environment(loader=FileSystemLoader(*a, **kwargs), extensions=extensions)
        self._lookup.globals.update(globals)

    def __getattr__(self, name):
        # Assuming all templates end with .html
        path = name + '.html'
        t = self._lookup.get_template(path)
        return t.render
class render_mako:
    """Rendering interface to Mako Templates.

    Example:

        render = render_mako(directories=['templates'])
        render.hello(name="mako")
    """
    def __init__(self, *a, **kwargs):
        from mako.lookup import TemplateLookup
        self._lookup = TemplateLookup(*a, **kwargs)

    def __getattr__(self, name):
        # Assuming all templates are html
        path = name + ".html"
        t = self._lookup.get_template(path)
        return t.render
class cache:
    """Memoizing wrapper around any rendering interface.

    The first attribute access for a template name is forwarded to the
    wrapped renderer; the result is stored and served from a local dict
    on every subsequent access.

    Example:

        render = cache(render_cheetah("templates/"))
        render.hello(name='cache')
    """
    def __init__(self, render):
        self._render = render
        self._cache = {}

    def __getattr__(self, name):
        try:
            return self._cache[name]
        except KeyError:
            template = getattr(self._render, name)
            self._cache[name] = template
            return template
| agpl-3.0 |
bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/email/quoprimime.py | 246 | 10848 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: email-sig@python.org
"""Quoted-printable content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
safely encode text that is in a character set similar to the 7-bit US ASCII
character set, but that includes some 8-bit characters that are normally not
allowed in email bodies or headers.
Quoted-printable is very space-inefficient for encoding binary files; use the
email.base64mime module for that instead.
This module provides an interface to encode and decode both headers and bodies
with quoted-printable encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:/From:/Cc: etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character
conversion necessary for proper internationalized headers; it only
does dumb encoding and decoding. To deal with the various line
wrapping issues, use the email.header module.
"""
__all__ = [
'body_decode',
'body_encode',
'body_quopri_check',
'body_quopri_len',
'decode',
'decodestring',
'encode',
'encodestring',
'header_decode',
'header_encode',
'header_quopri_check',
'header_quopri_len',
'quote',
'unquote',
]
import re
from string import hexdigits
from email.utils import fix_eols
CRLF = '\r\n'
NL = '\n'

# See also Charset.py
# Fixed overhead of the RFC 2047 encoded-word chrome "=?<charset>?q?...?="
# excluding the charset name itself: "=?" + "?q?" + "?=" = 7 characters.
MISC_LEN = 7

# Characters that must be escaped in header (RFC 2047 `Q') encoding.
hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
# Characters that must be escaped in quoted-printable body encoding.
bqre = re.compile(r'[^ !-<>-~\t]')
# Helpers
def header_quopri_check(c):
    """Return True if the character should be escaped with header quopri."""
    return hqre.match(c) is not None
def body_quopri_check(c):
    """Return True if the character should be escaped with body quopri."""
    return bqre.match(c) is not None
def header_quopri_len(s):
    """Return the length of str when it is encoded with header quopri."""
    # Escaped characters expand to "=XX" (3 chars); all others stay 1 char.
    return sum(3 if hqre.match(c) else 1 for c in s)
def body_quopri_len(str):
    """Return the length of str when it is encoded with body quopri."""
    # NOTE(review): the parameter name shadows the builtin `str`; kept
    # unchanged for backward compatibility with keyword callers.
    return sum(3 if bqre.match(c) else 1 for c in str)
def _max_append(L, s, maxlen, extra=''):
if not L:
L.append(s.lstrip())
elif len(L[-1]) + len(s) <= maxlen:
L[-1] += extra + s
else:
L.append(s.lstrip())
def unquote(s):
    """Turn a string in the form =AB to the ASCII character with value 0xab"""
    hex_pair = s[1:3]
    return chr(int(hex_pair, 16))
def quote(c):
    """Return the =XX quoted-printable escape for character *c*."""
    return "={:02X}".format(ord(c))
def header_encode(header, charset="iso-8859-1", keep_eols=False,
                  maxlinelen=76, eol=NL):
    """Encode a single header line with quoted-printable (like) encoding.

    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
    used specifically for email header fields to allow charsets with mostly 7
    bit characters (and some 8 bit) to remain more or less readable in non-RFC
    2045 aware mail clients.

    charset names the character set to use to encode the header.  It defaults
    to iso-8859-1.

    The resulting string will be in the form:

    "=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
      =?charset?q?Silly_=C8nglish_Kn=EEghts?="

    with each line wrapped safely at, at most, maxlinelen characters (defaults
    to 76 characters).  If maxlinelen is None, the entire string is encoded in
    one chunk with no splitting.

    End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
    to the canonical email line separator \\r\\n unless the keep_eols
    parameter is True (the default is False).

    Each line of the header will be terminated in the value of eol, which
    defaults to "\\n".  Set this to "\\r\\n" if you are using the result of
    this function directly in email.
    """
    # Return empty headers unchanged
    if not header:
        return header

    if not keep_eols:
        header = fix_eols(header)

    # Quopri encode each line, in encoded chunks no greater than maxlinelen in
    # length, after the RFC chrome is added in.
    quoted = []
    if maxlinelen is None:
        # An obnoxiously large number that's good enough
        max_encoded = 100000
    else:
        # Budget per chunk: total line length minus the "=?charset?q?...?="
        # chrome (MISC_LEN) and one column for the continuation indent.
        max_encoded = maxlinelen - len(charset) - MISC_LEN - 1

    for c in header:
        # Space may be represented as _ instead of =20 for readability
        if c == ' ':
            _max_append(quoted, '_', max_encoded)
        # These characters can be included verbatim
        elif not hqre.match(c):
            _max_append(quoted, c, max_encoded)
        # Otherwise, replace with hex value like =E2
        else:
            _max_append(quoted, "=%02X" % ord(c), max_encoded)

    # Now add the RFC chrome to each encoded chunk and glue the chunks
    # together.  BAW: should we be able to specify the leading whitespace in
    # the joiner?
    joiner = eol + ' '
    return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])
def encode(body, binary=False, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.

    If binary is False (the default), end-of-line characters will be converted
    to the canonical email end-of-line sequence \\r\\n.  Otherwise they will
    be left verbatim.

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).  Long lines will have the `soft linefeed' quoted-printable
    character "=" appended to them, so the decoded text will be identical to
    the original text.
    """
    if not body:
        return body

    if not binary:
        body = fix_eols(body)

    # BAW: We're accumulating the body text by string concatenation.  That
    # can't be very efficient, but I don't have time now to rewrite it.  It
    # just feels like this algorithm could be more efficient.
    encoded_body = ''
    lineno = -1
    # Preserve line endings here so we can check later to see if an eol needs
    # to be added to the output later.
    lines = body.splitlines(1)

    for line in lines:
        # But strip off line-endings for processing this line.
        if line.endswith(CRLF):
            line = line[:-2]
        elif line[-1] in CRLF:
            line = line[:-1]

        lineno += 1
        encoded_line = ''
        prev = None
        linelen = len(line)
        # Now we need to examine every character to see if it needs to be
        # quopri encoded.  BAW: again, string concatenation is inefficient.
        for j in range(linelen):
            c = line[j]
            prev = c
            if bqre.match(c):
                c = quote(c)
            elif j+1 == linelen:
                # Check for whitespace at end of line; special case.  Trailing
                # space/tab is held back in `prev' and handled below, because
                # RFC 2045 forbids bare whitespace before a line break.
                if c not in ' \t':
                    encoded_line += c
                    prev = c
                continue
            # Check to see if the line has reached its maximum length
            if len(encoded_line) + len(c) >= maxlinelen:
                # Emit a soft line break ("=" + eol) and start a fresh line.
                encoded_body += encoded_line + '=' + eol
                encoded_line = ''
            encoded_line += c

        # Now at end of line..
        if prev and prev in ' \t':
            # Special case for whitespace at end of file
            if lineno + 1 == len(lines):
                # Last line: the whitespace must be hex-escaped since no soft
                # break can follow it.
                prev = quote(prev)
                if len(encoded_line) + len(prev) > maxlinelen:
                    encoded_body += encoded_line + '=' + eol + prev
                else:
                    encoded_body += encoded_line + prev
            # Just normal whitespace at end of line
            else:
                # Protect the trailing whitespace with a soft line break.
                encoded_body += encoded_line + prev + '=' + eol
            encoded_line = ''

        # Now look at the line we just finished and if it has a line ending,
        # we need to add eol to the end of the line.
        if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
            encoded_body += encoded_line + eol
        else:
            encoded_body += encoded_line
        encoded_line = ''

    return encoded_body
# For convenience and backwards compatibility w/ standard base64 module:
# body_encode/encodestring are plain aliases of encode().
body_encode = encode
encodestring = encode
# BAW: I'm not sure if the intent was for the signature of this function to be
# the same as base64MIME.decode() or not...
def decode(encoded, eol='\n'):
    """Decode a quoted-printable string.

    Lines are separated with eol, which defaults to \\n (the module's NL).

    `=XX' hex escapes are replaced by their character value, a trailing `='
    is treated as an RFC 2045 soft line break (the following line separator
    is swallowed), and malformed escapes are passed through literally.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''

    for line in encoded.splitlines():
        line = line.rstrip()
        if not line:
            # A blank input line decodes to a bare line separator.
            decoded += eol
            continue

        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, it
            # is a soft line break: swallow it and (via continue) skip the
            # end-of-line separator below so the next line joins this one.
            elif i+1 == n:
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                # Inlined unquote(): chr of the two hex digits after '='.
                decoded += chr(int(line[i+1:i+3], 16))
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1

            if i == n:
                # Hard end of line: emit the separator.
                decoded += eol

    # Special case if original string did not end with eol: drop the
    # separator we appended for the final line.  Strip len(eol) characters so
    # multi-character separators such as '\r\n' are removed completely (the
    # old code sliced off only one character, leaving a stray '\r').
    if not encoded.endswith(eol) and decoded.endswith(eol):
        decoded = decoded[:-len(eol)]

    return decoded
# For convenience and backwards compatibility w/ standard base64 module:
# body_decode/decodestring are plain aliases of decode().
body_decode = decode
decodestring = decode
def _unquote_match(match):
"""Turn a match in the form =AB to the ASCII character with value 0xab"""
s = match.group(0)
return unquote(s)
# Header decoding is done a bit differently
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.

    This function does not parse a full MIME header value encoded with
    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
    the high level email.header class for that functionality.
    """
    # In Q encoding an underscore stands for a space; substitute first, then
    # expand the =XX hex escapes.
    with_spaces = s.replace('_', ' ')
    return re.sub(r'=[a-fA-F0-9]{2}',
                  lambda match: chr(int(match.group(0)[1:3], 16)),
                  with_spaces)
| mit |
rmfitzpatrick/ansible | lib/ansible/modules/network/cloudengine/ce_interface_ospf.py | 27 | 31192 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata: maturity ('preview') and support level of this
# community-maintained module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_interface_ospf
version_added: "2.4"
short_description: Manages configuration of an OSPF interface instance on HUAWEI CloudEngine switches.
description:
    - Manages configuration of an OSPF interface instance on HUAWEI CloudEngine switches.
author: QijunPan (@CloudEngine-Ansible)
options:
interface:
description:
- Full name of interface, i.e. 40GE1/0/10.
required: true
process_id:
description:
- Specifies a process ID.
The value is an integer ranging from 1 to 4294967295.
required: true
area:
description:
- Ospf area associated with this ospf process.
Valid values are a string, formatted as an IP address
(i.e. "0.0.0.0") or as an integer between 1 and 4294967295.
required: true
cost:
description:
- The cost associated with this interface.
Valid values are an integer in the range from 1 to 65535.
required: false
default: null
hello_interval:
description:
- Time between sending successive hello packets.
Valid values are an integer in the range from 1 to 65535.
required: false
default: null
dead_interval:
description:
- Time interval an ospf neighbor waits for a hello
packet before tearing down adjacencies. Valid values are an
integer in the range from 1 to 235926000.
required: false
default: null
silent_interface:
description:
- Setting to true will prevent this interface from receiving
HELLO packets. Valid values are 'true' and 'false'.
required: false
default: false
auth_mode:
description:
- Specifies the authentication type.
required: false
choices: ['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple']
default: null
auth_text_simple:
description:
- Specifies a password for simple authentication.
The value is a string of 1 to 8 characters.
required: false
default: null
auth_key_id:
description:
            - Authentication key id when C(auth_mode) is 'hmac-sha256', 'md5' or 'hmac-md5'.
Valid value is an integer is in the range from 1 to 255.
required: false
default: null
auth_text_md5:
description:
- Specifies a password for MD5, HMAC-MD5, or HMAC-SHA256 authentication.
The value is a string of 1 to 255 case-sensitive characters, spaces not supported.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
"""
EXAMPLES = '''
- name: eth_trunk module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Enables OSPF and sets the cost on an interface
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
cost: 100
provider: '{{ cli }}'
- name: Sets the dead interval of the OSPF neighbor
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
dead_interval: 100
provider: '{{ cli }}'
- name: Sets the interval for sending Hello packets on an interface
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
hello_interval: 2
provider: '{{ cli }}'
- name: Disables an interface from receiving and sending OSPF packets
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
silent_interface: true
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30", "cost": "100"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"process_id": "1", "area": "0.0.0.100"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
    sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30",
             "cost": "100", "dead_interval": "40", "hello_interval": "10",
             "silent_interface": "false", "auth_mode": "none"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface 10GE1/0/30",
"ospf enable 1 area 0.0.0.100",
"ospf cost 100"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_OSPF = """
<filter type="subtree">
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite>
<processId>%s</processId>
<routerId></routerId>
<vrfName></vrfName>
<areas>
<area>
<areaId>%s</areaId>
<interfaces>
<interface>
<ifName>%s</ifName>
<networkType></networkType>
<helloInterval></helloInterval>
<deadInterval></deadInterval>
<silentEnable></silentEnable>
<configCost></configCost>
<authenticationMode></authenticationMode>
<authTextSimple></authTextSimple>
<keyId></keyId>
<authTextMd5></authTextMd5>
</interface>
</interfaces>
</area>
</areas>
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</filter>
"""
CE_NC_XML_BUILD_PROCESS = """
<config>
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite>
<processId>%s</processId>
<areas>
<area>
<areaId>%s</areaId>
%s
</area>
</areas>
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</config>
"""
CE_NC_XML_BUILD_MERGE_INTF = """
<interfaces>
<interface operation="merge">
%s
</interface>
</interfaces>
"""
CE_NC_XML_BUILD_DELETE_INTF = """
<interfaces>
<interface operation="delete">
%s
</interface>
</interfaces>
"""
CE_NC_XML_SET_IF_NAME = """
<ifName>%s</ifName>
"""
CE_NC_XML_SET_HELLO = """
<helloInterval>%s</helloInterval>
"""
CE_NC_XML_SET_DEAD = """
<deadInterval>%s</deadInterval>
"""
CE_NC_XML_SET_SILENT = """
<silentEnable>%s</silentEnable>
"""
CE_NC_XML_SET_COST = """
<configCost>%s</configCost>
"""
CE_NC_XML_SET_AUTH_MODE = """
<authenticationMode>%s</authenticationMode>
"""
CE_NC_XML_SET_AUTH_TEXT_SIMPLE = """
<authTextSimple>%s</authTextSimple>
"""
CE_NC_XML_SET_AUTH_MD5 = """
<keyId>%s</keyId>
<authTextMd5>%s</authTextMd5>
"""
def get_interface_type(interface):
    """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...

    Returns the lowercase type keyword, or None for unrecognized names.
    """
    if interface is None:
        return None

    # Ordered (prefix, type) table; probed in the same order as the original
    # if/elif chain so overlapping prefixes resolve identically.
    _PREFIXES = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )

    name = interface.upper()
    for prefix, iftype in _PREFIXES:
        if name.startswith(prefix):
            return iftype
    return None
def is_valid_v4addr(addr):
    """Return True if addr looks like a dotted-quad IPv4 address.

    Accepts exactly four dot-separated decimal octets, each 0..255
    (leading zeros are tolerated, as in the original implementation).
    """
    if not addr or '.' not in addr:
        return False
    octets = addr.split('.')
    if len(octets) != 4:
        return False
    for octet in octets:
        if not octet.isdigit() or int(octet) > 255:
            return False
    return True
class InterfaceOSPF(object):
    """
    Manages configuration of an OSPF interface instance.

    Reads module parameters, queries the current per-interface OSPF state
    over NETCONF, and creates/merges/deletes the interface's OSPF binding
    to converge on the requested state.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # module input info
        self.interface = self.module.params['interface']
        self.process_id = self.module.params['process_id']
        self.area = self.module.params['area']
        self.cost = self.module.params['cost']
        self.hello_interval = self.module.params['hello_interval']
        self.dead_interval = self.module.params['dead_interval']
        self.silent_interface = self.module.params['silent_interface']
        self.auth_mode = self.module.params['auth_mode']
        self.auth_text_simple = self.module.params['auth_text_simple']
        self.auth_key_id = self.module.params['auth_key_id']
        self.auth_text_md5 = self.module.params['auth_text_md5']
        self.state = self.module.params['state']

        # ospf info: device state as returned by get_ospf_dict()
        self.ospf_info = dict()

        # state tracking for the module result
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def init_module(self):
        """init module"""

        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)

    def netconf_set_config(self, xml_str, xml_name):
        """netconf set config; fail the module if the RPC is not acknowledged"""

        rcv_xml = set_nc_config(self.module, xml_str)
        if "<ok/>" not in rcv_xml:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)

    def get_area_ip(self):
        """convert an integer area id to dotted-quad; pass strings through"""

        if not self.area.isdigit():
            return self.area

        addr_int = ['0'] * 4
        addr_int[0] = str(((int(self.area) & 0xFF000000) >> 24) & 0xFF)
        addr_int[1] = str(((int(self.area) & 0x00FF0000) >> 16) & 0xFF)
        addr_int[2] = str(((int(self.area) & 0x0000FF00) >> 8) & 0XFF)
        addr_int[3] = str(int(self.area) & 0xFF)

        return '.'.join(addr_int)

    def get_ospf_dict(self):
        """ get one ospf attributes dict."""

        ospf_info = dict()
        conf_str = CE_NC_GET_OSPF % (
            self.process_id, self.get_area_ip(), self.interface)
        rcv_xml = get_nc_config(self.module, conf_str)

        if "<data/>" in rcv_xml:
            return ospf_info

        xml_str = rcv_xml.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")

        # get process base info
        root = ElementTree.fromstring(xml_str)
        ospfsite = root.find("data/ospfv2/ospfv2comm/ospfSites/ospfSite")
        # NOTE: ElementTree elements are falsy when they have no children, so
        # a present-but-empty ospfSite is treated the same as a missing one.
        if not ospfsite:
            self.module.fail_json(msg="Error: ospf process does not exist.")

        for site in ospfsite:
            if site.tag in ["processId", "routerId", "vrfName"]:
                ospf_info[site.tag] = site.text

        # get areas info
        ospf_info["areaId"] = ""
        areas = root.find(
            "data/ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area")
        # Compare against None explicitly: an Element with no children is
        # falsy, and iterating zero children is harmless and equivalent.
        if areas is not None:
            for area in areas:
                if area.tag == "areaId":
                    ospf_info["areaId"] = area.text
                    break

        # get interface info
        ospf_info["interface"] = dict()
        intf = root.find(
            "data/ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area/interfaces/interface")
        if intf is not None:
            for attr in intf:
                if attr.tag in ["ifName", "networkType",
                                "helloInterval", "deadInterval",
                                "silentEnable", "configCost",
                                "authenticationMode", "authTextSimple",
                                "keyId", "authTextMd5"]:
                    ospf_info["interface"][attr.tag] = attr.text

        return ospf_info

    def set_ospf_interface(self):
        """set interface ospf enable, and set its ospf attributes"""

        xml_intf = CE_NC_XML_SET_IF_NAME % self.interface

        # ospf view
        self.updates_cmd.append("ospf %s" % self.process_id)
        self.updates_cmd.append("area %s" % self.get_area_ip())
        if self.silent_interface:
            xml_intf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower()
            self.updates_cmd.append("silent-interface %s" % self.interface)
        else:
            self.updates_cmd.append("undo silent-interface %s" % self.interface)

        # interface view
        self.updates_cmd.append("interface %s" % self.interface)
        self.updates_cmd.append("ospf enable process %s area %s" % (
            self.process_id, self.get_area_ip()))
        if self.cost:
            xml_intf += CE_NC_XML_SET_COST % self.cost
            self.updates_cmd.append("ospf cost %s" % self.cost)
        if self.hello_interval:
            xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval
            self.updates_cmd.append("ospf timer hello %s" %
                                    self.hello_interval)
        if self.dead_interval:
            xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval
            self.updates_cmd.append("ospf timer dead %s" % self.dead_interval)
        if self.auth_mode:
            xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode
            if self.auth_mode == "none":
                self.updates_cmd.append("undo ospf authentication-mode")
            else:
                self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode)
            # Refine the command (pop and re-append) when key material is given.
            if self.auth_mode == "simple" and self.auth_text_simple:
                xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple
                self.updates_cmd.pop()
                self.updates_cmd.append("ospf authentication-mode %s %s"
                                        % (self.auth_mode, self.auth_text_simple))
            elif self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id:
                xml_intf += CE_NC_XML_SET_AUTH_MD5 % (
                    self.auth_key_id, self.auth_text_md5)
                self.updates_cmd.pop()
                self.updates_cmd.append("ospf authentication-mode %s %s %s"
                                        % (self.auth_mode, self.auth_key_id, self.auth_text_md5))
            else:
                pass

        xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id,
                                             self.get_area_ip(),
                                             (CE_NC_XML_BUILD_MERGE_INTF % xml_intf))
        self.netconf_set_config(xml_str, "SET_INTERFACE_OSPF")
        self.changed = True

    def merge_ospf_interface(self):
        """merge interface ospf attributes"""

        intf_dict = self.ospf_info["interface"]

        # ospf view: silent-interface lives under the process/area
        xml_ospf = ""
        if intf_dict.get("silentEnable") != str(self.silent_interface).lower():
            xml_ospf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower()
            self.updates_cmd.append("ospf %s" % self.process_id)
            self.updates_cmd.append("area %s" % self.get_area_ip())
            if self.silent_interface:
                self.updates_cmd.append("silent-interface %s" % self.interface)
            else:
                self.updates_cmd.append("undo silent-interface %s" % self.interface)

        # interface view: only push attributes that differ from the device
        xml_intf = ""
        self.updates_cmd.append("interface %s" % self.interface)
        if self.cost and intf_dict.get("configCost") != self.cost:
            xml_intf += CE_NC_XML_SET_COST % self.cost
            self.updates_cmd.append("ospf cost %s" % self.cost)
        if self.hello_interval and intf_dict.get("helloInterval") != self.hello_interval:
            xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval
            self.updates_cmd.append("ospf timer hello %s" %
                                    self.hello_interval)
        if self.dead_interval and intf_dict.get("deadInterval") != self.dead_interval:
            xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval
            self.updates_cmd.append("ospf timer dead %s" % self.dead_interval)
        if self.auth_mode:
            # NOTE: for security, authentication config will always be update
            xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode
            if self.auth_mode == "none":
                self.updates_cmd.append("undo ospf authentication-mode")
            else:
                self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode)
            if self.auth_mode == "simple" and self.auth_text_simple:
                xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple
                self.updates_cmd.pop()
                self.updates_cmd.append("ospf authentication-mode %s %s"
                                        % (self.auth_mode, self.auth_text_simple))
            elif self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id:
                xml_intf += CE_NC_XML_SET_AUTH_MD5 % (
                    self.auth_key_id, self.auth_text_md5)
                self.updates_cmd.pop()
                self.updates_cmd.append("ospf authentication-mode %s %s %s"
                                        % (self.auth_mode, self.auth_key_id, self.auth_text_md5))
            else:
                pass

        if not xml_intf:
            self.updates_cmd.pop()  # remove command: interface

        if not xml_ospf and not xml_intf:
            return

        xml_sum = CE_NC_XML_SET_IF_NAME % self.interface
        xml_sum += xml_ospf + xml_intf
        xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id,
                                             self.get_area_ip(),
                                             (CE_NC_XML_BUILD_MERGE_INTF % xml_sum))
        self.netconf_set_config(xml_str, "MERGE_INTERFACE_OSPF")
        self.changed = True

    def unset_ospf_interface(self):
        """set interface ospf disable, and all its ospf attributes will be removed"""

        intf_dict = self.ospf_info["interface"]
        xml_sum = ""
        xml_intf = CE_NC_XML_SET_IF_NAME % self.interface
        # Silent-interface must be merged back to false before the binding is
        # deleted, since it is configured in the process/area view.
        if intf_dict.get("silentEnable") == "true":
            xml_sum += CE_NC_XML_BUILD_MERGE_INTF % (
                xml_intf + (CE_NC_XML_SET_SILENT % "false"))
            self.updates_cmd.append("ospf %s" % self.process_id)
            self.updates_cmd.append("area %s" % self.get_area_ip())
            self.updates_cmd.append(
                "undo silent-interface %s" % self.interface)

        xml_sum += CE_NC_XML_BUILD_DELETE_INTF % xml_intf
        xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id,
                                             self.get_area_ip(),
                                             xml_sum)
        self.netconf_set_config(xml_str, "DELETE_INTERFACE_OSPF")
        self.updates_cmd.append("undo ospf cost")
        self.updates_cmd.append("undo ospf timer hello")
        self.updates_cmd.append("undo ospf timer dead")
        self.updates_cmd.append("undo ospf authentication-mode")
        self.updates_cmd.append("undo ospf enable %s area %s" % (
            self.process_id, self.get_area_ip()))
        self.changed = True

    def check_params(self):
        """Check all input params"""

        self.interface = self.interface.replace(" ", "").upper()

        # interface check
        if not get_interface_type(self.interface):
            self.module.fail_json(msg="Error: interface is invalid.")

        # process_id check
        if not self.process_id.isdigit():
            self.module.fail_json(msg="Error: process_id is not digit.")
        if int(self.process_id) < 1 or int(self.process_id) > 4294967295:
            self.module.fail_json(msg="Error: process_id must be an integer between 1 and 4294967295.")

        # area check
        if self.area.isdigit():
            if int(self.area) < 0 or int(self.area) > 4294967295:
                self.module.fail_json(msg="Error: area id (Integer) must be between 0 and 4294967295.")
        else:
            if not is_valid_v4addr(self.area):
                self.module.fail_json(msg="Error: area id is invalid.")

        # area authentication check
        if self.state == "present":
            if self.auth_mode:
                if self.auth_mode == "simple":
                    if self.auth_text_simple and len(self.auth_text_simple) > 8:
                        self.module.fail_json(
                            msg="Error: auth_text_simple is not in the range from 1 to 8.")
                # Bugfix: this list used to be ["hmac-sha256", "hmac-sha256",
                # "md5"], duplicating "hmac-sha256" and omitting "hmac-md5",
                # so key-id/password validation was skipped for hmac-md5.
                if self.auth_mode in ["hmac-sha256", "hmac-md5", "md5"]:
                    if self.auth_key_id and not self.auth_text_md5:
                        self.module.fail_json(
                            msg='Error: auth_key_id and auth_text_md5 should be set at the same time.')
                    if not self.auth_key_id and self.auth_text_md5:
                        self.module.fail_json(
                            msg='Error: auth_key_id and auth_text_md5 should be set at the same time.')
                if self.auth_key_id:
                    if not self.auth_key_id.isdigit():
                        self.module.fail_json(
                            msg="Error: auth_key_id is not digit.")
                    if int(self.auth_key_id) < 1 or int(self.auth_key_id) > 255:
                        self.module.fail_json(
                            msg="Error: auth_key_id is not in the range from 1 to 255.")
                if self.auth_text_md5 and len(self.auth_text_md5) > 255:
                    self.module.fail_json(
                        msg="Error: auth_text_md5 is not in the range from 1 to 255.")

        # cost check
        if self.cost:
            if not self.cost.isdigit():
                self.module.fail_json(msg="Error: cost is not digit.")
            if int(self.cost) < 1 or int(self.cost) > 65535:
                self.module.fail_json(
                    msg="Error: cost is not in the range from 1 to 65535")

        # hello_interval check
        if self.hello_interval:
            if not self.hello_interval.isdigit():
                self.module.fail_json(
                    msg="Error: hello_interval is not digit.")
            if int(self.hello_interval) < 1 or int(self.hello_interval) > 65535:
                self.module.fail_json(
                    msg="Error: hello_interval is not in the range from 1 to 65535")

        # dead_interval check
        if self.dead_interval:
            if not self.dead_interval.isdigit():
                self.module.fail_json(msg="Error: dead_interval is not digit.")
            if int(self.dead_interval) < 1 or int(self.dead_interval) > 235926000:
                self.module.fail_json(
                    msg="Error: dead_interval is not in the range from 1 to 235926000")

    def get_proposed(self):
        """get proposed info"""

        self.proposed["interface"] = self.interface
        self.proposed["process_id"] = self.process_id
        self.proposed["area"] = self.get_area_ip()
        self.proposed["cost"] = self.cost
        self.proposed["hello_interval"] = self.hello_interval
        self.proposed["dead_interval"] = self.dead_interval
        self.proposed["silent_interface"] = self.silent_interface
        if self.auth_mode:
            self.proposed["auth_mode"] = self.auth_mode
            if self.auth_mode == "simple":
                self.proposed["auth_text_simple"] = self.auth_text_simple
            # Bugfix: same duplicated list as in check_params(); hmac-md5 key
            # info was missing from the proposed output.
            if self.auth_mode in ["hmac-sha256", "hmac-md5", "md5"]:
                self.proposed["auth_key_id"] = self.auth_key_id
                self.proposed["auth_text_md5"] = self.auth_text_md5
        self.proposed["state"] = self.state

    def get_existing(self):
        """get existing info"""

        if not self.ospf_info:
            return

        if self.ospf_info["interface"]:
            self.existing["interface"] = self.interface
            self.existing["cost"] = self.ospf_info["interface"].get("configCost")
            self.existing["hello_interval"] = self.ospf_info["interface"].get("helloInterval")
            self.existing["dead_interval"] = self.ospf_info["interface"].get("deadInterval")
            self.existing["silent_interface"] = self.ospf_info["interface"].get("silentEnable")
            self.existing["auth_mode"] = self.ospf_info["interface"].get("authenticationMode")
            self.existing["auth_text_simple"] = self.ospf_info["interface"].get("authTextSimple")
            self.existing["auth_key_id"] = self.ospf_info["interface"].get("keyId")
            self.existing["auth_text_md5"] = self.ospf_info["interface"].get("authTextMd5")
        self.existing["process_id"] = self.ospf_info["processId"]
        self.existing["area"] = self.ospf_info["areaId"]

    def get_end_state(self):
        """get end state info"""

        ospf_info = self.get_ospf_dict()
        if not ospf_info:
            return

        if ospf_info["interface"]:
            self.end_state["interface"] = self.interface
            self.end_state["cost"] = ospf_info["interface"].get("configCost")
            self.end_state["hello_interval"] = ospf_info["interface"].get("helloInterval")
            self.end_state["dead_interval"] = ospf_info["interface"].get("deadInterval")
            self.end_state["silent_interface"] = ospf_info["interface"].get("silentEnable")
            self.end_state["auth_mode"] = ospf_info["interface"].get("authenticationMode")
            self.end_state["auth_text_simple"] = ospf_info["interface"].get("authTextSimple")
            self.end_state["auth_key_id"] = ospf_info["interface"].get("keyId")
            self.end_state["auth_text_md5"] = ospf_info["interface"].get("authTextMd5")
        self.end_state["process_id"] = ospf_info["processId"]
        self.end_state["area"] = ospf_info["areaId"]

    def work(self):
        """worker: validate, read device state, converge, and report"""

        self.check_params()
        self.ospf_info = self.get_ospf_dict()
        self.get_existing()
        self.get_proposed()

        # deal present or absent
        if self.state == "present":
            if not self.ospf_info or not self.ospf_info["interface"]:
                # create ospf area and set interface config
                self.set_ospf_interface()
            else:
                # merge interface ospf area config
                self.merge_ospf_interface()
        else:
            if self.ospf_info and self.ospf_info["interface"]:
                # delete interface ospf area config
                self.unset_ospf_interface()

        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)
def main():
    """Module main: build the argument spec and run the OSPF interface module."""
    argument_spec = {
        'interface': {'required': True, 'type': 'str'},
        'process_id': {'required': True, 'type': 'str'},
        'area': {'required': True, 'type': 'str'},
        'cost': {'required': False, 'type': 'str'},
        'hello_interval': {'required': False, 'type': 'str'},
        'dead_interval': {'required': False, 'type': 'str'},
        'silent_interface': {'required': False, 'default': False, 'type': 'bool'},
        'auth_mode': {
            'required': False, 'type': 'str',
            'choices': ['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple'],
        },
        'auth_text_simple': {'required': False, 'type': 'str', 'no_log': True},
        'auth_key_id': {'required': False, 'type': 'str'},
        'auth_text_md5': {'required': False, 'type': 'str', 'no_log': True},
        'state': {'required': False, 'default': 'present',
                  'choices': ['present', 'absent']},
    }
    argument_spec.update(ce_argument_spec)
    InterfaceOSPF(argument_spec).work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
Ayysir/PokemonGo-Map | pokemon_pb2.py | 32 | 78759 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pokemon.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pokemon.proto',
package='',
serialized_pb=_b('\n\rpokemon.proto\"\xc5\x06\n\x0eRequestEnvelop\x12\x10\n\x08unknown1\x18\x01 \x02(\x05\x12\x0e\n\x06rpc_id\x18\x03 \x01(\x03\x12*\n\x08requests\x18\x04 \x03(\x0b\x32\x18.RequestEnvelop.Requests\x12*\n\x08unknown6\x18\x06 \x01(\x0b\x32\x18.RequestEnvelop.Unknown6\x12\x10\n\x08latitude\x18\x07 \x01(\x06\x12\x11\n\tlongitude\x18\x08 \x01(\x06\x12\x10\n\x08\x61ltitude\x18\t \x01(\x06\x12&\n\x04\x61uth\x18\n \x01(\x0b\x32\x18.RequestEnvelop.AuthInfo\x12\x1f\n\tunknown11\x18\x0b \x01(\x0b\x32\x0c.UnknownAuth\x12\x11\n\tunknown12\x18\x0c \x01(\x03\x1a)\n\x08Requests\x12\x0c\n\x04type\x18\x01 \x02(\x05\x12\x0f\n\x07message\x18\x02 \x01(\x0c\x1a$\n\x13MessageSingleString\x12\r\n\x05\x62ytes\x18\x01 \x02(\x0c\x1a\x1e\n\x10MessageSingleInt\x12\n\n\x02\x66\x31\x18\x01 \x02(\x03\x1a(\n\x0eMessageTwoInts\x12\n\n\x02\x66\x31\x18\x01 \x02(\x03\x12\n\n\x02\x66\x35\x18\x05 \x02(\x03\x1a@\n\x0bMessageQuad\x12\n\n\x02\x66\x31\x18\x01 \x02(\x0c\x12\n\n\x02\x66\x32\x18\x02 \x02(\x0c\x12\x0b\n\x03lat\x18\x03 \x02(\x06\x12\x0c\n\x04long\x18\x04 \x02(\x06\x1a\x16\n\x03Wat\x12\x0f\n\x04lols\x18\x80\x80\x80@ \x03(\x03\x1aI\n\x08Unknown3\x12\x10\n\x08unknown4\x18\x01 \x02(\x0c\x12\x10\n\x08unknown2\x18\x02 \x01(\x0c\x12\x0b\n\x03lat\x18\x03 \x01(\x06\x12\x0c\n\x04long\x18\x04 \x01(\x06\x1ao\n\x08Unknown6\x12\x10\n\x08unknown1\x18\x01 \x02(\x05\x12\x33\n\x08unknown2\x18\x02 \x02(\x0b\x32!.RequestEnvelop.Unknown6.Unknown2\x1a\x1c\n\x08Unknown2\x12\x10\n\x08unknown1\x18\x01 \x02(\x0c\x1au\n\x08\x41uthInfo\x12\x10\n\x08provider\x18\x01 \x02(\t\x12+\n\x05token\x18\x02 \x02(\x0b\x32\x1c.RequestEnvelop.AuthInfo.JWT\x1a*\n\x03JWT\x12\x10\n\x08\x63ontents\x18\x01 \x02(\t\x12\x11\n\tunknown13\x18\x02 \x02(\x05\"F\n\x0bUnknownAuth\x12\x11\n\tunknown71\x18\x01 \x01(\x0c\x12\x11\n\tunknown72\x18\x02 \x01(\x03\x12\x11\n\tunknown73\x18\x03 \x01(\x0c\"\xd8\x15\n\x0fResponseEnvelop\x12\x10\n\x08unknown1\x18\x01 \x02(\x05\x12\x10\n\x08unknown2\x18\x02 
\x01(\x03\x12\x0f\n\x07\x61pi_url\x18\x03 \x01(\t\x12+\n\x08unknown6\x18\x06 \x01(\x0b\x32\x19.ResponseEnvelop.Unknown6\x12\x1e\n\x08unknown7\x18\x07 \x01(\x0b\x32\x0c.UnknownAuth\x12\x0f\n\x07payload\x18\x64 \x03(\x0c\x1ap\n\x08Unknown6\x12\x10\n\x08unknown1\x18\x01 \x02(\x05\x12\x34\n\x08unknown2\x18\x02 \x02(\x0b\x32\".ResponseEnvelop.Unknown6.Unknown2\x1a\x1c\n\x08Unknown2\x12\x10\n\x08unknown1\x18\x01 \x02(\x0c\x1a\x41\n\x10HeartbeatPayload\x12-\n\x05\x63\x65lls\x18\x01 \x03(\x0b\x32\x1e.ResponseEnvelop.ClientMapCell\x1a\xe9\x03\n\rClientMapCell\x12\x10\n\x08S2CellId\x18\x01 \x02(\x04\x12\x12\n\nAsOfTimeMs\x18\x02 \x02(\x03\x12/\n\x04\x46ort\x18\x03 \x03(\x0b\x32!.ResponseEnvelop.PokemonFortProto\x12:\n\nSpawnPoint\x18\x04 \x03(\x0b\x32&.ResponseEnvelop.ClientSpawnPointProto\x12\x36\n\x0bWildPokemon\x18\x05 \x03(\x0b\x32!.ResponseEnvelop.WildPokemonProto\x12\x17\n\x0fIsTruncatedList\x18\x07 \x01(\x08\x12=\n\x0b\x46ortSummary\x18\x08 \x03(\x0b\x32(.ResponseEnvelop.PokemonSummaryFortProto\x12\x43\n\x13\x44\x65\x63imatedSpawnPoint\x18\t \x03(\x0b\x32&.ResponseEnvelop.ClientSpawnPointProto\x12\x34\n\nMapPokemon\x18\n \x03(\x0b\x32 .ResponseEnvelop.MapPokemonProto\x12:\n\rNearbyPokemon\x18\x0b \x03(\x0b\x32#.ResponseEnvelop.NearbyPokemonProto\x1ah\n\x0bWildPokemon\x12\x10\n\x08UniqueId\x18\x01 \x02(\t\x12\x11\n\tPokemonId\x18\x02 \x02(\t\x12\x34\n\x07pokemon\x18\x0b \x03(\x0b\x32#.ResponseEnvelop.NearbyPokemonProto\x1a\x92\x01\n\x0fMapPokemonProto\x12\x14\n\x0cSpawnpointId\x18\x01 \x02(\t\x12\x13\n\x0b\x45ncounterId\x18\x02 \x02(\x04\x12\x15\n\rPokedexTypeId\x18\x03 \x02(\x05\x12\x18\n\x10\x45xpirationTimeMs\x18\x04 \x02(\x03\x12\x10\n\x08Latitude\x18\x05 \x02(\x01\x12\x11\n\tLongitude\x18\x06 \x02(\x01\x1a\x80\x03\n\x10PokemonFortProto\x12\x0e\n\x06\x46ortId\x18\x01 \x02(\t\x12\x16\n\x0eLastModifiedMs\x18\x02 \x02(\x03\x12\x10\n\x08Latitude\x18\x03 \x02(\x01\x12\x11\n\tLongitude\x18\x04 \x02(\x01\x12\x0c\n\x04Team\x18\x05 
\x02(\x05\x12\x16\n\x0eGuardPokemonId\x18\x06 \x02(\x05\x12\x19\n\x11GuardPokemonLevel\x18\x07 \x02(\x05\x12\x0f\n\x07\x45nabled\x18\x08 \x02(\x08\x12\x10\n\x08\x46ortType\x18\t \x02(\x05\x12\x11\n\tGymPoints\x18\n \x02(\x03\x12\x12\n\nIsInBattle\x18\x0b \x02(\x08\x12\x1a\n\x12\x41\x63tivePortModifier\x18\x0c \x01(\x0c\x12\x34\n\x08LureInfo\x18\r \x01(\x0b\x32\".ResponseEnvelop.FortLureInfoProto\x12\x1a\n\x12\x43ooldownCompleteMs\x18\x0e \x02(\x03\x12\x0f\n\x07Sponsor\x18\x0f \x02(\x05\x12\x15\n\rRenderingType\x18\x10 \x01(\x05\x1a\x8e\x01\n\x11\x46ortLureInfoProto\x12\x0e\n\x06\x46ortId\x18\x01 \x02(\t\x12\x10\n\x08unknown2\x18\x02 \x02(\x01\x12\x17\n\x0f\x41\x63tivePokemonId\x18\x03 \x02(\x05\x12\x1e\n\x16LureExpiresTimestampMs\x18\x04 \x02(\x03\x12\x1e\n\x16\x44\x65ployerPlayerCodename\x18\x05 \x02(\t\x1am\n\x17PokemonSummaryFortProto\x12\x15\n\rFortSummaryId\x18\x01 \x02(\t\x12\x16\n\x0eLastModifiedMs\x18\x02 \x02(\x03\x12\x10\n\x08Latitude\x18\x03 \x02(\x01\x12\x11\n\tLongitude\x18\x04 \x02(\x01\x1a<\n\x15\x43lientSpawnPointProto\x12\x10\n\x08Latitude\x18\x02 \x02(\x01\x12\x11\n\tLongitude\x18\x03 \x02(\x01\x1a\xfa\x01\n\x10WildPokemonProto\x12\x13\n\x0b\x45ncounterId\x18\x01 \x01(\x04\x12\x16\n\x0eLastModifiedMs\x18\x02 \x01(\x03\x12\x10\n\x08Latitude\x18\x03 \x01(\x01\x12\x11\n\tLongitude\x18\x04 \x01(\x01\x12\x14\n\x0cSpawnPointId\x18\x05 \x01(\t\x12:\n\x07pokemon\x18\x07 \x01(\x0b\x32).ResponseEnvelop.WildPokemonProto.Pokemon\x12\x18\n\x10TimeTillHiddenMs\x18\x0b \x01(\x05\x1a(\n\x07Pokemon\x12\n\n\x02Id\x18\x01 \x01(\x04\x12\x11\n\tPokemonId\x18\x02 \x01(\x05\x1aX\n\x12NearbyPokemonProto\x12\x15\n\rPokedexNumber\x18\x01 \x01(\x05\x12\x16\n\x0e\x44istanceMeters\x18\x02 \x01(\x02\x12\x13\n\x0b\x45ncounterId\x18\x03 \x01(\x04\x1aM\n\x0eProfilePayload\x12\x10\n\x08unknown1\x18\x01 \x02(\x05\x12)\n\x07profile\x18\x02 \x01(\x0b\x32\x18.ResponseEnvelop.Profile\x1a\xaa\x04\n\x07Profile\x12\x15\n\rcreation_time\x18\x01 \x02(\x03\x12\x10\n\x08username\x18\x02 
\x01(\t\x12\x0c\n\x04team\x18\x05 \x01(\x05\x12\x10\n\x08tutorial\x18\x07 \x01(\x0c\x12\x36\n\x06\x61vatar\x18\x08 \x01(\x0b\x32&.ResponseEnvelop.Profile.AvatarDetails\x12\x14\n\x0cpoke_storage\x18\t \x01(\x05\x12\x14\n\x0citem_storage\x18\n \x01(\x05\x12\x38\n\x0b\x64\x61ily_bonus\x18\x0b \x01(\x0b\x32#.ResponseEnvelop.Profile.DailyBonus\x12\x11\n\tunknown12\x18\x0c \x01(\x0c\x12\x11\n\tunknown13\x18\r \x01(\x0c\x12\x33\n\x08\x63urrency\x18\x0e \x03(\x0b\x32!.ResponseEnvelop.Profile.Currency\x1aX\n\rAvatarDetails\x12\x10\n\x08unknown2\x18\x02 \x01(\x05\x12\x10\n\x08unknown3\x18\x03 \x01(\x05\x12\x10\n\x08unknown9\x18\t \x01(\x05\x12\x11\n\tunknown10\x18\n \x01(\x05\x1aY\n\nDailyBonus\x12\x1e\n\x16NextCollectTimestampMs\x18\x01 \x01(\x03\x12+\n#NextDefenderBonusCollectTimestampMs\x18\x02 \x01(\x03\x1a(\n\x08\x43urrency\x12\x0c\n\x04type\x18\x01 \x02(\t\x12\x0e\n\x06\x61mount\x18\x02 \x01(\x05')
)
# Register the file descriptor with the default symbol database so the
# generated message types defined below can be resolved by name at runtime.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# NOTE(review): everything below is protoc-generated descriptor data for
# pokemon.proto ("DO NOT EDIT"); only comments are hand-added here.

# RequestEnvelop.Requests: one sub-request — required int32 `type` (field 1),
# optional bytes `message` payload (field 2).
_REQUESTENVELOP_REQUESTS = _descriptor.Descriptor(
  name='Requests',
  full_name='RequestEnvelop.Requests',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='RequestEnvelop.Requests.type', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='message', full_name='RequestEnvelop.Requests.message', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=305,
  serialized_end=346,
)

# RequestEnvelop.MessageSingleString: wraps a single required bytes field.
_REQUESTENVELOP_MESSAGESINGLESTRING = _descriptor.Descriptor(
  name='MessageSingleString',
  full_name='RequestEnvelop.MessageSingleString',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='bytes', full_name='RequestEnvelop.MessageSingleString.bytes', index=0,
      number=1, type=12, cpp_type=9, label=2,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=348,
  serialized_end=384,
)

# RequestEnvelop.MessageSingleInt: wraps a single required int64 `f1`.
_REQUESTENVELOP_MESSAGESINGLEINT = _descriptor.Descriptor(
  name='MessageSingleInt',
  full_name='RequestEnvelop.MessageSingleInt',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='f1', full_name='RequestEnvelop.MessageSingleInt.f1', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=386,
  serialized_end=416,
)

# RequestEnvelop.MessageTwoInts: two required int64s on field numbers 1 and 5.
_REQUESTENVELOP_MESSAGETWOINTS = _descriptor.Descriptor(
  name='MessageTwoInts',
  full_name='RequestEnvelop.MessageTwoInts',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='f1', full_name='RequestEnvelop.MessageTwoInts.f1', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='f5', full_name='RequestEnvelop.MessageTwoInts.f5', index=1,
      number=5, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=418,
  serialized_end=458,
)
# RequestEnvelop.MessageQuad: two required bytes fields plus required
# fixed64 `lat`/`long` coordinates.
_REQUESTENVELOP_MESSAGEQUAD = _descriptor.Descriptor(
  name='MessageQuad',
  full_name='RequestEnvelop.MessageQuad',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='f1', full_name='RequestEnvelop.MessageQuad.f1', index=0,
      number=1, type=12, cpp_type=9, label=2,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='f2', full_name='RequestEnvelop.MessageQuad.f2', index=1,
      number=2, type=12, cpp_type=9, label=2,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='lat', full_name='RequestEnvelop.MessageQuad.lat', index=2,
      number=3, type=6, cpp_type=4, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='long', full_name='RequestEnvelop.MessageQuad.long', index=3,
      number=4, type=6, cpp_type=4, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=460,
  serialized_end=524,
)

# RequestEnvelop.Wat: a repeated int64 `lols` on the unusually high field
# number 134217728 (0x8000000).
_REQUESTENVELOP_WAT = _descriptor.Descriptor(
  name='Wat',
  full_name='RequestEnvelop.Wat',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='lols', full_name='RequestEnvelop.Wat.lols', index=0,
      number=134217728, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=526,
  serialized_end=548,
)

# RequestEnvelop.Unknown3: required bytes `unknown4`, optional bytes
# `unknown2`, optional fixed64 `lat`/`long`.
_REQUESTENVELOP_UNKNOWN3 = _descriptor.Descriptor(
  name='Unknown3',
  full_name='RequestEnvelop.Unknown3',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown4', full_name='RequestEnvelop.Unknown3.unknown4', index=0,
      number=1, type=12, cpp_type=9, label=2,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown2', full_name='RequestEnvelop.Unknown3.unknown2', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='lat', full_name='RequestEnvelop.Unknown3.lat', index=2,
      number=3, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='long', full_name='RequestEnvelop.Unknown3.long', index=3,
      number=4, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=550,
  serialized_end=623,
)
# RequestEnvelop.Unknown6.Unknown2: single required bytes field.
_REQUESTENVELOP_UNKNOWN6_UNKNOWN2 = _descriptor.Descriptor(
  name='Unknown2',
  full_name='RequestEnvelop.Unknown6.Unknown2',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown1', full_name='RequestEnvelop.Unknown6.Unknown2.unknown1', index=0,
      number=1, type=12, cpp_type=9, label=2,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=708,
  serialized_end=736,
)

# RequestEnvelop.Unknown6: required int32 plus a required nested Unknown2
# message (declared above so it can be listed in nested_types).
_REQUESTENVELOP_UNKNOWN6 = _descriptor.Descriptor(
  name='Unknown6',
  full_name='RequestEnvelop.Unknown6',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown1', full_name='RequestEnvelop.Unknown6.unknown1', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown2', full_name='RequestEnvelop.Unknown6.unknown2', index=1,
      number=2, type=11, cpp_type=10, label=2,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_REQUESTENVELOP_UNKNOWN6_UNKNOWN2, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=625,
  serialized_end=736,
)

# RequestEnvelop.AuthInfo.JWT: required string token contents plus a
# required int32 `unknown13`.
_REQUESTENVELOP_AUTHINFO_JWT = _descriptor.Descriptor(
  name='JWT',
  full_name='RequestEnvelop.AuthInfo.JWT',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='contents', full_name='RequestEnvelop.AuthInfo.JWT.contents', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown13', full_name='RequestEnvelop.AuthInfo.JWT.unknown13', index=1,
      number=2, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=813,
  serialized_end=855,
)

# RequestEnvelop.AuthInfo: required string `provider` and a required
# nested JWT `token`.
_REQUESTENVELOP_AUTHINFO = _descriptor.Descriptor(
  name='AuthInfo',
  full_name='RequestEnvelop.AuthInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='provider', full_name='RequestEnvelop.AuthInfo.provider', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='token', full_name='RequestEnvelop.AuthInfo.token', index=1,
      number=2, type=11, cpp_type=10, label=2,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_REQUESTENVELOP_AUTHINFO_JWT, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=738,
  serialized_end=855,
)
# RequestEnvelop: top-level RPC request envelope — rpc id, repeated
# sub-requests, fixed64 latitude/longitude/altitude, and auth payloads.
# All nested message descriptors declared above are attached here.
_REQUESTENVELOP = _descriptor.Descriptor(
  name='RequestEnvelop',
  full_name='RequestEnvelop',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown1', full_name='RequestEnvelop.unknown1', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='rpc_id', full_name='RequestEnvelop.rpc_id', index=1,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='requests', full_name='RequestEnvelop.requests', index=2,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown6', full_name='RequestEnvelop.unknown6', index=3,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='latitude', full_name='RequestEnvelop.latitude', index=4,
      number=7, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='longitude', full_name='RequestEnvelop.longitude', index=5,
      number=8, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='altitude', full_name='RequestEnvelop.altitude', index=6,
      number=9, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='auth', full_name='RequestEnvelop.auth', index=7,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown11', full_name='RequestEnvelop.unknown11', index=8,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown12', full_name='RequestEnvelop.unknown12', index=9,
      number=12, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_REQUESTENVELOP_REQUESTS, _REQUESTENVELOP_MESSAGESINGLESTRING, _REQUESTENVELOP_MESSAGESINGLEINT, _REQUESTENVELOP_MESSAGETWOINTS, _REQUESTENVELOP_MESSAGEQUAD, _REQUESTENVELOP_WAT, _REQUESTENVELOP_UNKNOWN3, _REQUESTENVELOP_UNKNOWN6, _REQUESTENVELOP_AUTHINFO, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=18,
  serialized_end=855,
)

# UnknownAuth: three optional fields (bytes, int64, bytes) used in both
# request and response envelopes.
_UNKNOWNAUTH = _descriptor.Descriptor(
  name='UnknownAuth',
  full_name='UnknownAuth',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown71', full_name='UnknownAuth.unknown71', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown72', full_name='UnknownAuth.unknown72', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown73', full_name='UnknownAuth.unknown73', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=857,
  serialized_end=927,
)
# ResponseEnvelop.Unknown6.Unknown2: single required bytes field.
_RESPONSEENVELOP_UNKNOWN6_UNKNOWN2 = _descriptor.Descriptor(
  name='Unknown2',
  full_name='ResponseEnvelop.Unknown6.Unknown2',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown1', full_name='ResponseEnvelop.Unknown6.Unknown2.unknown1', index=0,
      number=1, type=12, cpp_type=9, label=2,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=708,
  serialized_end=736,
)

# ResponseEnvelop.Unknown6: required int32 plus a required nested Unknown2.
_RESPONSEENVELOP_UNKNOWN6 = _descriptor.Descriptor(
  name='Unknown6',
  full_name='ResponseEnvelop.Unknown6',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown1', full_name='ResponseEnvelop.Unknown6.unknown1', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown2', full_name='ResponseEnvelop.Unknown6.unknown2', index=1,
      number=2, type=11, cpp_type=10, label=2,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_RESPONSEENVELOP_UNKNOWN6_UNKNOWN2, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1096,
  serialized_end=1208,
)

# ResponseEnvelop.HeartbeatPayload: repeated ClientMapCell `cells`.
_RESPONSEENVELOP_HEARTBEATPAYLOAD = _descriptor.Descriptor(
  name='HeartbeatPayload',
  full_name='ResponseEnvelop.HeartbeatPayload',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='cells', full_name='ResponseEnvelop.HeartbeatPayload.cells', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1210,
  serialized_end=1275,
)
# ResponseEnvelop.ClientMapCell: one S2 map cell — required cell id and
# timestamp, plus repeated forts, spawn points, and pokemon sightings.
_RESPONSEENVELOP_CLIENTMAPCELL = _descriptor.Descriptor(
  name='ClientMapCell',
  full_name='ResponseEnvelop.ClientMapCell',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='S2CellId', full_name='ResponseEnvelop.ClientMapCell.S2CellId', index=0,
      number=1, type=4, cpp_type=4, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='AsOfTimeMs', full_name='ResponseEnvelop.ClientMapCell.AsOfTimeMs', index=1,
      number=2, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Fort', full_name='ResponseEnvelop.ClientMapCell.Fort', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='SpawnPoint', full_name='ResponseEnvelop.ClientMapCell.SpawnPoint', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='WildPokemon', full_name='ResponseEnvelop.ClientMapCell.WildPokemon', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='IsTruncatedList', full_name='ResponseEnvelop.ClientMapCell.IsTruncatedList', index=5,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='FortSummary', full_name='ResponseEnvelop.ClientMapCell.FortSummary', index=6,
      number=8, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='DecimatedSpawnPoint', full_name='ResponseEnvelop.ClientMapCell.DecimatedSpawnPoint', index=7,
      number=9, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='MapPokemon', full_name='ResponseEnvelop.ClientMapCell.MapPokemon', index=8,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='NearbyPokemon', full_name='ResponseEnvelop.ClientMapCell.NearbyPokemon', index=9,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1278,
  serialized_end=1767,
)

# ResponseEnvelop.WildPokemon: required string ids plus repeated nested
# pokemon entries.
_RESPONSEENVELOP_WILDPOKEMON = _descriptor.Descriptor(
  name='WildPokemon',
  full_name='ResponseEnvelop.WildPokemon',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='UniqueId', full_name='ResponseEnvelop.WildPokemon.UniqueId', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='PokemonId', full_name='ResponseEnvelop.WildPokemon.PokemonId', index=1,
      number=2, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='pokemon', full_name='ResponseEnvelop.WildPokemon.pokemon', index=2,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1769,
  serialized_end=1873,
)
# ResponseEnvelop.MapPokemonProto: a catchable map pokemon — spawnpoint
# and encounter ids, pokedex type, expiry, and double lat/long.
_RESPONSEENVELOP_MAPPOKEMONPROTO = _descriptor.Descriptor(
  name='MapPokemonProto',
  full_name='ResponseEnvelop.MapPokemonProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='SpawnpointId', full_name='ResponseEnvelop.MapPokemonProto.SpawnpointId', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='EncounterId', full_name='ResponseEnvelop.MapPokemonProto.EncounterId', index=1,
      number=2, type=4, cpp_type=4, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='PokedexTypeId', full_name='ResponseEnvelop.MapPokemonProto.PokedexTypeId', index=2,
      number=3, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ExpirationTimeMs', full_name='ResponseEnvelop.MapPokemonProto.ExpirationTimeMs', index=3,
      number=4, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Latitude', full_name='ResponseEnvelop.MapPokemonProto.Latitude', index=4,
      number=5, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Longitude', full_name='ResponseEnvelop.MapPokemonProto.Longitude', index=5,
      number=6, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1876,
  serialized_end=2022,
)
# Generated Descriptor for the nested message ResponseEnvelop.PokemonFortProto
# (16 fields, wire numbers 1-16; serialized range 2025-2409 in the file's
# serialized_pb).  This module is protoc output: regenerate it from the .proto
# definition rather than editing descriptor data by hand.
_RESPONSEENVELOP_POKEMONFORTPROTO = _descriptor.Descriptor(
  name='PokemonFortProto',
  full_name='ResponseEnvelop.PokemonFortProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='FortId', full_name='ResponseEnvelop.PokemonFortProto.FortId', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='LastModifiedMs', full_name='ResponseEnvelop.PokemonFortProto.LastModifiedMs', index=1,
      number=2, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Latitude', full_name='ResponseEnvelop.PokemonFortProto.Latitude', index=2,
      number=3, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Longitude', full_name='ResponseEnvelop.PokemonFortProto.Longitude', index=3,
      number=4, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Team', full_name='ResponseEnvelop.PokemonFortProto.Team', index=4,
      number=5, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='GuardPokemonId', full_name='ResponseEnvelop.PokemonFortProto.GuardPokemonId', index=5,
      number=6, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='GuardPokemonLevel', full_name='ResponseEnvelop.PokemonFortProto.GuardPokemonLevel', index=6,
      number=7, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Enabled', full_name='ResponseEnvelop.PokemonFortProto.Enabled', index=7,
      number=8, type=8, cpp_type=7, label=2,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='FortType', full_name='ResponseEnvelop.PokemonFortProto.FortType', index=8,
      number=9, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='GymPoints', full_name='ResponseEnvelop.PokemonFortProto.GymPoints', index=9,
      number=10, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='IsInBattle', full_name='ResponseEnvelop.PokemonFortProto.IsInBattle', index=10,
      number=11, type=8, cpp_type=7, label=2,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    # NOTE(review): 'ActivePortModifier' looks like a typo for
    # 'ActiveFortModifier' in the source .proto.  Renaming it would only
    # change the Python attribute name (the wire format keys on number 12),
    # but the fix belongs in the .proto, not in this generated file.
    _descriptor.FieldDescriptor(
      name='ActivePortModifier', full_name='ResponseEnvelop.PokemonFortProto.ActivePortModifier', index=11,
      number=12, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='LureInfo', full_name='ResponseEnvelop.PokemonFortProto.LureInfo', index=12,
      number=13, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='CooldownCompleteMs', full_name='ResponseEnvelop.PokemonFortProto.CooldownCompleteMs', index=13,
      number=14, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Sponsor', full_name='ResponseEnvelop.PokemonFortProto.Sponsor', index=14,
      number=15, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='RenderingType', full_name='ResponseEnvelop.PokemonFortProto.RenderingType', index=15,
      number=16, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2025,
  serialized_end=2409,
)
# Generated Descriptor for nested message ResponseEnvelop.FortLureInfoProto
# (5 fields, wire numbers 1-5).  Referenced by PokemonFortProto.LureInfo via
# the cross-link section below this block.  Auto-generated; do not hand-edit.
_RESPONSEENVELOP_FORTLUREINFOPROTO = _descriptor.Descriptor(
  name='FortLureInfoProto',
  full_name='ResponseEnvelop.FortLureInfoProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='FortId', full_name='ResponseEnvelop.FortLureInfoProto.FortId', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown2', full_name='ResponseEnvelop.FortLureInfoProto.unknown2', index=1,
      number=2, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ActivePokemonId', full_name='ResponseEnvelop.FortLureInfoProto.ActivePokemonId', index=2,
      number=3, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='LureExpiresTimestampMs', full_name='ResponseEnvelop.FortLureInfoProto.LureExpiresTimestampMs', index=3,
      number=4, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='DeployerPlayerCodename', full_name='ResponseEnvelop.FortLureInfoProto.DeployerPlayerCodename', index=4,
      number=5, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2412,
  serialized_end=2554,
)
# Generated Descriptor for nested message
# ResponseEnvelop.PokemonSummaryFortProto (4 fields, wire numbers 1-4).
# Auto-generated protoc output; regenerate from the .proto to change it.
_RESPONSEENVELOP_POKEMONSUMMARYFORTPROTO = _descriptor.Descriptor(
  name='PokemonSummaryFortProto',
  full_name='ResponseEnvelop.PokemonSummaryFortProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='FortSummaryId', full_name='ResponseEnvelop.PokemonSummaryFortProto.FortSummaryId', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='LastModifiedMs', full_name='ResponseEnvelop.PokemonSummaryFortProto.LastModifiedMs', index=1,
      number=2, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Latitude', full_name='ResponseEnvelop.PokemonSummaryFortProto.Latitude', index=2,
      number=3, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Longitude', full_name='ResponseEnvelop.PokemonSummaryFortProto.Longitude', index=3,
      number=4, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2556,
  serialized_end=2665,
)
# Generated Descriptor for nested message
# ResponseEnvelop.ClientSpawnPointProto.  Only fields 2 (Latitude) and
# 3 (Longitude) are declared here; wire number 1 is not mapped.
# Auto-generated protoc output; do not hand-edit.
_RESPONSEENVELOP_CLIENTSPAWNPOINTPROTO = _descriptor.Descriptor(
  name='ClientSpawnPointProto',
  full_name='ResponseEnvelop.ClientSpawnPointProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Latitude', full_name='ResponseEnvelop.ClientSpawnPointProto.Latitude', index=0,
      number=2, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Longitude', full_name='ResponseEnvelop.ClientSpawnPointProto.Longitude', index=1,
      number=3, type=1, cpp_type=5, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2667,
  serialized_end=2727,
)
# Generated Descriptor for the doubly-nested message
# ResponseEnvelop.WildPokemonProto.Pokemon (fields Id and PokemonId).
# Listed as a nested type of _RESPONSEENVELOP_WILDPOKEMONPROTO below.
# Auto-generated protoc output; do not hand-edit.
_RESPONSEENVELOP_WILDPOKEMONPROTO_POKEMON = _descriptor.Descriptor(
  name='Pokemon',
  full_name='ResponseEnvelop.WildPokemonProto.Pokemon',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Id', full_name='ResponseEnvelop.WildPokemonProto.Pokemon.Id', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='PokemonId', full_name='ResponseEnvelop.WildPokemonProto.Pokemon.PokemonId', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2940,
  serialized_end=2980,
)
# Generated Descriptor for nested message ResponseEnvelop.WildPokemonProto
# (7 fields; its sub-message 'pokemon' field 7 is wired to the nested
# Pokemon descriptor in the cross-link section below).  Auto-generated
# protoc output; do not hand-edit.
_RESPONSEENVELOP_WILDPOKEMONPROTO = _descriptor.Descriptor(
  name='WildPokemonProto',
  full_name='ResponseEnvelop.WildPokemonProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='EncounterId', full_name='ResponseEnvelop.WildPokemonProto.EncounterId', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='LastModifiedMs', full_name='ResponseEnvelop.WildPokemonProto.LastModifiedMs', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Latitude', full_name='ResponseEnvelop.WildPokemonProto.Latitude', index=2,
      number=3, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Longitude', full_name='ResponseEnvelop.WildPokemonProto.Longitude', index=3,
      number=4, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='SpawnPointId', full_name='ResponseEnvelop.WildPokemonProto.SpawnPointId', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='pokemon', full_name='ResponseEnvelop.WildPokemonProto.pokemon', index=5,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='TimeTillHiddenMs', full_name='ResponseEnvelop.WildPokemonProto.TimeTillHiddenMs', index=6,
      number=11, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_RESPONSEENVELOP_WILDPOKEMONPROTO_POKEMON, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2730,
  serialized_end=2980,
)
# Generated Descriptor for nested message ResponseEnvelop.NearbyPokemonProto
# (PokedexNumber, DistanceMeters, EncounterId).  Auto-generated protoc
# output; do not hand-edit.
_RESPONSEENVELOP_NEARBYPOKEMONPROTO = _descriptor.Descriptor(
  name='NearbyPokemonProto',
  full_name='ResponseEnvelop.NearbyPokemonProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='PokedexNumber', full_name='ResponseEnvelop.NearbyPokemonProto.PokedexNumber', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='DistanceMeters', full_name='ResponseEnvelop.NearbyPokemonProto.DistanceMeters', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='EncounterId', full_name='ResponseEnvelop.NearbyPokemonProto.EncounterId', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2982,
  serialized_end=3070,
)
# Generated Descriptor for nested message ResponseEnvelop.ProfilePayload;
# its 'profile' sub-message field is wired to the Profile descriptor in the
# cross-link section below.  Auto-generated protoc output; do not hand-edit.
_RESPONSEENVELOP_PROFILEPAYLOAD = _descriptor.Descriptor(
  name='ProfilePayload',
  full_name='ResponseEnvelop.ProfilePayload',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown1', full_name='ResponseEnvelop.ProfilePayload.unknown1', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='profile', full_name='ResponseEnvelop.ProfilePayload.profile', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3072,
  serialized_end=3149,
)
# Generated Descriptor for doubly-nested message
# ResponseEnvelop.Profile.AvatarDetails.  All four fields are reverse-
# engineered placeholders (unknown2/3/9/10) with non-contiguous wire
# numbers 2, 3, 9, 10.  Auto-generated protoc output; do not hand-edit.
_RESPONSEENVELOP_PROFILE_AVATARDETAILS = _descriptor.Descriptor(
  name='AvatarDetails',
  full_name='ResponseEnvelop.Profile.AvatarDetails',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown2', full_name='ResponseEnvelop.Profile.AvatarDetails.unknown2', index=0,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown3', full_name='ResponseEnvelop.Profile.AvatarDetails.unknown3', index=1,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown9', full_name='ResponseEnvelop.Profile.AvatarDetails.unknown9', index=2,
      number=9, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown10', full_name='ResponseEnvelop.Profile.AvatarDetails.unknown10', index=3,
      number=10, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3485,
  serialized_end=3573,
)
# Generated Descriptor for doubly-nested message
# ResponseEnvelop.Profile.DailyBonus (two int64 timestamp fields).
# Auto-generated protoc output; do not hand-edit.
_RESPONSEENVELOP_PROFILE_DAILYBONUS = _descriptor.Descriptor(
  name='DailyBonus',
  full_name='ResponseEnvelop.Profile.DailyBonus',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='NextCollectTimestampMs', full_name='ResponseEnvelop.Profile.DailyBonus.NextCollectTimestampMs', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='NextDefenderBonusCollectTimestampMs', full_name='ResponseEnvelop.Profile.DailyBonus.NextDefenderBonusCollectTimestampMs', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3575,
  serialized_end=3664,
)
# Generated Descriptor for doubly-nested message
# ResponseEnvelop.Profile.Currency (string 'type' + int32 'amount'); used
# by the repeated Profile.currency field.  Auto-generated protoc output;
# do not hand-edit.
_RESPONSEENVELOP_PROFILE_CURRENCY = _descriptor.Descriptor(
  name='Currency',
  full_name='ResponseEnvelop.Profile.Currency',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='ResponseEnvelop.Profile.Currency.type', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='amount', full_name='ResponseEnvelop.Profile.Currency.amount', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3666,
  serialized_end=3706,
)
# Generated Descriptor for nested message ResponseEnvelop.Profile
# (11 fields; nested types AvatarDetails, DailyBonus, Currency declared
# above).  The avatar/daily_bonus/currency fields are wired to those
# nested descriptors in the cross-link section below.  Auto-generated
# protoc output; do not hand-edit.
_RESPONSEENVELOP_PROFILE = _descriptor.Descriptor(
  name='Profile',
  full_name='ResponseEnvelop.Profile',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='creation_time', full_name='ResponseEnvelop.Profile.creation_time', index=0,
      number=1, type=3, cpp_type=2, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='username', full_name='ResponseEnvelop.Profile.username', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='team', full_name='ResponseEnvelop.Profile.team', index=2,
      number=5, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tutorial', full_name='ResponseEnvelop.Profile.tutorial', index=3,
      number=7, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='avatar', full_name='ResponseEnvelop.Profile.avatar', index=4,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='poke_storage', full_name='ResponseEnvelop.Profile.poke_storage', index=5,
      number=9, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='item_storage', full_name='ResponseEnvelop.Profile.item_storage', index=6,
      number=10, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='daily_bonus', full_name='ResponseEnvelop.Profile.daily_bonus', index=7,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown12', full_name='ResponseEnvelop.Profile.unknown12', index=8,
      number=12, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown13', full_name='ResponseEnvelop.Profile.unknown13', index=9,
      number=13, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='currency', full_name='ResponseEnvelop.Profile.currency', index=10,
      number=14, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_RESPONSEENVELOP_PROFILE_AVATARDETAILS, _RESPONSEENVELOP_PROFILE_DAILYBONUS, _RESPONSEENVELOP_PROFILE_CURRENCY, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3152,
  serialized_end=3706,
)
# Generated Descriptor for the top-level ResponseEnvelop message.  Its
# 'payload' field (number 100) is a repeated bytes field; each entry is
# presumably a serialized sub-response matching one request -- TODO confirm
# against the decoding code that consumes it.  The nested_types list pulls
# in every ResponseEnvelop.* descriptor defined above (plus several defined
# earlier in the file).  Auto-generated protoc output; do not hand-edit.
_RESPONSEENVELOP = _descriptor.Descriptor(
  name='ResponseEnvelop',
  full_name='ResponseEnvelop',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='unknown1', full_name='ResponseEnvelop.unknown1', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown2', full_name='ResponseEnvelop.unknown2', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='api_url', full_name='ResponseEnvelop.api_url', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown6', full_name='ResponseEnvelop.unknown6', index=3,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='unknown7', full_name='ResponseEnvelop.unknown7', index=4,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='payload', full_name='ResponseEnvelop.payload', index=5,
      number=100, type=12, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_RESPONSEENVELOP_UNKNOWN6, _RESPONSEENVELOP_HEARTBEATPAYLOAD, _RESPONSEENVELOP_CLIENTMAPCELL, _RESPONSEENVELOP_WILDPOKEMON, _RESPONSEENVELOP_MAPPOKEMONPROTO, _RESPONSEENVELOP_POKEMONFORTPROTO, _RESPONSEENVELOP_FORTLUREINFOPROTO, _RESPONSEENVELOP_POKEMONSUMMARYFORTPROTO, _RESPONSEENVELOP_CLIENTSPAWNPOINTPROTO, _RESPONSEENVELOP_WILDPOKEMONPROTO, _RESPONSEENVELOP_NEARBYPOKEMONPROTO, _RESPONSEENVELOP_PROFILEPAYLOAD, _RESPONSEENVELOP_PROFILE, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=930,
  serialized_end=3706,
)
# Cross-link section (generated): now that every Descriptor object exists,
# patch in the parent/child relationships protoc could not express at
# construction time -- each nested descriptor's containing_type, and each
# message-typed field's message_type.  Finally register the three top-level
# messages in DESCRIPTOR.message_types_by_name.  Order matters: every name
# referenced here must already be bound above.  Do not hand-edit.
_REQUESTENVELOP_REQUESTS.containing_type = _REQUESTENVELOP
_REQUESTENVELOP_MESSAGESINGLESTRING.containing_type = _REQUESTENVELOP
_REQUESTENVELOP_MESSAGESINGLEINT.containing_type = _REQUESTENVELOP
_REQUESTENVELOP_MESSAGETWOINTS.containing_type = _REQUESTENVELOP
_REQUESTENVELOP_MESSAGEQUAD.containing_type = _REQUESTENVELOP
_REQUESTENVELOP_WAT.containing_type = _REQUESTENVELOP
_REQUESTENVELOP_UNKNOWN3.containing_type = _REQUESTENVELOP
_REQUESTENVELOP_UNKNOWN6_UNKNOWN2.containing_type = _REQUESTENVELOP_UNKNOWN6
_REQUESTENVELOP_UNKNOWN6.fields_by_name['unknown2'].message_type = _REQUESTENVELOP_UNKNOWN6_UNKNOWN2
_REQUESTENVELOP_UNKNOWN6.containing_type = _REQUESTENVELOP
_REQUESTENVELOP_AUTHINFO_JWT.containing_type = _REQUESTENVELOP_AUTHINFO
_REQUESTENVELOP_AUTHINFO.fields_by_name['token'].message_type = _REQUESTENVELOP_AUTHINFO_JWT
_REQUESTENVELOP_AUTHINFO.containing_type = _REQUESTENVELOP
_REQUESTENVELOP.fields_by_name['requests'].message_type = _REQUESTENVELOP_REQUESTS
_REQUESTENVELOP.fields_by_name['unknown6'].message_type = _REQUESTENVELOP_UNKNOWN6
_REQUESTENVELOP.fields_by_name['auth'].message_type = _REQUESTENVELOP_AUTHINFO
_REQUESTENVELOP.fields_by_name['unknown11'].message_type = _UNKNOWNAUTH
_RESPONSEENVELOP_UNKNOWN6_UNKNOWN2.containing_type = _RESPONSEENVELOP_UNKNOWN6
_RESPONSEENVELOP_UNKNOWN6.fields_by_name['unknown2'].message_type = _RESPONSEENVELOP_UNKNOWN6_UNKNOWN2
_RESPONSEENVELOP_UNKNOWN6.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_HEARTBEATPAYLOAD.fields_by_name['cells'].message_type = _RESPONSEENVELOP_CLIENTMAPCELL
_RESPONSEENVELOP_HEARTBEATPAYLOAD.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_CLIENTMAPCELL.fields_by_name['Fort'].message_type = _RESPONSEENVELOP_POKEMONFORTPROTO
_RESPONSEENVELOP_CLIENTMAPCELL.fields_by_name['SpawnPoint'].message_type = _RESPONSEENVELOP_CLIENTSPAWNPOINTPROTO
_RESPONSEENVELOP_CLIENTMAPCELL.fields_by_name['WildPokemon'].message_type = _RESPONSEENVELOP_WILDPOKEMONPROTO
_RESPONSEENVELOP_CLIENTMAPCELL.fields_by_name['FortSummary'].message_type = _RESPONSEENVELOP_POKEMONSUMMARYFORTPROTO
_RESPONSEENVELOP_CLIENTMAPCELL.fields_by_name['DecimatedSpawnPoint'].message_type = _RESPONSEENVELOP_CLIENTSPAWNPOINTPROTO
_RESPONSEENVELOP_CLIENTMAPCELL.fields_by_name['MapPokemon'].message_type = _RESPONSEENVELOP_MAPPOKEMONPROTO
_RESPONSEENVELOP_CLIENTMAPCELL.fields_by_name['NearbyPokemon'].message_type = _RESPONSEENVELOP_NEARBYPOKEMONPROTO
_RESPONSEENVELOP_CLIENTMAPCELL.containing_type = _RESPONSEENVELOP
# NOTE(review): WildPokemon.pokemon links to NearbyPokemonProto rather than
# WildPokemonProto.Pokemon -- presumably faithful to the reverse-engineered
# .proto, but verify against the serialized descriptor if decoding misbehaves.
_RESPONSEENVELOP_WILDPOKEMON.fields_by_name['pokemon'].message_type = _RESPONSEENVELOP_NEARBYPOKEMONPROTO
_RESPONSEENVELOP_WILDPOKEMON.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_MAPPOKEMONPROTO.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_POKEMONFORTPROTO.fields_by_name['LureInfo'].message_type = _RESPONSEENVELOP_FORTLUREINFOPROTO
_RESPONSEENVELOP_POKEMONFORTPROTO.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_FORTLUREINFOPROTO.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_POKEMONSUMMARYFORTPROTO.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_CLIENTSPAWNPOINTPROTO.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_WILDPOKEMONPROTO_POKEMON.containing_type = _RESPONSEENVELOP_WILDPOKEMONPROTO
_RESPONSEENVELOP_WILDPOKEMONPROTO.fields_by_name['pokemon'].message_type = _RESPONSEENVELOP_WILDPOKEMONPROTO_POKEMON
_RESPONSEENVELOP_WILDPOKEMONPROTO.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_NEARBYPOKEMONPROTO.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_PROFILEPAYLOAD.fields_by_name['profile'].message_type = _RESPONSEENVELOP_PROFILE
_RESPONSEENVELOP_PROFILEPAYLOAD.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP_PROFILE_AVATARDETAILS.containing_type = _RESPONSEENVELOP_PROFILE
_RESPONSEENVELOP_PROFILE_DAILYBONUS.containing_type = _RESPONSEENVELOP_PROFILE
_RESPONSEENVELOP_PROFILE_CURRENCY.containing_type = _RESPONSEENVELOP_PROFILE
_RESPONSEENVELOP_PROFILE.fields_by_name['avatar'].message_type = _RESPONSEENVELOP_PROFILE_AVATARDETAILS
_RESPONSEENVELOP_PROFILE.fields_by_name['daily_bonus'].message_type = _RESPONSEENVELOP_PROFILE_DAILYBONUS
_RESPONSEENVELOP_PROFILE.fields_by_name['currency'].message_type = _RESPONSEENVELOP_PROFILE_CURRENCY
_RESPONSEENVELOP_PROFILE.containing_type = _RESPONSEENVELOP
_RESPONSEENVELOP.fields_by_name['unknown6'].message_type = _RESPONSEENVELOP_UNKNOWN6
_RESPONSEENVELOP.fields_by_name['unknown7'].message_type = _UNKNOWNAUTH
DESCRIPTOR.message_types_by_name['RequestEnvelop'] = _REQUESTENVELOP
DESCRIPTOR.message_types_by_name['UnknownAuth'] = _UNKNOWNAUTH
DESCRIPTOR.message_types_by_name['ResponseEnvelop'] = _RESPONSEENVELOP
# Build the concrete RequestEnvelop message class (and its nested message
# classes) from the descriptors above via the protobuf reflection metaclass.
# Each dict entry becomes a class attribute; nested
# GeneratedProtocolMessageType calls become nested classes (e.g.
# RequestEnvelop.AuthInfo.JWT).  Generated code; do not hand-edit.
RequestEnvelop = _reflection.GeneratedProtocolMessageType('RequestEnvelop', (_message.Message,), dict(
  Requests = _reflection.GeneratedProtocolMessageType('Requests', (_message.Message,), dict(
    DESCRIPTOR = _REQUESTENVELOP_REQUESTS,
    __module__ = 'pokemon_pb2'
    # @@protoc_insertion_point(class_scope:RequestEnvelop.Requests)
    ))
  ,
  MessageSingleString = _reflection.GeneratedProtocolMessageType('MessageSingleString', (_message.Message,), dict(
    DESCRIPTOR = _REQUESTENVELOP_MESSAGESINGLESTRING,
    __module__ = 'pokemon_pb2'
    # @@protoc_insertion_point(class_scope:RequestEnvelop.MessageSingleString)
    ))
  ,
  MessageSingleInt = _reflection.GeneratedProtocolMessageType('MessageSingleInt', (_message.Message,), dict(
    DESCRIPTOR = _REQUESTENVELOP_MESSAGESINGLEINT,
    __module__ = 'pokemon_pb2'
    # @@protoc_insertion_point(class_scope:RequestEnvelop.MessageSingleInt)
    ))
  ,
  MessageTwoInts = _reflection.GeneratedProtocolMessageType('MessageTwoInts', (_message.Message,), dict(
    DESCRIPTOR = _REQUESTENVELOP_MESSAGETWOINTS,
    __module__ = 'pokemon_pb2'
    # @@protoc_insertion_point(class_scope:RequestEnvelop.MessageTwoInts)
    ))
  ,
  MessageQuad = _reflection.GeneratedProtocolMessageType('MessageQuad', (_message.Message,), dict(
    DESCRIPTOR = _REQUESTENVELOP_MESSAGEQUAD,
    __module__ = 'pokemon_pb2'
    # @@protoc_insertion_point(class_scope:RequestEnvelop.MessageQuad)
    ))
  ,
  Wat = _reflection.GeneratedProtocolMessageType('Wat', (_message.Message,), dict(
    DESCRIPTOR = _REQUESTENVELOP_WAT,
    __module__ = 'pokemon_pb2'
    # @@protoc_insertion_point(class_scope:RequestEnvelop.Wat)
    ))
  ,
  Unknown3 = _reflection.GeneratedProtocolMessageType('Unknown3', (_message.Message,), dict(
    DESCRIPTOR = _REQUESTENVELOP_UNKNOWN3,
    __module__ = 'pokemon_pb2'
    # @@protoc_insertion_point(class_scope:RequestEnvelop.Unknown3)
    ))
  ,
  Unknown6 = _reflection.GeneratedProtocolMessageType('Unknown6', (_message.Message,), dict(
    Unknown2 = _reflection.GeneratedProtocolMessageType('Unknown2', (_message.Message,), dict(
      DESCRIPTOR = _REQUESTENVELOP_UNKNOWN6_UNKNOWN2,
      __module__ = 'pokemon_pb2'
      # @@protoc_insertion_point(class_scope:RequestEnvelop.Unknown6.Unknown2)
      ))
    ,
    DESCRIPTOR = _REQUESTENVELOP_UNKNOWN6,
    __module__ = 'pokemon_pb2'
    # @@protoc_insertion_point(class_scope:RequestEnvelop.Unknown6)
    ))
  ,
  AuthInfo = _reflection.GeneratedProtocolMessageType('AuthInfo', (_message.Message,), dict(
    JWT = _reflection.GeneratedProtocolMessageType('JWT', (_message.Message,), dict(
      DESCRIPTOR = _REQUESTENVELOP_AUTHINFO_JWT,
      __module__ = 'pokemon_pb2'
      # @@protoc_insertion_point(class_scope:RequestEnvelop.AuthInfo.JWT)
      ))
    ,
    DESCRIPTOR = _REQUESTENVELOP_AUTHINFO,
    __module__ = 'pokemon_pb2'
    # @@protoc_insertion_point(class_scope:RequestEnvelop.AuthInfo)
    ))
  ,
  DESCRIPTOR = _REQUESTENVELOP,
  __module__ = 'pokemon_pb2'
  # @@protoc_insertion_point(class_scope:RequestEnvelop)
  ))
_sym_db.RegisterMessage(RequestEnvelop)
_sym_db.RegisterMessage(RequestEnvelop.Requests)
_sym_db.RegisterMessage(RequestEnvelop.MessageSingleString)
_sym_db.RegisterMessage(RequestEnvelop.MessageSingleInt)
_sym_db.RegisterMessage(RequestEnvelop.MessageTwoInts)
_sym_db.RegisterMessage(RequestEnvelop.MessageQuad)
_sym_db.RegisterMessage(RequestEnvelop.Wat)
_sym_db.RegisterMessage(RequestEnvelop.Unknown3)
_sym_db.RegisterMessage(RequestEnvelop.Unknown6)
_sym_db.RegisterMessage(RequestEnvelop.Unknown6.Unknown2)
_sym_db.RegisterMessage(RequestEnvelop.AuthInfo)
_sym_db.RegisterMessage(RequestEnvelop.AuthInfo.JWT)
UnknownAuth = _reflection.GeneratedProtocolMessageType('UnknownAuth', (_message.Message,), dict(
DESCRIPTOR = _UNKNOWNAUTH,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:UnknownAuth)
))
_sym_db.RegisterMessage(UnknownAuth)
ResponseEnvelop = _reflection.GeneratedProtocolMessageType('ResponseEnvelop', (_message.Message,), dict(
Unknown6 = _reflection.GeneratedProtocolMessageType('Unknown6', (_message.Message,), dict(
Unknown2 = _reflection.GeneratedProtocolMessageType('Unknown2', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_UNKNOWN6_UNKNOWN2,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.Unknown6.Unknown2)
))
,
DESCRIPTOR = _RESPONSEENVELOP_UNKNOWN6,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.Unknown6)
))
,
HeartbeatPayload = _reflection.GeneratedProtocolMessageType('HeartbeatPayload', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_HEARTBEATPAYLOAD,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.HeartbeatPayload)
))
,
ClientMapCell = _reflection.GeneratedProtocolMessageType('ClientMapCell', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_CLIENTMAPCELL,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.ClientMapCell)
))
,
WildPokemon = _reflection.GeneratedProtocolMessageType('WildPokemon', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_WILDPOKEMON,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.WildPokemon)
))
,
MapPokemonProto = _reflection.GeneratedProtocolMessageType('MapPokemonProto', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_MAPPOKEMONPROTO,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.MapPokemonProto)
))
,
PokemonFortProto = _reflection.GeneratedProtocolMessageType('PokemonFortProto', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_POKEMONFORTPROTO,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.PokemonFortProto)
))
,
FortLureInfoProto = _reflection.GeneratedProtocolMessageType('FortLureInfoProto', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_FORTLUREINFOPROTO,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.FortLureInfoProto)
))
,
PokemonSummaryFortProto = _reflection.GeneratedProtocolMessageType('PokemonSummaryFortProto', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_POKEMONSUMMARYFORTPROTO,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.PokemonSummaryFortProto)
))
,
ClientSpawnPointProto = _reflection.GeneratedProtocolMessageType('ClientSpawnPointProto', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_CLIENTSPAWNPOINTPROTO,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.ClientSpawnPointProto)
))
,
WildPokemonProto = _reflection.GeneratedProtocolMessageType('WildPokemonProto', (_message.Message,), dict(
Pokemon = _reflection.GeneratedProtocolMessageType('Pokemon', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_WILDPOKEMONPROTO_POKEMON,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.WildPokemonProto.Pokemon)
))
,
DESCRIPTOR = _RESPONSEENVELOP_WILDPOKEMONPROTO,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.WildPokemonProto)
))
,
NearbyPokemonProto = _reflection.GeneratedProtocolMessageType('NearbyPokemonProto', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_NEARBYPOKEMONPROTO,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.NearbyPokemonProto)
))
,
ProfilePayload = _reflection.GeneratedProtocolMessageType('ProfilePayload', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_PROFILEPAYLOAD,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.ProfilePayload)
))
,
Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), dict(
AvatarDetails = _reflection.GeneratedProtocolMessageType('AvatarDetails', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_PROFILE_AVATARDETAILS,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.Profile.AvatarDetails)
))
,
DailyBonus = _reflection.GeneratedProtocolMessageType('DailyBonus', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_PROFILE_DAILYBONUS,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.Profile.DailyBonus)
))
,
Currency = _reflection.GeneratedProtocolMessageType('Currency', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEENVELOP_PROFILE_CURRENCY,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.Profile.Currency)
))
,
DESCRIPTOR = _RESPONSEENVELOP_PROFILE,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop.Profile)
))
,
DESCRIPTOR = _RESPONSEENVELOP,
__module__ = 'pokemon_pb2'
# @@protoc_insertion_point(class_scope:ResponseEnvelop)
))
_sym_db.RegisterMessage(ResponseEnvelop)
_sym_db.RegisterMessage(ResponseEnvelop.Unknown6)
_sym_db.RegisterMessage(ResponseEnvelop.Unknown6.Unknown2)
_sym_db.RegisterMessage(ResponseEnvelop.HeartbeatPayload)
_sym_db.RegisterMessage(ResponseEnvelop.ClientMapCell)
_sym_db.RegisterMessage(ResponseEnvelop.WildPokemon)
_sym_db.RegisterMessage(ResponseEnvelop.MapPokemonProto)
_sym_db.RegisterMessage(ResponseEnvelop.PokemonFortProto)
_sym_db.RegisterMessage(ResponseEnvelop.FortLureInfoProto)
_sym_db.RegisterMessage(ResponseEnvelop.PokemonSummaryFortProto)
_sym_db.RegisterMessage(ResponseEnvelop.ClientSpawnPointProto)
_sym_db.RegisterMessage(ResponseEnvelop.WildPokemonProto)
_sym_db.RegisterMessage(ResponseEnvelop.WildPokemonProto.Pokemon)
_sym_db.RegisterMessage(ResponseEnvelop.NearbyPokemonProto)
_sym_db.RegisterMessage(ResponseEnvelop.ProfilePayload)
_sym_db.RegisterMessage(ResponseEnvelop.Profile)
_sym_db.RegisterMessage(ResponseEnvelop.Profile.AvatarDetails)
_sym_db.RegisterMessage(ResponseEnvelop.Profile.DailyBonus)
_sym_db.RegisterMessage(ResponseEnvelop.Profile.Currency)
# @@protoc_insertion_point(module_scope)
| mit |
lateminer/bitcoin | test/functional/p2p_message_capture.py | 22 | 2883 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test per-peer message capture capability.
Additionally, the output of contrib/message-capture/message-capture-parser.py should be verified manually.
"""
import glob
from io import BytesIO
import os
from test_framework.p2p import P2PDataStore, MESSAGEMAP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
TIME_SIZE = 8
LENGTH_SIZE = 4
MSGTYPE_SIZE = 12
def mini_parser(dat_file):
"""Parse a data file created by CaptureMessage.
From the data file we'll only check the structure.
We won't care about things like:
- Deserializing the payload of the message
- This is managed by the deserialize methods in test_framework.messages
- The order of the messages
- There's no reason why we can't, say, change the order of the messages in the handshake
- Message Type
- We can add new message types
We're ignoring these because they're simply too brittle to test here.
"""
with open(dat_file, 'rb') as f_in:
# This should have at least one message in it
assert(os.fstat(f_in.fileno()).st_size >= TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE)
while True:
tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE)
if not tmp_header_raw:
break
tmp_header = BytesIO(tmp_header_raw)
tmp_header.read(TIME_SIZE) # skip the timestamp field
raw_msgtype = tmp_header.read(MSGTYPE_SIZE)
msgtype: bytes = raw_msgtype.split(b'\x00', 1)[0]
remainder = raw_msgtype.split(b'\x00', 1)[1]
assert(len(msgtype) > 0)
assert(msgtype in MESSAGEMAP)
assert(len(remainder) == 0 or not remainder.decode().isprintable())
length: int = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little")
data = f_in.read(length)
assert_equal(len(data), length)
class MessageCaptureTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-capturemessages"]]
self.setup_clean_chain = True
def run_test(self):
capturedir = os.path.join(self.nodes[0].datadir, "regtest/message_capture")
# Connect a node so that the handshake occurs
self.nodes[0].add_p2p_connection(P2PDataStore())
self.nodes[0].disconnect_p2ps()
recv_file = glob.glob(os.path.join(capturedir, "*/msgs_recv.dat"))[0]
mini_parser(recv_file)
sent_file = glob.glob(os.path.join(capturedir, "*/msgs_sent.dat"))[0]
mini_parser(sent_file)
if __name__ == '__main__':
MessageCaptureTest().main()
| mit |
thonkify/thonkify | src/lib/requests/help.py | 6 | 3500 | """Module containing bug report helper(s)."""
from __future__ import print_function
import json
import platform
import sys
import ssl
import urllib3
import chardet
from . import __version__ as requests_version
try:
from .packages.urllib3.contrib import pyopenssl
except ImportError:
pyopenssl = None
OpenSSL = None
cryptography = None
else:
import OpenSSL
import cryptography
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version}
def info():
"""Generate information for a bug report."""
try:
platform_info = {
'system': platform.system(),
'release': platform.release(),
}
except IOError:
platform_info = {
'system': 'Unknown',
'release': 'Unknown',
}
implementation_info = _implementation()
urllib3_info = {'version': urllib3.__version__}
chardet_info = {'version': chardet.__version__}
pyopenssl_info = {
'version': None,
'openssl_version': '',
}
if OpenSSL:
pyopenssl_info = {
'version': OpenSSL.__version__,
'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
}
cryptography_info = {
'version': getattr(cryptography, '__version__', ''),
}
# OPENSSL_VERSION_NUMBER doesn't exist in the Python 2.6 ssl module.
system_ssl = getattr(ssl, 'OPENSSL_VERSION_NUMBER', None)
system_ssl_info = {
'version': '%x' % system_ssl if system_ssl is not None else ''
}
return {
'platform': platform_info,
'implementation': implementation_info,
'system_ssl': system_ssl_info,
'using_pyopenssl': pyopenssl is not None,
'pyOpenSSL': pyopenssl_info,
'urllib3': urllib3_info,
'chardet': chardet_info,
'cryptography': cryptography_info,
'requests': {
'version': requests_version,
},
}
def main():
"""Pretty-print the bug information as JSON."""
print(json.dumps(info(), sort_keys=True, indent=2))
if __name__ == '__main__':
main()
| mit |
saurabh6790/omnisys-lib | core/report/todo/todo.py | 8 | 1238 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.widgets.reportview import execute as runreport
from webnotes.utils import getdate
def execute(filters=None):
priority_map = {"High": 3, "Medium": 2, "Low": 1}
todo_list = runreport(doctype="ToDo", fields=["name", "date", "description",
"priority", "reference_type", "reference_name", "assigned_by", "owner"],
filters=[["ToDo", "checked", "!=", 1]])
todo_list.sort(key=lambda todo: (priority_map.get(todo.priority, 0),
todo.date and getdate(todo.date) or getdate("1900-01-01")), reverse=True)
columns = ["ID:Link/ToDo:90", "Priority::60", "Date:Date", "Description::150",
"Assigned To/Owner:Data:120", "Assigned By:Data:120", "Reference::200"]
result = []
for todo in todo_list:
if todo.reference_type:
todo.reference = """<a href="#Form/%s/%s">%s: %s</a>""" % \
(todo.reference_type, todo.reference_name, todo.reference_type, todo.reference_name)
else:
todo.reference = None
result.append([todo.name, todo.priority, todo.date, todo.description,
todo.owner, todo.assigned_by, todo.reference])
return columns, result
| mit |
xiangel/hue | apps/metastore/src/metastore/conf.py | 3 | 1068 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
from desktop.lib.conf import Config
HS2_GET_TABLES_MAX = Config(
key="hs2_get_tables_max",
help=_("The max number of records in the result set permitted to do a HS2 GetTables call."),
type=int,
default=1000
)
| apache-2.0 |
Shaswat27/sympy | sympy/integrals/tests/test_rde.py | 95 | 8769 | """Most of these tests come from the examples in Bronstein's book."""
from sympy import Poly, S, symbols, oo, I
from sympy.integrals.risch import (DifferentialExtension,
NonElementaryIntegralException)
from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer,
normal_denom, special_denom, bound_degree, spde, solve_poly_rde,
no_cancel_equal, cancel_primitive, cancel_exp, rischDE)
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import x, t, z, n
t0, t1, t2, k = symbols('t:3 k')
def test_order_at():
a = Poly(t**4, t)
b = Poly((t**2 + 1)**3*t, t)
c = Poly((t**2 + 1)**6*t, t)
d = Poly((t**2 + 1)**10*t**10, t)
e = Poly((t**2 + 1)**100*t**37, t)
p1 = Poly(t, t)
p2 = Poly(1 + t**2, t)
assert order_at(a, p1, t) == 4
assert order_at(b, p1, t) == 1
assert order_at(c, p1, t) == 1
assert order_at(d, p1, t) == 10
assert order_at(e, p1, t) == 37
assert order_at(a, p2, t) == 0
assert order_at(b, p2, t) == 3
assert order_at(c, p2, t) == 6
assert order_at(d, p1, t) == 10
assert order_at(e, p2, t) == 100
assert order_at(Poly(0, t), Poly(t, t), t) == oo
assert order_at_oo(Poly(t**2 - 1, t), Poly(t + 1), t) == \
order_at_oo(Poly(t - 1, t), Poly(1, t), t) == -1
assert order_at_oo(Poly(0, t), Poly(1, t), t) == oo
def test_weak_normalizer():
a = Poly((1 + x)*t**5 + 4*t**4 + (-1 - 3*x)*t**3 - 4*t**2 + (-2 + 2*x)*t, t)
d = Poly(t**4 - 3*t**2 + 2, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
r = weak_normalizer(a, d, DE, z)
assert r == (Poly(t**5 - t**4 - 4*t**3 + 4*t**2 + 4*t - 4, t),
(Poly((1 + x)*t**2 + x*t, t), Poly(t + 1, t)))
assert weak_normalizer(r[1][0], r[1][1], DE) == (Poly(1, t), r[1])
r = weak_normalizer(Poly(1 + t**2), Poly(t**2 - 1, t), DE, z)
assert r == (Poly(t**4 - 2*t**2 + 1, t), (Poly(-3*t**2 + 1, t), Poly(t**2 - 1, t)))
assert weak_normalizer(r[1][0], r[1][1], DE, z) == (Poly(1, t), r[1])
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2)]})
r = weak_normalizer(Poly(1 + t**2), Poly(t, t), DE, z)
assert r == (Poly(t, t), (Poly(0, t), Poly(1, t)))
assert weak_normalizer(r[1][0], r[1][1], DE, z) == (Poly(1, t), r[1])
def test_normal_denom():
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
raises(NonElementaryIntegralException, lambda: normal_denom(Poly(1, x), Poly(1, x),
Poly(1, x), Poly(x, x), DE))
fa, fd = Poly(t**2 + 1, t), Poly(1, t)
ga, gd = Poly(1, t), Poly(t**2, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert normal_denom(fa, fd, ga, gd, DE) == \
(Poly(t, t), (Poly(t**3 - t**2 + t - 1, t), Poly(1, t)), (Poly(1, t),
Poly(1, t)), Poly(t, t))
def test_special_denom():
# TODO: add more tests here
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert special_denom(Poly(1, t), Poly(t**2, t), Poly(1, t), Poly(t**2 - 1, t),
Poly(t, t), DE) == \
(Poly(1, t), Poly(t**2 - 1, t), Poly(t**2 - 1, t), Poly(t, t))
# assert special_denom(Poly(1, t), Poly(2*x, t), Poly((1 + 2*x)*t, t), DE) == 1
# issue 3940
# Note, this isn't a very good test, because the denominator is just 1,
# but at least it tests the exp cancellation case
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-2*x*t0, t0),
Poly(I*k*t1, t1)]})
DE.decrement_level()
assert special_denom(Poly(1, t0), Poly(I*k, t0), Poly(1, t0), Poly(t0, t0),
Poly(1, t0), DE) == \
(Poly(1, t0), Poly(I*k, t0), Poly(t0, t0), Poly(1, t0))
@XFAIL
def test_bound_degree_fail():
# Primitive
DE = DifferentialExtension(extension={'D': [Poly(1, x),
Poly(t0/x**2, t0), Poly(1/x, t)]})
assert bound_degree(Poly(t**2, t), Poly(-(1/x**2*t**2 + 1/x), t),
Poly((2*x - 1)*t**4 + (t0 + x)/x*t**3 - (t0 + 4*x**2)/2*x*t**2 + x*t,
t), DE) == 3
def test_bound_degree():
# Base
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert bound_degree(Poly(1, x), Poly(-2*x, x), Poly(1, x), DE) == 0
# Primitive (see above test_bound_degree_fail)
# TODO: Add test for when the degree bound becomes larger after limited_integrate
# TODO: Add test for db == da - 1 case
# Exp
# TODO: Add tests
# TODO: Add test for when the degree becomes larger after parametric_log_deriv()
# Nonlinear
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert bound_degree(Poly(t, t), Poly((t - 1)*(t**2 + 1), t), Poly(1, t), DE) == 0
def test_spde():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
raises(NonElementaryIntegralException, lambda: spde(Poly(t, t), Poly((t - 1)*(t**2 + 1), t), Poly(1, t), 0, DE))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert spde(Poly(t**2 + x*t*2 + x**2, t), Poly(t**2/x**2 + (2/x - 1)*t, t),
Poly(t**2/x**2 + (2/x - 1)*t, t), 0, DE) == \
(Poly(0, t), Poly(0, t), 0, Poly(0, t), Poly(1, t))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t0/x**2, t0), Poly(1/x, t)]})
assert spde(Poly(t**2, t), Poly(-t**2/x**2 - 1/x, t),
Poly((2*x - 1)*t**4 + (t0 + x)/x*t**3 - (t0 + 4*x**2)/(2*x)*t**2 + x*t, t), 3, DE) == \
(Poly(0, t), Poly(0, t), 0, Poly(0, t),
Poly(t0*t**2/2 + x**2*t**2 - x**2*t, t))
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert spde(Poly(x**2 + x + 1, x), Poly(-2*x - 1, x), Poly(x**5/2 +
3*x**4/4 + x**3 - x**2 + 1, x), 4, DE) == \
(Poly(0, x), Poly(x/2 - S(1)/4, x), 2, Poly(x**2 + x + 1, x), Poly(5*x/4, x))
assert spde(Poly(x**2 + x + 1, x), Poly(-2*x - 1, x), Poly(x**5/2 +
3*x**4/4 + x**3 - x**2 + 1, x), n, DE) == \
(Poly(0, x), Poly(x/2 - S(1)/4, x), -2 + n, Poly(x**2 + x + 1, x), Poly(5*x/4, x))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1, t)]})
raises(NonElementaryIntegralException, lambda: spde(Poly((t - 1)*(t**2 + 1)**2, t), Poly((t - 1)*(t**2 + 1), t), Poly(1, t), 0, DE))
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert spde(Poly(x**2 - x, x), Poly(1, x), Poly(9*x**4 - 10*x**3 + 2*x**2, x), 4, DE) == (Poly(0, x), Poly(0, x), 0, Poly(0, x), Poly(3*x**3 - 2*x**2, x))
assert spde(Poly(x**2 - x, x), Poly(x**2 - 5*x + 3, x), Poly(x**7 - x**6 - 2*x**4 + 3*x**3 - x**2, x), 5, DE) == \
(Poly(1, x), Poly(x + 1, x), 1, Poly(x**4 - x**3, x), Poly(x**3 - x**2, x))
def test_solve_poly_rde_no_cancel():
# deg(b) large
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})
assert solve_poly_rde(Poly(t**2 + 1, t), Poly(t**3 + (x + 1)*t**2 + t + x + 2, t),
oo, DE) == Poly(t + x, t)
# deg(b) small
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert solve_poly_rde(Poly(0, x), Poly(x/2 - S(1)/4, x), oo, DE) == \
Poly(x**2/4 - x/4, x)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert solve_poly_rde(Poly(2, t), Poly(t**2 + 2*t + 3, t), 1, DE) == \
Poly(t + 1, t, x)
# deg(b) == deg(D) - 1
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert no_cancel_equal(Poly(1 - t, t),
Poly(t**3 + t**2 - 2*x*t - 2*x, t), oo, DE) == \
(Poly(t**2, t), 1, Poly((-2 - 2*x)*t - 2*x, t))
def test_solve_poly_rde_cancel():
# exp
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert cancel_exp(Poly(2*x, t), Poly(2*x, t), 0, DE) == \
Poly(1, t)
assert cancel_exp(Poly(2*x, t), Poly((1 + 2*x)*t, t), 1, DE) == \
Poly(t, t)
# TODO: Add more exp tests, including tests that require is_deriv_in_field()
# primitive
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
# If the DecrementLevel context manager is working correctly, this shouldn't
# cause any problems with the further tests.
raises(NonElementaryIntegralException, lambda: cancel_primitive(Poly(1, t), Poly(t, t), oo, DE))
assert cancel_primitive(Poly(1, t), Poly(t + 1/x, t), 2, DE) == \
Poly(t, t)
assert cancel_primitive(Poly(4*x, t), Poly(4*x*t**2 + 2*t/x, t), 3, DE) == \
Poly(t**2, t)
# TODO: Add more primitive tests, including tests that require is_deriv_in_field()
def test_rischDE():
# TODO: Add more tests for rischDE, including ones from the text
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
DE.decrement_level()
assert rischDE(Poly(-2*x, x), Poly(1, x), Poly(1 - 2*x - 2*x**2, x),
Poly(1, x), DE) == \
(Poly(x + 1, x), Poly(1, x))
| bsd-3-clause |
childsish/lhc-python | tests/test_collections/test_interval_set.py | 1 | 1419 | import unittest
from lhc.collections import IntervalSet
from lhc.interval import Interval
class TestIntervalSet(unittest.TestCase):
def test_add(self):
set_ = IntervalSet()
set_.add(Interval(0, 1000))
self.assertEqual(1, len(set_))
self.assertTrue(any(Interval(0, 1000) in bin for bin in set_.bins.values()))
def test_init(self):
set_ = IntervalSet([Interval(0, 1000), Interval(1000, 2000), Interval(2000, 3000)])
self.assertEqual(3, len(set_))
self.assertTrue(any(Interval(0, 1000) in bin for bin in set_.bins.values()))
self.assertTrue(any(Interval(1000, 2000) in bin for bin in set_.bins.values()))
self.assertTrue(any(Interval(2000, 3000) in bin for bin in set_.bins.values()))
def test_contains(self):
set_ = IntervalSet([Interval(0, 1000), Interval(1000, 2000), Interval(2000, 3000)])
self.assertIn(Interval(0, 1000), set_)
self.assertIn(Interval(1000, 2000), set_)
self.assertIn(Interval(2000, 3000), set_)
def test_fetch(self):
set_ = IntervalSet([Interval(0, 1000), Interval(1000, 2000), Interval(2000, 3000)])
it = set_.fetch(Interval(500, 1500))
self.assertEqual(Interval(0, 1000), next(it))
self.assertEqual(Interval(1000, 2000), next(it))
self.assertRaises(StopIteration, it.__next__)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
ltiao/networkx | networkx/__init__.py | 40 | 2409 | """
NetworkX
========
NetworkX (NX) is a Python package for the creation, manipulation, and
study of the structure, dynamics, and functions of complex networks.
https://networkx.lanl.gov/
Using
-----
Just write in Python
>>> import networkx as nx
>>> G=nx.Graph()
>>> G.add_edge(1,2)
>>> G.add_node(42)
>>> print(sorted(G.nodes()))
[1, 2, 42]
>>> print(sorted(G.edges()))
[(1, 2)]
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Add platform dependent shared library path to sys.path
#
from __future__ import absolute_import
import sys
if sys.version_info[:2] < (2, 7):
m = "Python 2.7 or later is required for NetworkX (%d.%d detected)."
raise ImportError(m % sys.version_info[:2])
del sys
# Release data
from networkx import release
__author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \
(release.authors['Hagberg'] + release.authors['Schult'] +
release.authors['Swart'])
__license__ = release.license
__date__ = release.date
__version__ = release.version
__bibtex__ = """@inproceedings{hagberg-2008-exploring,
author = {Aric A. Hagberg and Daniel A. Schult and Pieter J. Swart},
title = {Exploring network structure, dynamics, and function using {NetworkX}},
year = {2008},
month = Aug,
urlpdf = {http://math.lanl.gov/~hagberg/Papers/hagberg-2008-exploring.pdf},
booktitle = {Proceedings of the 7th Python in Science Conference (SciPy2008)},
editors = {G\"{a}el Varoquaux, Travis Vaught, and Jarrod Millman},
address = {Pasadena, CA USA},
pages = {11--15}
}"""
# These are import orderwise
from networkx.exception import *
import networkx.external
import networkx.utils
import networkx.classes
from networkx.classes import *
import networkx.convert
from networkx.convert import *
import networkx.convert_matrix
from networkx.convert_matrix import *
import networkx.relabel
from networkx.relabel import *
import networkx.generators
from networkx.generators import *
import networkx.readwrite
from networkx.readwrite import *
# Need to test with SciPy, when available
import networkx.algorithms
from networkx.algorithms import *
import networkx.linalg
from networkx.linalg import *
from networkx.tests.test import run as test
import networkx.drawing
from networkx.drawing import *
| bsd-3-clause |
deltreey/ansible | contrib/inventory/zone.py | 111 | 1463 | #!/usr/bin/env python
# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = []
for l in pipe.stdout.readlines():
# 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
s = l.split(':')
if s[1] != 'global':
result['all']['hosts'].append(s[1])
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'zone'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print json.dumps(result)
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print json.dumps({'ansible_connection': 'zone'})
else:
print "Need an argument, either --list or --host <host>"
| gpl-3.0 |
DataSenseAnalytics/AirlineApp | src/main/python/airlineapp/datamodel/flightmaster.py | 1 | 1240 | from smv import *
from pyspark.sql.functions import *
from airlineapp.datamodel import inputset
from airlineapp.core import utils
__all__ = ['FlightMaster']
class FlightMaster(SmvPyModule, SmvPyOutput):
"""Derive flight level master"""
def requiresDS(self):
return [inputset.DimFlightLeg, inputset.DimGeo]
def run(self, i):
flt_df = i[inputset.DimFlightLeg]
geo_df = i[inputset.DimGeo]
return flt_df.join(geo_df, col("SCH_LEG_ORIG_CD") == col("LOCATION_ID"), "left_outer").\
drop("LOCATION_ID").\
smvSelectPlus(
col("SCH_LEG_DEST_CD").alias("LOCATION_ID")
).\
smvJoinByKey(geo_df, ["LOCATION_ID"], "leftouter").\
smvSelectPlus(
utils.calDist(
col("LATITUDE"),
col("_LATITUDE"),
col("LONGITUDE"),
col("_LONGITUDE")
).alias("orig_dest_dist")
).\
smvSelectPlus(
when(col("orig_dest_dist") > 3000, "L").otherwise("S").alias("haul_type"),
substring(col("FLT_LEG_DPRT_TM"), 12, 2).alias("flt_leg_dprt_hr")
)
# need postfix in smvJoinByKey
| apache-2.0 |
bitsuperlab/cpp-play2 | programs/genesis_util/sort_objects.py | 12 | 1421 | #!/usr/bin/env python3
import argparse
import json
import sys
def dump_json(obj, out, pretty):
if pretty:
json.dump(obj, out, indent=2, sort_keys=True)
else:
json.dump(obj, out, separators=(",", ":"), sort_keys=True)
return
def main():
parser = argparse.ArgumentParser(description="Sort initial_accounts and initial_assets by \"id\" member, then remove \"id\" member")
parser.add_argument("-o", "--output", metavar="OUT", default="-", help="output filename (default: stdout)")
parser.add_argument("-i", "--input", metavar="IN", default="-", help="input filename (default: stdin)")
parser.add_argument("-p", "--pretty", action="store_true", default=False, help="pretty print output")
opts = parser.parse_args()
if opts.input == "-":
genesis = json.load(sys.stdin)
else:
with open(opts.input, "r") as f:
genesis = json.load(f)
genesis["initial_assets"].sort( key=lambda e : e["id"] )
genesis["initial_accounts"].sort( key=lambda e : e["id"] )
for e in genesis["initial_assets"]:
del e["id"]
for e in genesis["initial_accounts"]:
del e["id"]
if opts.output == "-":
dump_json( genesis, sys.stdout, opts.pretty )
sys.stdout.flush()
else:
with open(opts.output, "w") as f:
dump_json( genesis, f, opts.pretty )
return
if __name__ == "__main__":
main()
| mit |
surgebiswas/poker | PokerBots_2017/Johnny/numpy/distutils/command/config_compiler.py | 250 | 4379 | from __future__ import division, absolute_import, print_function
from distutils.core import Command
from numpy.distutils import log
#XXX: Linker flags
def show_fortran_compilers(_cache=[]):
# Using cache to prevent infinite recursion
if _cache: return
_cache.append(1)
from numpy.distutils.fcompiler import show_fcompilers
import distutils.core
dist = distutils.core._setup_distribution
show_fcompilers(dist)
class config_fc(Command):
    """Distutils command holding user-specified Fortran compiler options.

    The config_fc command is consumed by the FCompiler.customize() method;
    it never executes anything itself.
    """

    description = "specify Fortran 77/Fortran 90 compiler information"

    user_options = [
        ('fcompiler=', None, "specify Fortran compiler type"),
        ('f77exec=', None, "specify F77 compiler command"),
        ('f90exec=', None, "specify F90 compiler command"),
        ('f77flags=', None, "specify F77 compiler flags"),
        ('f90flags=', None, "specify F90 compiler flags"),
        ('opt=', None, "specify optimization flags"),
        ('arch=', None, "specify architecture specific optimization flags"),
        ('debug', 'g', "compile with debugging information"),
        ('noopt', None, "compile without optimization"),
        ('noarch', None, "compile without arch-dependent optimization"),
        ]

    help_options = [
        ('help-fcompiler', None, "list available Fortran compilers",
         show_fortran_compilers),
        ]

    boolean_options = ['debug', 'noopt', 'noarch']

    def initialize_options(self):
        # Every option starts unset; command-line parsing may fill them in.
        for name in ('fcompiler', 'f77exec', 'f90exec', 'f77flags',
                     'f90flags', 'opt', 'arch', 'debug', 'noopt', 'noarch'):
            setattr(self, name, None)

    def finalize_options(self):
        """Unify the --fcompiler option across all related commands."""
        log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options')
        # Fetch the sibling commands in the same order as before, since
        # get_finalized_command may finalize them as a side effect.
        build_clib = self.get_finalized_command('build_clib')
        build_ext = self.get_finalized_command('build_ext')
        config = self.get_finalized_command('config')
        build = self.get_finalized_command('build')
        commands = [self, config, build_clib, build_ext, build]
        for attr in ['fcompiler']:
            # Collect distinct values in first-seen order across commands.
            seen = []
            for cmd in commands:
                value = getattr(cmd, attr)
                if value is None:
                    continue
                if not isinstance(value, str):
                    # Compiler instances carry their type name here.
                    value = value.compiler_type
                if value not in seen:
                    seen.append(value)
            chosen = seen[0] if seen else None
            if len(seen) > 1:
                log.warn(' commands have different --%s options: %s'
                         ', using first in list as default' % (attr, seen))
            if chosen:
                # Propagate the winner to every command that is still unset.
                for cmd in commands:
                    if getattr(cmd, attr) is None:
                        setattr(cmd, attr, chosen)

    def run(self):
        # Pure option-holder command: nothing to execute.
        return
class config_cc(Command):
    """Distutils command holding user-specified C/C++ compiler options.

    Mirrors config_fc: it only stores and unifies options, never builds.
    """

    description = "specify C/C++ compiler information"

    user_options = [
        ('compiler=', None, "specify C/C++ compiler type"),
        ]

    def initialize_options(self):
        # Unset until the command line (or another command) supplies a value.
        self.compiler = None

    def finalize_options(self):
        """Unify the --compiler option across all related commands."""
        log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options')
        # Same fetch order as the original, since get_finalized_command may
        # finalize siblings as a side effect.
        build_clib = self.get_finalized_command('build_clib')
        build_ext = self.get_finalized_command('build_ext')
        config = self.get_finalized_command('config')
        build = self.get_finalized_command('build')
        commands = [self, config, build_clib, build_ext, build]
        for attr in ['compiler']:
            # Collect distinct values in first-seen order across commands.
            seen = []
            for cmd in commands:
                value = getattr(cmd, attr)
                if value is None:
                    continue
                if not isinstance(value, str):
                    # Compiler instances carry their type name here.
                    value = value.compiler_type
                if value not in seen:
                    seen.append(value)
            chosen = seen[0] if seen else None
            if len(seen) > 1:
                log.warn(' commands have different --%s options: %s'
                         ', using first in list as default' % (attr, seen))
            if chosen:
                # Propagate the winner to every command that is still unset.
                for cmd in commands:
                    if getattr(cmd, attr) is None:
                        setattr(cmd, attr, chosen)
        return

    def run(self):
        # Pure option-holder command: nothing to execute.
        return
| mit |
rekhajoshm/spark | examples/src/main/python/ml/index_to_string_example.py | 123 | 2014 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import IndexToString, StringIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Entry point: create (or reuse) a local SparkSession; appName labels
    # the job in the Spark UI.
    spark = SparkSession\
        .builder\
        .appName("IndexToStringExample")\
        .getOrCreate()

    # NOTE: the $example on$ / $example off$ markers delimit the snippet
    # that is extracted verbatim into the Spark ML docs -- keep them intact.
    # $example on$
    df = spark.createDataFrame(
        [(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
        ["id", "category"])

    # StringIndexer maps each distinct string in `category` to a numeric
    # index (label metadata is stored on the output column).
    indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
    model = indexer.fit(df)
    indexed = model.transform(df)

    print("Transformed string column '%s' to indexed column '%s'"
          % (indexer.getInputCol(), indexer.getOutputCol()))
    indexed.show()

    print("StringIndexer will store labels in output column metadata\n")

    # IndexToString reverses the mapping using the labels stored in the
    # indexed column's metadata -- no fitting step is needed.
    converter = IndexToString(inputCol="categoryIndex", outputCol="originalCategory")
    converted = converter.transform(indexed)

    print("Transformed indexed column '%s' back to original string column '%s' using "
          "labels in metadata" % (converter.getInputCol(), converter.getOutputCol()))
    converted.select("id", "categoryIndex", "originalCategory").show()
    # $example off$

    spark.stop()
| apache-2.0 |
doctorOb/thoughtsbydrob | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/vim.py | 364 | 1976 | # -*- coding: utf-8 -*-
"""
pygments.styles.vim
~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by vim.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Token
class VimStyle(Style):
    """
    Styles somewhat like vim 7.0: light text on a black background,
    mirroring vim's default GUI color scheme.
    """
    # Page-level colors: black canvas, slightly lighter highlight line,
    # light gray as the fallback text color.
    background_color = "#000000"
    highlight_color = "#222222"
    default_style = "#cccccc"

    # Token -> style-string mapping. An empty string means "inherit the
    # default style"; entries combine attributes (bold/italic/border)
    # with hex colors.
    styles = {
        Token: "#cccccc",
        Whitespace: "",
        # Comments: dark blue, like vim's Comment group.
        Comment: "#000080",
        Comment.Preproc: "",
        Comment.Special: "bold #cd0000",
        # Keywords: yellow-ish, with declarations/types in green.
        Keyword: "#cdcd00",
        Keyword.Declaration: "#00cd00",
        Keyword.Namespace: "#cd00cd",
        Keyword.Pseudo: "",
        Keyword.Type: "#00cd00",
        Operator: "#3399cc",
        Operator.Word: "#cdcd00",
        # Names: mostly default, cyan/magenta for classes and builtins.
        Name: "",
        Name.Class: "#00cdcd",
        Name.Builtin: "#cd00cd",
        Name.Exception: "bold #666699",
        Name.Variable: "#00cdcd",
        # Literals: red strings, magenta numbers.
        String: "#cd0000",
        Number: "#cd00cd",
        # Generic tokens used by diff/traceback/console lexers.
        Generic.Heading: "bold #000080",
        Generic.Subheading: "bold #800080",
        Generic.Deleted: "#cd0000",
        Generic.Inserted: "#00cd00",
        Generic.Error: "#FF0000",
        Generic.Emph: "italic",
        Generic.Strong: "bold",
        Generic.Prompt: "bold #000080",
        Generic.Output: "#888",
        Generic.Traceback: "#04D",
        # Errors are marked with a red border rather than recolored text.
        Error: "border:#FF0000"
    }
| mit |
takluyver/xray | xray/indexing.py | 1 | 8564 | import numpy as np
import utils
def expanded_indexer(key, ndim):
    """Return *key* normalized to a tuple with exactly *ndim* entries.

    The first ``Ellipsis`` in the key is replaced by however many full
    slices are needed to reach *ndim* dimensions (any later Ellipsis acts
    as a single full slice, matching numpy semantics); trailing
    dimensions the key does not mention are padded with full slices.

    Raises
    ------
    IndexError
        If the key addresses more than *ndim* dimensions.
    """
    if not isinstance(key, tuple):
        # numpy treats a non-tuple key exactly like a 1-tuple
        key = (key,)
    expanded = []
    seen_ellipsis = False
    for item in key:
        if item is not Ellipsis:
            expanded.append(item)
        elif seen_ellipsis:
            # only the first Ellipsis expands
            expanded.append(slice(None))
        else:
            expanded.extend((ndim + 1 - len(key)) * [slice(None)])
            seen_ellipsis = True
    if len(expanded) > ndim:
        raise IndexError('too many indices')
    expanded.extend((ndim - len(expanded)) * [slice(None)])
    return tuple(expanded)
def canonicalize_indexer(key, ndim):
    """Given an indexer for orthogonal array indexing, return an indexer that
    is a tuple composed entirely of slices, integer ndarrays and native python
    ints.

    Boolean arrays are converted to the integer positions of their True
    entries; 0-d values collapse to plain Python ints.

    Raises
    ------
    ValueError
        If a subkey is an array with more than one dimension, or an array
        whose dtype is neither boolean nor signed integer.
    """
    def canonicalize(indexer):
        if not isinstance(indexer, slice):
            indexer = np.asarray(indexer)
            if indexer.ndim == 0:
                # .item() extracts the native Python scalar; np.asscalar
                # was deprecated in NumPy 1.16 and later removed.
                indexer = int(indexer.item())
        if isinstance(indexer, np.ndarray):
            if indexer.ndim != 1:
                raise ValueError('orthogonal array indexing only supports '
                                 '1d arrays')
            if indexer.dtype.kind == 'b':
                # boolean mask -> positions of its True entries
                indexer, = np.nonzero(indexer)
            elif indexer.dtype.kind != 'i':
                raise ValueError('invalid subkey %r for integer based '
                                 'array indexing; all subkeys must be '
                                 'slices, integers or sequences of '
                                 'integers or Booleans' % indexer)
        return indexer
    return tuple(canonicalize(k) for k in expanded_indexer(key, ndim))
def orthogonal_indexer(key, shape):
    """Given a key for orthogonal array indexing, returns an equivalent key
    suitable for indexing a numpy.ndarray with fancy indexing.

    Orthogonal ("outer") indexing selects independently along each axis,
    whereas numpy's fancy indexing broadcasts index arrays together, so 1d
    indexers are expanded here via np.ix_ into broadcast-compatible arrays.
    """
    def expand_key(k, length):
        # Convert a slice into the explicit integer array it selects.
        # Using slice.indices() correctly handles None bounds, negative
        # start/stop and negative steps; the previous `k.start or 0,
        # k.stop or length, k.step or 1` form broke for e.g. stop == 0,
        # negative bounds, and slice(None, None, -1).
        if isinstance(k, slice):
            return np.arange(*k.indices(length))
        else:
            return k
    # replace Ellipsis objects with slices
    key = list(canonicalize_indexer(key, len(shape)))
    # replace 1d arrays and slices with broadcast compatible arrays
    # note: we treat integers separately (instead of turning them into 1d
    # arrays) because integers (and only integers) collapse axes when used with
    # __getitem__
    non_int_keys = [n for n, k in enumerate(key) if not isinstance(k, int)]
    def full_slices_unselected(n_list):
        # Trim, from both ends of n_list, positions whose prefix (resp.
        # suffix) of the key consists entirely of full slices -- those can
        # safely stay as slices instead of becoming index arrays.
        def all_full_slices(key_index):
            return all(isinstance(key[n], slice) and key[n] == slice(None)
                       for n in key_index)
        if not n_list:
            return n_list
        elif all_full_slices(range(n_list[0] + 1)):
            return full_slices_unselected(n_list[1:])
        elif all_full_slices(range(n_list[-1], len(key))):
            return full_slices_unselected(n_list[:-1])
        else:
            return n_list
    # However, testing suggests it is OK to keep contiguous sequences of full
    # slices at the start or the end of the key. Keeping slices around (when
    # possible) instead of converting slices to arrays significantly speeds up
    # indexing.
    # (Honestly, I don't understand when it's not OK to keep slices even in
    # between integer indices if as array is somewhere in the key, but such are
    # the admittedly mind-boggling ways of numpy's advanced indexing.)
    array_keys = full_slices_unselected(non_int_keys)
    array_indexers = np.ix_(*(expand_key(key[n], shape[n])
                              for n in array_keys))
    for i, n in enumerate(array_keys):
        key[n] = array_indexers[i]
    return tuple(key)
def convert_label_indexer(index, label, index_name=''):
    """Given a pandas.Index (or xray.Coordinate) and labels (e.g., from
    __getitem__) for one dimension, return an indexer suitable for indexing an
    ndarray along that dimension.

    Parameters
    ----------
    index : pandas.Index
        Index in which to look the labels up.
    label : slice, scalar or array_like
        Label-based indexer to translate into positions.
    index_name : str, optional
        Name used in the error message when labels are missing.

    Raises
    ------
    ValueError
        If any requested (array) label is not present in *index*.
    """
    if isinstance(label, slice):
        # slice_indexer does label-based slicing (inclusive of endpoints,
        # per pandas semantics) and returns a positional slice.
        indexer = index.slice_indexer(label.start, label.stop, label.step)
    else:
        label = np.asarray(label)
        if label.ndim == 0:
            # .item() extracts the native Python scalar; np.asscalar was
            # deprecated in NumPy 1.16 and later removed.
            indexer = index.get_loc(label.item())
        else:
            # get_indexer marks missing labels with -1
            indexer = index.get_indexer(label)
            if np.any(indexer < 0):
                raise ValueError('not all values found in index %r'
                                 % index_name)
    return indexer
def remap_label_indexers(data_obj, indexers):
    """Given an xray data object and label based indexers, return a mapping
    of equivalent location based indexers.

    Parameters
    ----------
    data_obj : object
        Object exposing a ``coordinates`` mapping of dimension name to a
        pandas.Index-like coordinate.
    indexers : dict
        Mapping of dimension name to a label-based indexer.
    """
    # dict.items() works on both Python 2 and 3; the original .iteritems()
    # is Python-2-only and breaks under Python 3.
    return {dim: convert_label_indexer(data_obj.coordinates[dim], label, dim)
            for dim, label in indexers.items()}
def _expand_slice(slice_, size):
return np.arange(*slice_.indices(size))
def slice_slice(old_slice, applied_slice, size):
    """Compose two slices into one.

    Given *old_slice* and the *size* of the dimension it applies to, index
    it with *applied_slice* and return a single slice equivalent to
    applying the two sequentially.
    """
    step = (old_slice.step or 1) * (applied_slice.step or 1)
    # For now, use the hack of turning old_slice into an ndarray to
    # reconstruct the slice start and stop. Not ideal, but still better
    # than leaving the indexer as an array.
    selected = _expand_slice(old_slice, size)[applied_slice]
    if len(selected) == 0:
        # empty selection -> canonical empty slice
        start = 0
        stop = 0
    else:
        start = selected[0]
        stop = selected[-1] + step
        if stop < 0:
            # a negative stop would wrap around; None means "to the end"
            stop = None
    return slice(start, stop, step)
def _index_indexer_1d(old_indexer, applied_indexer, size):
    """Apply *applied_indexer* on top of *old_indexer* along one dimension
    of length *size*, returning the composed indexer."""
    assert isinstance(applied_indexer, (int, slice, np.ndarray))
    if isinstance(applied_indexer, slice) and applied_indexer == slice(None):
        # shortcut for the usual case: a full slice changes nothing
        return old_indexer
    if not isinstance(old_indexer, slice):
        # old indexer is already an array: index it directly
        return old_indexer[applied_indexer]
    if isinstance(applied_indexer, slice):
        # slice-of-slice stays a slice
        return slice_slice(old_indexer, applied_indexer, size)
    # slice indexed by int/array: materialize the slice first
    return _expand_slice(old_indexer, size)[applied_indexer]
class LazilyIndexedArray(utils.NDArrayMixin):
    """Wrap an array that handles orthogonal indexing to make indexing lazy.

    Instead of materializing data on every ``__getitem__``, the wrapper
    composes each new key with the key accumulated so far, and only indexes
    the underlying array when ``values`` (or ``__array__``) is accessed.
    """
    def __init__(self, array, key=None):
        """
        Parameters
        ----------
        array : array_like
            Array like object to index.
        key : tuple, optional
            Array indexer. If provided, it is assumed to already be in
            canonical expanded form.
        """
        if key is None:
            # default key selects everything along every dimension
            key = (slice(None),) * array.ndim
        self.array = array
        self.key = key

    def _updated_key(self, new_key):
        """Compose *new_key* (indexing self) with the stored key."""
        new_key = iter(canonicalize_indexer(new_key, self.ndim))
        key = []
        for size, k in zip(self.array.shape, self.key):
            if isinstance(k, int):
                # int-indexed dimensions are already collapsed, so the
                # new key carries no entry for them
                key.append(k)
            else:
                # next() builtin works on Python 2.6+ and 3 alike
                # (the .next() method was Python-2-only)
                key.append(_index_indexer_1d(k, next(new_key), size))
        return tuple(key)

    @property
    def shape(self):
        # Dimensions indexed by a plain int are dropped, matching ndarray
        # __getitem__ semantics.
        shape = []
        for size, k in zip(self.array.shape, self.key):
            if isinstance(k, slice):
                shape.append(len(range(*k.indices(size))))
            elif isinstance(k, np.ndarray):
                shape.append(k.size)
        return tuple(shape)

    @property
    def values(self):
        # This is where the deferred indexing actually happens.
        return self.array[self.key]

    def __array__(self, dtype=None):
        # Bug fix: pass the requested dtype through. The original passed
        # the literal dtype=None, silently ignoring the caller's dtype.
        return np.asarray(self.values, dtype=dtype)

    def __getitem__(self, key):
        return type(self)(self.array, self._updated_key(key))

    def __setitem__(self, key, value):
        key = self._updated_key(key)
        self.array[key] = value

    def __repr__(self):
        return ('%s(array=%r, key=%r)' %
                (type(self).__name__, self.array, self.key))
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.